/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>

#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/shm.h>

#include "qemu.h"
#include "user/tswap-target.h"
#include "user/guest-base.h"
#include "user-internals.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "disas/disas.h"
#include "qemu/bitops.h"
#include "qemu/path.h"
#include "qemu/queue.h"
#include "qemu/guest-random.h"
#include "qemu/units.h"
#include "qemu/selfmap.h"
#include "qemu/lockable.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target_signal.h"
#include "tcg/debuginfo.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

#ifndef TARGET_ARCH_HAS_SIGTRAMP_PAGE
#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0
#endif

typedef struct {
    const uint8_t *image;
    const uint32_t *relocs;
    unsigned image_size;
    unsigned reloc_count;
    unsigned sigreturn_ofs;
    unsigned rt_sigreturn_ofs;
} VdsoImageInfo;

#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE =  0x0040000,     /* disable randomization of VA space */
    FDPIC_FUNCPTRS =     0x0080000,     /* userspace function ptrs point to
                                           descriptors (signal handling) */
    MMAP_PAGE_ZERO =     0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC =  0x0400000,
    ADDR_LIMIT_32BIT =   0x0800000,
    SHORT_INODE =        0x1000000,
    WHOLE_SECONDS =      0x2000000,
    STICKY_TIMEOUTS =    0x4000000,
    ADDR_LIMIT_3GB =     0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX =         0x0000,
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_BSD =           0x0006,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =       0x0008,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,   /* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,   /* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,   /* IRIX6 64-bit */
    PER_RISCOS =        0x000c,
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                     /* OSF/1 v4 */
    PER_HPUX =          0x0010,
    PER_MASK =          0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)
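/*
 * Worked example (illustrative, not from the kernel headers): the macro
 * masks off the bug-emulation flags above, so
 * personality(PER_LINUX32_3GB) == PER_LINUX32, because
 * (0x0008 | ADDR_LIMIT_3GB) & PER_MASK == 0x0008.
 */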
int info_is_fdpic(struct image_info *info)
{
    return info->personality == PER_LINUX_FDPIC;
}

/* this flag is ineffective under linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#if TARGET_BIG_ENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif

#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong      target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong       target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif

#ifdef USE_UID16
typedef abi_ushort      target_uid_t;
typedef abi_ushort      target_gid_t;
#else
typedef abi_uint        target_uid_t;
typedef abi_uint        target_gid_t;
#endif
typedef abi_int         target_pid_t;

#ifdef TARGET_I386

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}
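/*
 * Guest-side view (illustrative sketch, assuming a glibc-based guest):
 * the value computed above is what a program running under qemu-user
 * observes as AT_HWCAP in its auxiliary vector, e.g.
 *
 *     #include <sys/auxv.h>
 *     unsigned long hw = getauxval(AT_HWCAP);  // FEAT_1_EDX bits
 */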
#ifdef TARGET_X86_64
#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64

#define ELF_PLATFORM   "x86_64"

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29 as there should be room for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[15]);
    (*regs)[1] = tswapreg(env->regs[14]);
    (*regs)[2] = tswapreg(env->regs[13]);
    (*regs)[3] = tswapreg(env->regs[12]);
    (*regs)[4] = tswapreg(env->regs[R_EBP]);
    (*regs)[5] = tswapreg(env->regs[R_EBX]);
    (*regs)[6] = tswapreg(env->regs[11]);
    (*regs)[7] = tswapreg(env->regs[10]);
    (*regs)[8] = tswapreg(env->regs[9]);
    (*regs)[9] = tswapreg(env->regs[8]);
    (*regs)[10] = tswapreg(env->regs[R_EAX]);
    (*regs)[11] = tswapreg(env->regs[R_ECX]);
    (*regs)[12] = tswapreg(env->regs[R_EDX]);
    (*regs)[13] = tswapreg(env->regs[R_ESI]);
    (*regs)[14] = tswapreg(env->regs[R_EDI]);
    (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[16] = tswapreg(env->eip);
    (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[18] = tswapreg(env->eflags);
    (*regs)[19] = tswapreg(env->regs[R_ESP]);
    (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff);
    (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
}

#if ULONG_MAX > UINT32_MAX
#define INIT_GUEST_COMMPAGE
static bool init_guest_commpage(void)
{
    /*
     * The vsyscall page is at a high negative address aka kernel space,
     * which means that we cannot actually allocate it with target_mmap.
     * We still should be able to use page_set_flags, unless the user
     * has specified -R reserved_va, which would trigger an assert().
     */
    if (reserved_va != 0 &&
        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
        error_report("Cannot allocate vsyscall page");
        exit(EXIT_FAILURE);
    }
    page_set_flags(TARGET_VSYSCALL_PAGE,
                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}
#endif
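/*
 * Note (summarizing QEMU behaviour; see the x86 translator's vsyscall
 * handling): because the page gets PAGE_EXEC but no host mapping, a guest
 * branch into the vsyscall page is recognized at translation time and the
 * call is emulated, rather than fetched as real code.
 */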
319 */ 320 #define DLINFO_ARCH_ITEMS (vdso_info != NULL) 321 #define ARCH_DLINFO \ 322 do { \ 323 if (vdso_info) { \ 324 NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry); \ 325 } \ 326 } while (0) 327 328 #endif /* TARGET_X86_64 */ 329 330 #define VDSO_HEADER "vdso.c.inc" 331 332 #define USE_ELF_CORE_DUMP 333 #define ELF_EXEC_PAGESIZE 4096 334 335 #endif /* TARGET_I386 */ 336 337 #ifdef TARGET_ARM 338 339 #ifndef TARGET_AARCH64 340 /* 32 bit ARM definitions */ 341 342 #define ELF_ARCH EM_ARM 343 #define ELF_CLASS ELFCLASS32 344 #define EXSTACK_DEFAULT true 345 346 static inline void init_thread(struct target_pt_regs *regs, 347 struct image_info *infop) 348 { 349 abi_long stack = infop->start_stack; 350 memset(regs, 0, sizeof(*regs)); 351 352 regs->uregs[16] = ARM_CPU_MODE_USR; 353 if (infop->entry & 1) { 354 regs->uregs[16] |= CPSR_T; 355 } 356 regs->uregs[15] = infop->entry & 0xfffffffe; 357 regs->uregs[13] = infop->start_stack; 358 /* FIXME - what to for failure of get_user()? */ 359 get_user_ual(regs->uregs[2], stack + 8); /* envp */ 360 get_user_ual(regs->uregs[1], stack + 4); /* envp */ 361 /* XXX: it seems that r0 is zeroed after ! */ 362 regs->uregs[0] = 0; 363 /* For uClinux PIC binaries. */ 364 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */ 365 regs->uregs[10] = infop->start_data; 366 367 /* Support ARM FDPIC. */ 368 if (info_is_fdpic(infop)) { 369 /* As described in the ABI document, r7 points to the loadmap info 370 * prepared by the kernel. If an interpreter is needed, r8 points 371 * to the interpreter loadmap and r9 points to the interpreter 372 * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and 373 * r9 points to the main program PT_DYNAMIC info. 374 */ 375 regs->uregs[7] = infop->loadmap_addr; 376 if (infop->interpreter_loadmap_addr) { 377 /* Executable is dynamically loaded. 
static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us that we have no such handler.  */
    regs->edx = 0;
}

#define ELF_NREG    17
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19 as there should be room for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[R_EBX]);
    (*regs)[1] = tswapreg(env->regs[R_ECX]);
    (*regs)[2] = tswapreg(env->regs[R_EDX]);
    (*regs)[3] = tswapreg(env->regs[R_ESI]);
    (*regs)[4] = tswapreg(env->regs[R_EDI]);
    (*regs)[5] = tswapreg(env->regs[R_EBP]);
    (*regs)[6] = tswapreg(env->regs[R_EAX]);
    (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[12] = tswapreg(env->eip);
    (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[14] = tswapreg(env->eflags);
    (*regs)[15] = tswapreg(env->regs[R_ESP]);
    (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff);
}

/*
 * i386 is the only target which supplies AT_SYSINFO for the vdso.
 * All others only supply AT_SYSINFO_EHDR.
 */
#define DLINFO_ARCH_ITEMS (vdso_info != NULL)
#define ARCH_DLINFO                                     \
    do {                                                \
        if (vdso_info) {                                \
            NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry);  \
        }                                               \
    } while (0)

#endif /* TARGET_X86_64 */

#define VDSO_HEADER "vdso.c.inc"

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif /* TARGET_I386 */

#ifdef TARGET_ARM

#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */

#define ELF_ARCH        EM_ARM
#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->uregs[16] = ARM_CPU_MODE_USR;
    if (infop->entry & 1) {
        regs->uregs[16] |= CPSR_T;
    }
    regs->uregs[15] = infop->entry & 0xfffffffe;
    regs->uregs[13] = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->uregs[2], stack + 8); /* envp */
    get_user_ual(regs->uregs[1], stack + 4); /* envp */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->uregs[0] = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->uregs[10] = infop->start_data;

    /* Support ARM FDPIC.  */
    if (info_is_fdpic(infop)) {
        /* As described in the ABI document, r7 points to the loadmap info
         * prepared by the kernel. If an interpreter is needed, r8 points
         * to the interpreter loadmap and r9 points to the interpreter
         * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
         * r9 points to the main program PT_DYNAMIC info.
         */
        regs->uregs[7] = infop->loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            /* Executable is dynamically loaded.  */
            regs->uregs[8] = infop->interpreter_loadmap_addr;
            regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->uregs[8] = 0;
            regs->uregs[9] = infop->pt_dynamic_addr;
        }
    }
}
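/*
 * Illustration (standard ARM ABI convention, not QEMU-specific): a Thumb
 * entry point has bit 0 set, so an ELF entry of 0x10001 starts execution
 * at pc 0x10000 with CPSR.T set, while 0x10000 starts in ARM state.
 */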
#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);

    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
    ARM_HWCAP_ARM_FPHP      = 1 << 22,
    ARM_HWCAP_ARM_ASIMDHP   = 1 << 23,
    ARM_HWCAP_ARM_ASIMDDP   = 1 << 24,
    ARM_HWCAP_ARM_ASIMDFHM  = 1 << 25,
    ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26,
    ARM_HWCAP_ARM_I8MM      = 1 << 27,
};

enum {
    ARM_HWCAP2_ARM_AES      = 1 << 0,
    ARM_HWCAP2_ARM_PMULL    = 1 << 1,
    ARM_HWCAP2_ARM_SHA1     = 1 << 2,
    ARM_HWCAP2_ARM_SHA2     = 1 << 3,
    ARM_HWCAP2_ARM_CRC32    = 1 << 4,
    ARM_HWCAP2_ARM_SB       = 1 << 5,
    ARM_HWCAP2_ARM_SSBS     = 1 << 6,
};

/* The commpage only exists for 32 bit kernels */

#define HI_COMMPAGE (intptr_t)0xffff0f00u

static bool init_guest_commpage(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    int host_page_size = qemu_real_host_page_size();
    abi_ptr commpage;
    void *want;
    void *addr;

    /*
     * M-profile has at most a 2GB address space, so the commpage
     * can never be allocated.  Skip it.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        return true;
    }

    commpage = HI_COMMPAGE & -host_page_size;
    want = g2h_untagged(commpage);
    addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_PRIVATE |
                (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE),
                -1, 0);

    if (addr == MAP_FAILED) {
        perror("Allocating guest commpage");
        exit(EXIT_FAILURE);
    }
    if (addr != want) {
        return false;
    }

    /* Set kernel helper versions; rest of page is 0.  */
    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));

    if (mprotect(addr, host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(EXIT_FAILURE);
    }

    page_set_flags(commpage, commpage | (host_page_size - 1),
                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
    return true;
}
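/*
 * Background (32-bit ARM kernel ABI, abridged): this page mirrors the
 * kernel's "kuser helpers".  Guests branch to fixed addresses such as
 * __kernel_cmpxchg at 0xffff0fc0 or __kernel_get_tls at 0xffff0fe0, and
 * read the helper version (5, written above) from 0xffff0ffc.
 */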
#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_ARM_SWP;
    hwcaps |= ARM_HWCAP_ARM_HALF;
    hwcaps |= ARM_HWCAP_ARM_THUMB;
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
    GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
    GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
    GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
    GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
    GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);

    if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
        cpu_isar_feature(aa32_fpdp_v3, cpu)) {
        hwcaps |= ARM_HWCAP_ARM_VFPv3;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            hwcaps |= ARM_HWCAP_ARM_VFPD32;
        } else {
            hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
        }
    }
    GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
    /*
     * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same
     * isar_feature function for both.  The kernel reports them as two hwcaps.
     */
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP);
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP);
    GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP);
    GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM);
    GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16);
    GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM);

    return hwcaps;
}

uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB);
    GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS);
    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP_ARM_SWP      )] = "swp",
    [__builtin_ctz(ARM_HWCAP_ARM_HALF     )] = "half",
    [__builtin_ctz(ARM_HWCAP_ARM_THUMB    )] = "thumb",
    [__builtin_ctz(ARM_HWCAP_ARM_26BIT    )] = "26bit",
    [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult",
    [__builtin_ctz(ARM_HWCAP_ARM_FPA      )] = "fpa",
    [__builtin_ctz(ARM_HWCAP_ARM_VFP      )] = "vfp",
    [__builtin_ctz(ARM_HWCAP_ARM_EDSP     )] = "edsp",
    [__builtin_ctz(ARM_HWCAP_ARM_JAVA     )] = "java",
    [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT   )] = "iwmmxt",
    [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH   )] = "crunch",
    [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE  )] = "thumbee",
    [__builtin_ctz(ARM_HWCAP_ARM_NEON     )] = "neon",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPv3    )] = "vfpv3",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16",
    [__builtin_ctz(ARM_HWCAP_ARM_TLS      )] = "tls",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPv4    )] = "vfpv4",
    [__builtin_ctz(ARM_HWCAP_ARM_IDIVA    )] = "idiva",
    [__builtin_ctz(ARM_HWCAP_ARM_IDIVT    )] = "idivt",
    [__builtin_ctz(ARM_HWCAP_ARM_VFPD32   )] = "vfpd32",
    [__builtin_ctz(ARM_HWCAP_ARM_LPAE     )] = "lpae",
    [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM  )] = "evtstrm",
    [__builtin_ctz(ARM_HWCAP_ARM_FPHP     )] = "fphp",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP  )] = "asimdhp",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP  )] = "asimddp",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm",
    [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16",
    [__builtin_ctz(ARM_HWCAP_ARM_I8MM     )] = "i8mm",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP2_ARM_AES  )] = "aes",
    [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull",
    [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1",
    [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2",
    [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32",
    [__builtin_ctz(ARM_HWCAP2_ARM_SB   )] = "sb",
    [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

#undef GET_FEATURE
#undef GET_FEATURE_ID
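/*
 * Note (assumption about usage): these names deliberately match the
 * strings the 32-bit kernel prints in the "Features" line of
 * /proc/cpuinfo, so the emulated /proc/cpuinfo looks familiar to guests.
 */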
#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    CPUARMState *env = cpu_env(thread_cpu);

#if TARGET_BIG_ENDIAN
# define END  "b"
#else
# define END  "l"
#endif

    if (arm_feature(env, ARM_FEATURE_V8)) {
        return "v8" END;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            return "v7m" END;
        } else {
            return "v7" END;
        }
    } else if (arm_feature(env, ARM_FEATURE_V6)) {
        return "v6" END;
    } else if (arm_feature(env, ARM_FEATURE_V5)) {
        return "v5" END;
    } else {
        return "v4" END;
    }

#undef END
}
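/*
 * Worked example (illustrative): a little-endian v7-A CPU model yields
 * "v7l", matching what a 32-bit kernel would report via AT_PLATFORM.
 */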
#else
/* 64 bit ARM definitions */

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#if TARGET_BIG_ENDIAN
# define ELF_PLATFORM    "aarch64_be"
#else
# define ELF_PLATFORM    "aarch64"
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
    ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
    ARM_HWCAP_A64_DIT           = 1 << 24,
    ARM_HWCAP_A64_USCAT         = 1 << 25,
    ARM_HWCAP_A64_ILRCPC        = 1 << 26,
    ARM_HWCAP_A64_FLAGM         = 1 << 27,
    ARM_HWCAP_A64_SSBS          = 1 << 28,
    ARM_HWCAP_A64_SB            = 1 << 29,
    ARM_HWCAP_A64_PACA          = 1 << 30,
    ARM_HWCAP_A64_PACG          = 1UL << 31,

    ARM_HWCAP2_A64_DCPODP       = 1 << 0,
    ARM_HWCAP2_A64_SVE2         = 1 << 1,
    ARM_HWCAP2_A64_SVEAES       = 1 << 2,
    ARM_HWCAP2_A64_SVEPMULL     = 1 << 3,
    ARM_HWCAP2_A64_SVEBITPERM   = 1 << 4,
    ARM_HWCAP2_A64_SVESHA3      = 1 << 5,
    ARM_HWCAP2_A64_SVESM4       = 1 << 6,
    ARM_HWCAP2_A64_FLAGM2       = 1 << 7,
    ARM_HWCAP2_A64_FRINT        = 1 << 8,
    ARM_HWCAP2_A64_SVEI8MM      = 1 << 9,
    ARM_HWCAP2_A64_SVEF32MM     = 1 << 10,
    ARM_HWCAP2_A64_SVEF64MM     = 1 << 11,
    ARM_HWCAP2_A64_SVEBF16      = 1 << 12,
    ARM_HWCAP2_A64_I8MM         = 1 << 13,
    ARM_HWCAP2_A64_BF16         = 1 << 14,
    ARM_HWCAP2_A64_DGH          = 1 << 15,
    ARM_HWCAP2_A64_RNG          = 1 << 16,
    ARM_HWCAP2_A64_BTI          = 1 << 17,
    ARM_HWCAP2_A64_MTE          = 1 << 18,
    ARM_HWCAP2_A64_ECV          = 1 << 19,
    ARM_HWCAP2_A64_AFP          = 1 << 20,
    ARM_HWCAP2_A64_RPRES        = 1 << 21,
    ARM_HWCAP2_A64_MTE3         = 1 << 22,
    ARM_HWCAP2_A64_SME          = 1 << 23,
    ARM_HWCAP2_A64_SME_I16I64   = 1 << 24,
    ARM_HWCAP2_A64_SME_F64F64   = 1 << 25,
    ARM_HWCAP2_A64_SME_I8I32    = 1 << 26,
    ARM_HWCAP2_A64_SME_F16F32   = 1 << 27,
    ARM_HWCAP2_A64_SME_B16F32   = 1 << 28,
    ARM_HWCAP2_A64_SME_F32F32   = 1 << 29,
    ARM_HWCAP2_A64_SME_FA64     = 1 << 30,
    ARM_HWCAP2_A64_WFXT         = 1ULL << 31,
    ARM_HWCAP2_A64_EBF16        = 1ULL << 32,
    ARM_HWCAP2_A64_SVE_EBF16    = 1ULL << 33,
    ARM_HWCAP2_A64_CSSC         = 1ULL << 34,
    ARM_HWCAP2_A64_RPRFM        = 1ULL << 35,
    ARM_HWCAP2_A64_SVE2P1       = 1ULL << 36,
    ARM_HWCAP2_A64_SME2         = 1ULL << 37,
    ARM_HWCAP2_A64_SME2P1       = 1ULL << 38,
    ARM_HWCAP2_A64_SME_I16I32   = 1ULL << 39,
    ARM_HWCAP2_A64_SME_BI32I32  = 1ULL << 40,
    ARM_HWCAP2_A64_SME_B16B16   = 1ULL << 41,
    ARM_HWCAP2_A64_SME_F16F16   = 1ULL << 42,
    ARM_HWCAP2_A64_MOPS         = 1ULL << 43,
    ARM_HWCAP2_A64_HBC          = 1ULL << 44,
};

#define ELF_HWCAP   get_elf_hwcap()
#define ELF_HWCAP2  get_elf_hwcap2()

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;
    hwcaps |= ARM_HWCAP_A64_CPUID;

    /* probe for the extra features */

    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
    GET_FEATURE_ID(aa64_lse2, ARM_HWCAP_A64_USCAT);
    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
    GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
    GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
    GET_FEATURE_ID(aa64_dit, ARM_HWCAP_A64_DIT);
    GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
    GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
    GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
    GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
    GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
    GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);

    return hwcaps;
}
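/*
 * Note (kernel ABI, summarized): ARM_HWCAP_A64_CPUID advertises that EL0
 * reads of the ID registers are trapped and emulated for userspace; it is
 * set unconditionally above because the emulation provides that too.
 */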
uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
    GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
    GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
    GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
    GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
    GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
    GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
    GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
    GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
    GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
    GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
    GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
    GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
    GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
    GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
    GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
    GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
    GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
    GET_FEATURE_ID(aa64_mte3, ARM_HWCAP2_A64_MTE3);
    GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
                              ARM_HWCAP2_A64_SME_F32F32 |
                              ARM_HWCAP2_A64_SME_B16F32 |
                              ARM_HWCAP2_A64_SME_F16F32 |
                              ARM_HWCAP2_A64_SME_I8I32));
    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
    GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC);
    GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS);

    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP_A64_FP      )] = "fp",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMD   )] = "asimd",
    [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
    [__builtin_ctz(ARM_HWCAP_A64_AES     )] = "aes",
    [__builtin_ctz(ARM_HWCAP_A64_PMULL   )] = "pmull",
    [__builtin_ctz(ARM_HWCAP_A64_SHA1    )] = "sha1",
    [__builtin_ctz(ARM_HWCAP_A64_SHA2    )] = "sha2",
    [__builtin_ctz(ARM_HWCAP_A64_CRC32   )] = "crc32",
    [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics",
    [__builtin_ctz(ARM_HWCAP_A64_FPHP    )] = "fphp",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp",
    [__builtin_ctz(ARM_HWCAP_A64_CPUID   )] = "cpuid",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm",
    [__builtin_ctz(ARM_HWCAP_A64_JSCVT   )] = "jscvt",
    [__builtin_ctz(ARM_HWCAP_A64_FCMA    )] = "fcma",
    [__builtin_ctz(ARM_HWCAP_A64_LRCPC   )] = "lrcpc",
    [__builtin_ctz(ARM_HWCAP_A64_DCPOP   )] = "dcpop",
    [__builtin_ctz(ARM_HWCAP_A64_SHA3    )] = "sha3",
    [__builtin_ctz(ARM_HWCAP_A64_SM3     )] = "sm3",
    [__builtin_ctz(ARM_HWCAP_A64_SM4     )] = "sm4",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp",
    [__builtin_ctz(ARM_HWCAP_A64_SHA512  )] = "sha512",
    [__builtin_ctz(ARM_HWCAP_A64_SVE     )] = "sve",
    [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm",
    [__builtin_ctz(ARM_HWCAP_A64_DIT     )] = "dit",
    [__builtin_ctz(ARM_HWCAP_A64_USCAT   )] = "uscat",
    [__builtin_ctz(ARM_HWCAP_A64_ILRCPC  )] = "ilrcpc",
    [__builtin_ctz(ARM_HWCAP_A64_FLAGM   )] = "flagm",
    [__builtin_ctz(ARM_HWCAP_A64_SSBS    )] = "ssbs",
    [__builtin_ctz(ARM_HWCAP_A64_SB      )] = "sb",
    [__builtin_ctz(ARM_HWCAP_A64_PACA    )] = "paca",
    [__builtin_ctz(ARM_HWCAP_A64_PACG    )] = "pacg",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}
const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
    [__builtin_ctz(ARM_HWCAP2_A64_DCPODP       )] = "dcpodp",
    [__builtin_ctz(ARM_HWCAP2_A64_SVE2         )] = "sve2",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEAES       )] = "sveaes",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL     )] = "svepmull",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM   )] = "svebitperm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3      )] = "svesha3",
    [__builtin_ctz(ARM_HWCAP2_A64_SVESM4       )] = "svesm4",
    [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2       )] = "flagm2",
    [__builtin_ctz(ARM_HWCAP2_A64_FRINT        )] = "frint",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM      )] = "svei8mm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM     )] = "svef32mm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM     )] = "svef64mm",
    [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16      )] = "svebf16",
    [__builtin_ctz(ARM_HWCAP2_A64_I8MM         )] = "i8mm",
    [__builtin_ctz(ARM_HWCAP2_A64_BF16         )] = "bf16",
    [__builtin_ctz(ARM_HWCAP2_A64_DGH          )] = "dgh",
    [__builtin_ctz(ARM_HWCAP2_A64_RNG          )] = "rng",
    [__builtin_ctz(ARM_HWCAP2_A64_BTI          )] = "bti",
    [__builtin_ctz(ARM_HWCAP2_A64_MTE          )] = "mte",
    [__builtin_ctz(ARM_HWCAP2_A64_ECV          )] = "ecv",
    [__builtin_ctz(ARM_HWCAP2_A64_AFP          )] = "afp",
    [__builtin_ctz(ARM_HWCAP2_A64_RPRES        )] = "rpres",
    [__builtin_ctz(ARM_HWCAP2_A64_MTE3         )] = "mte3",
    [__builtin_ctz(ARM_HWCAP2_A64_SME          )] = "sme",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64   )] = "smei16i64",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64   )] = "smef64f64",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32    )] = "smei8i32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32   )] = "smef16f32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32   )] = "smeb16f32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32   )] = "smef32f32",
    [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64     )] = "smefa64",
    [__builtin_ctz(ARM_HWCAP2_A64_WFXT         )] = "wfxt",
    [__builtin_ctzll(ARM_HWCAP2_A64_EBF16      )] = "ebf16",
    [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16  )] = "sveebf16",
    [__builtin_ctzll(ARM_HWCAP2_A64_CSSC       )] = "cssc",
    [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM      )] = "rprfm",
    [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1     )] = "sve2p1",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME2       )] = "sme2",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1     )] = "sme2p1",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16",
    [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
    [__builtin_ctzll(ARM_HWCAP2_A64_MOPS       )] = "mops",
    [__builtin_ctzll(ARM_HWCAP2_A64_HBC        )] = "hbc",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}
#undef GET_FEATURE_ID

#endif /* not TARGET_AARCH64 */

#if TARGET_BIG_ENDIAN
# define VDSO_HEADER  "vdso-be.c.inc"
#else
# define VDSO_HEADER  "vdso-le.c.inc"
#endif

#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_SPARCV9
#else
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV)
#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_SPARC
#endif /* TARGET_SPARC64 */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Note that target_cpu_copy_regs does not read psr/tstate. */
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong)
                        - TARGET_STACK_BIAS);
}
#endif /* TARGET_SPARC */
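/*
 * Background (SPARC ABI, summarized): the stack pointer must reserve a
 * 16-word register window save area below it, hence the
 * 16 * sizeof(abi_ulong) adjustment above; on 64-bit SPARC the pointer is
 * additionally offset by the 2047-byte stack bias (TARGET_STACK_BIAS).
 */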
#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE

#if defined(TARGET_PPC64)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

#endif

#define ELF_ARCH        EM_PPC

/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
   See arch/powerpc/include/asm/cputable.h.  */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,

    /* Feature definitions in AT_HWCAP2.  */
    QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
    QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
    QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
    QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
    QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
    QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
    QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
    QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
    QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
    QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
    QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
    QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
    QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */
    QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

    /* We don't have to be terribly complete here; the high points are
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature)                                    \
    do {                                                                \
        if ((cpu->env.insns_flags2 & flags) == flags) {                 \
            features |= feature;                                        \
        }                                                               \
    } while (0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
    GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
    GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
    GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
                  PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
                  QEMU_PPC_FEATURE_ARCH_2_06);
#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap2(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature)                                      \
    do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)

    GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
    GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
    GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                  PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
                  QEMU_PPC_FEATURE2_VEC_CRYPTO);
    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
                 QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128);
    GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 |
                 QEMU_PPC_FEATURE2_MMA);

#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                                     \
    do {                                                                \
        PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);                      \
        /*                                                              \
         * Handle glibc compatibility: these magic entries must        \
         * be at the lowest addresses in the final auxv.                \
         */                                                             \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                        \
        NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size);         \
        NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size);         \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                                 \
    } while (0)

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64)
    if (get_ppc64_abi(infop) < 2) {
        uint64_t val;
        get_user_u64(val, infop->entry + 8);
        _regs->gpr[2] = val + infop->load_bias;
        get_user_u64(val, infop->entry);
        infop->entry = val + infop->load_bias;
    } else {
        _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
    }
#endif
    _regs->nip = infop->entry;
}
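/*
 * Background (ELFv1 ABI, summarized): a ppc64 ABI version below 2 means
 * ELFv1, where e_entry points at a function descriptor rather than code;
 * the two get_user_u64() reads above fetch the descriptor's TOC pointer
 * (into r2) and the real code address (which replaces infop->entry).
 */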
/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
#define ELF_NREG 48
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapreg(env->gpr[i]);
    }

    (*regs)[32] = tswapreg(env->nip);
    (*regs)[33] = tswapreg(env->msr);
    (*regs)[35] = tswapreg(env->ctr);
    (*regs)[36] = tswapreg(env->lr);
    (*regs)[37] = tswapreg(cpu_read_xer(env));

    ccr = ppc_get_cr(env);
    (*regs)[38] = tswapreg(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#ifndef TARGET_PPC64
# define VDSO_HEADER  "vdso-32.c.inc"
#elif TARGET_BIG_ENDIAN
# define VDSO_HEADER  "vdso-64.c.inc"
#else
# define VDSO_HEADER  "vdso-64le.c.inc"
#endif

#endif

#ifdef TARGET_LOONGARCH64

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_LOONGARCH
#define EXSTACK_DEFAULT true

#define elf_check_arch(x) ((x) == EM_LOONGARCH)

#define VDSO_HEADER "vdso.c.inc"

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Set crmd PG,DA = 1,0 */
    regs->csr.crmd = 2 << 3;
    regs->csr.era = infop->entry;
    regs->regs[3] = infop->start_stack;
}

/* See linux kernel: arch/loongarch/include/asm/elf.h */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_EF_R0 = 0,
    TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33,
    TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPULoongArchState *env)
{
    int i;

    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]);
    }

    (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc);
    (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#define ELF_HWCAP get_elf_hwcap()

/* See arch/loongarch/include/uapi/asm/hwcap.h */
enum {
    HWCAP_LOONGARCH_CPUCFG   = (1 << 0),
    HWCAP_LOONGARCH_LAM      = (1 << 1),
    HWCAP_LOONGARCH_UAL      = (1 << 2),
    HWCAP_LOONGARCH_FPU      = (1 << 3),
    HWCAP_LOONGARCH_LSX      = (1 << 4),
    HWCAP_LOONGARCH_LASX     = (1 << 5),
    HWCAP_LOONGARCH_CRC32    = (1 << 6),
    HWCAP_LOONGARCH_COMPLEX  = (1 << 7),
    HWCAP_LOONGARCH_CRYPTO   = (1 << 8),
    HWCAP_LOONGARCH_LVZ      = (1 << 9),
    HWCAP_LOONGARCH_LBT_X86  = (1 << 10),
    HWCAP_LOONGARCH_LBT_ARM  = (1 << 11),
    HWCAP_LOONGARCH_LBT_MIPS = (1 << 12),
};

static uint32_t get_elf_hwcap(void)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= HWCAP_LOONGARCH_CRC32;

    if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) {
        hwcaps |= HWCAP_LOONGARCH_UAL;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) {
        hwcaps |= HWCAP_LOONGARCH_FPU;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) {
        hwcaps |= HWCAP_LOONGARCH_LAM;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        hwcaps |= HWCAP_LOONGARCH_LSX;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        hwcaps |= HWCAP_LOONGARCH_LASX;
    }

    return hwcaps;
}

#define ELF_PLATFORM "loongarch"

#endif /* TARGET_LOONGARCH64 */

#ifdef TARGET_MIPS

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#define ELF_ARCH    EM_MIPS
#define EXSTACK_DEFAULT true

#ifdef TARGET_ABI_MIPSN32
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
#else
#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
#endif

#define ELF_BASE_PLATFORM get_elf_base_platform()

#define MATCH_PLATFORM_INSN(_flags, _base_platform)      \
    do { if ((cpu->env.insn_flags & (_flags)) == _flags) \
    { return _base_platform; } } while (0)

static const char *get_elf_base_platform(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);

    /* 64-bit ISAs go first */
    MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6");
    MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5");
    MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2");
    MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64");
    MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5");
    MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4");
    MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3");

    /* 32-bit ISAs */
    MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6");
    MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5");
    MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2");
    MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32");
    MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2");

    /* Fallback */
    return "mips";
}
#undef MATCH_PLATFORM_INSN

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}
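/*
 * Note (MIPS privilege model, summarized): KSU = 2 in CP0.Status selects
 * user mode, so the new thread starts at cp0_epc with user privileges and
 * its stack pointer ($29) at start_stack.
 */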
/* See linux kernel: arch/mips/include/asm/elf.h.  */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/include/asm/reg.h.  */
enum {
#ifdef TARGET_MIPS64
    TARGET_EF_R0 = 0,
#else
    TARGET_EF_R0 = 6,
#endif
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
    TARGET_EF_LO = TARGET_EF_R0 + 32,
    TARGET_EF_HI = TARGET_EF_R0 + 33,
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
};

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
{
    int i;

    for (i = 0; i < TARGET_EF_R0; i++) {
        (*regs)[i] = 0;
    }
    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
    }

    (*regs)[TARGET_EF_R26] = 0;
    (*regs)[TARGET_EF_R27] = 0;
    (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
    (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
    (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
    (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

/* See arch/mips/include/uapi/asm/hwcap.h.  */
enum {
    HWCAP_MIPS_R6           = (1 << 0),
    HWCAP_MIPS_MSA          = (1 << 1),
    HWCAP_MIPS_CRC32        = (1 << 2),
    HWCAP_MIPS_MIPS16       = (1 << 3),
    HWCAP_MIPS_MDMX         = (1 << 4),
    HWCAP_MIPS_MIPS3D       = (1 << 5),
    HWCAP_MIPS_SMARTMIPS    = (1 << 6),
    HWCAP_MIPS_DSP          = (1 << 7),
    HWCAP_MIPS_DSP2         = (1 << 8),
    HWCAP_MIPS_DSP3         = (1 << 9),
    HWCAP_MIPS_MIPS16E2     = (1 << 10),
    HWCAP_LOONGSON_MMI      = (1 << 11),
    HWCAP_LOONGSON_EXT      = (1 << 12),
    HWCAP_LOONGSON_EXT2     = (1 << 13),
    HWCAP_LOONGSON_CPUCFG   = (1 << 14),
};

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE_INSN(_flag, _hwcap) \
    do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_SET(_reg, _mask, _hwcap) \
    do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0)

#define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap) \
    do {                                                         \
        if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) { \
            hwcaps |= _hwcap;                                    \
        }                                                        \
    } while (0)

static uint32_t get_elf_hwcap(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH,
                        2, HWCAP_MIPS_R6);
    GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA);
    GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI);
    GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT);

    return hwcaps;
}

#undef GET_FEATURE_REG_EQU
#undef GET_FEATURE_REG_SET
#undef GET_FEATURE_INSN

#endif /* TARGET_MIPS */
#ifdef TARGET_MICROBLAZE

#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_MICROBLAZE

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->r1 = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        4096

#define USE_ELF_CORE_DUMP
#define ELF_NREG 38
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
{
    int i, pos = 0;

    for (i = 0; i < 32; i++) {
        (*regs)[pos++] = tswapreg(env->regs[i]);
    }

    (*regs)[pos++] = tswapreg(env->pc);
    (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env));
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->ear);
    (*regs)[pos++] = 0;
    (*regs)[pos++] = tswapreg(env->esr);
}

#endif /* TARGET_MICROBLAZE */

#ifdef TARGET_OPENRISC

#define ELF_ARCH EM_OPENRISC
#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2MSB

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->gpr[1] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 8192

/* See linux kernel arch/openrisc/include/asm/elf.h.  */
#define ELF_NREG 34 /* gprs and pc, sr */
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUOpenRISCState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(cpu_get_sr(env));
}
#define ELF_HWCAP 0
#define ELF_PLATFORM NULL

#endif /* TARGET_OPENRISC */
#ifdef TARGET_SH4

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_SH

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

/* See linux kernel: arch/sh/include/asm/elf.h.  */
#define ELF_NREG 23
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
enum {
    TARGET_REG_PC = 16,
    TARGET_REG_PR = 17,
    TARGET_REG_SR = 18,
    TARGET_REG_GBR = 19,
    TARGET_REG_MACH = 20,
    TARGET_REG_MACL = 21,
    TARGET_REG_SYSCALL = 22
};

static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
                                      const CPUSH4State *env)
{
    int i;

    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapreg(env->gregs[i]);
    }

    (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
    (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
    (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
    (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
    (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
    (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

enum {
    SH_CPU_HAS_FPU            = 0x0001, /* Hardware FPU support */
    SH_CPU_HAS_P2_FLUSH_BUG   = 0x0002, /* Need to flush the cache in P2 area */
    SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
    SH_CPU_HAS_DSP            = 0x0008, /* SH-DSP: DSP support */
    SH_CPU_HAS_PERF_COUNTER   = 0x0010, /* Hardware performance counters */
    SH_CPU_HAS_PTEA           = 0x0020, /* PTEA register */
    SH_CPU_HAS_LLSC           = 0x0040, /* movli.l/movco.l */
    SH_CPU_HAS_L2_CACHE       = 0x0080, /* Secondary cache / URAM */
    SH_CPU_HAS_OP32           = 0x0100, /* 32-bit instruction support */
    SH_CPU_HAS_PTEAEX         = 0x0200, /* PTE ASID Extension support */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
    uint32_t hwcap = 0;

    hwcap |= SH_CPU_HAS_FPU;

    if (cpu->env.features & SH_FEATURE_SH4A) {
        hwcap |= SH_CPU_HAS_LLSC;
    }

    return hwcap;
}

#endif

#ifdef TARGET_CRIS

#define ELF_CLASS ELFCLASS32
#define ELF_ARCH  EM_CRIS

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define ELF_EXEC_PAGESIZE        8192

#endif

#ifdef TARGET_M68K

#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_68K

/* ??? Does this need to do anything?
   #define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->sr = 0;
    regs->pc = infop->entry;
}
/* See linux kernel: arch/m68k/include/asm/elf.h.  */
#define ELF_NREG 20
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
{
    (*regs)[0] = tswapreg(env->dregs[1]);
    (*regs)[1] = tswapreg(env->dregs[2]);
    (*regs)[2] = tswapreg(env->dregs[3]);
    (*regs)[3] = tswapreg(env->dregs[4]);
    (*regs)[4] = tswapreg(env->dregs[5]);
    (*regs)[5] = tswapreg(env->dregs[6]);
    (*regs)[6] = tswapreg(env->dregs[7]);
    (*regs)[7] = tswapreg(env->aregs[0]);
    (*regs)[8] = tswapreg(env->aregs[1]);
    (*regs)[9] = tswapreg(env->aregs[2]);
    (*regs)[10] = tswapreg(env->aregs[3]);
    (*regs)[11] = tswapreg(env->aregs[4]);
    (*regs)[12] = tswapreg(env->aregs[5]);
    (*regs)[13] = tswapreg(env->aregs[6]);
    (*regs)[14] = tswapreg(env->dregs[0]);
    (*regs)[15] = tswapreg(env->aregs[7]);
    (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
    (*regs)[17] = tswapreg(env->sr);
    (*regs)[18] = tswapreg(env->pc);
    (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       8192

#endif

#ifdef TARGET_ALPHA

#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;
    regs->usp = infop->start_stack;
}

#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */

#ifdef TARGET_S390X

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_S390

#include "elf.h"

#define ELF_HWCAP get_elf_hwcap()

#define GET_FEATURE(_feat, _hwcap) \
    do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    /*
     * Let's assume we always have esan3 and zarch.
     * 31-bit processes can use 64-bit registers (high gprs).
     */
    uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS;

    GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE);
    GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA);
    GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP);
    GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM);
    if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) &&
        s390_has_feat(S390_FEAT_ETF3_ENH)) {
        hwcap |= HWCAP_S390_ETF3EH;
    }
    GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS);
    GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT);
    GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2);

    return hwcap;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [HWCAP_S390_NR_ESAN3]     = "esan3",
        [HWCAP_S390_NR_ZARCH]     = "zarch",
        [HWCAP_S390_NR_STFLE]     = "stfle",
        [HWCAP_S390_NR_MSA]       = "msa",
        [HWCAP_S390_NR_LDISP]     = "ldisp",
        [HWCAP_S390_NR_EIMM]      = "eimm",
        [HWCAP_S390_NR_DFP]       = "dfp",
        [HWCAP_S390_NR_HPAGE]     = "edat",
        [HWCAP_S390_NR_ETF3EH]    = "etf3eh",
        [HWCAP_S390_NR_HIGH_GPRS] = "highgprs",
        [HWCAP_S390_NR_TE]        = "te",
        [HWCAP_S390_NR_VXRS]      = "vx",
        [HWCAP_S390_NR_VXRS_BCD]  = "vxd",
        [HWCAP_S390_NR_VXRS_EXT]  = "vxe",
        [HWCAP_S390_NR_GS]        = "gs",
        [HWCAP_S390_NR_VXRS_EXT2] = "vxe2",
        [HWCAP_S390_NR_VXRS_PDE]  = "vxp",
        [HWCAP_S390_NR_SORT]      = "sort",
        [HWCAP_S390_NR_DFLT]      = "dflt",
        [HWCAP_S390_NR_NNPA]      = "nnpa",
        [HWCAP_S390_NR_PCI_MIO]   = "pcimio",
        [HWCAP_S390_NR_SIE]       = "sie",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psw.addr = infop->entry;
    regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \
                     PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \
                     PSW_MASK_32;
    regs->gprs[15] = infop->start_stack;
}
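/*
 * Note (s390x PSW semantics, summarized): PSW_MASK_PSTATE starts the
 * thread in problem (user) state, and setting both addressing-mode bits
 * (PSW_MASK_64 | PSW_MASK_32) selects the 64-bit addressing mode.
 */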
1725 */ 1726 uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS; 1727 1728 GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE); 1729 GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA); 1730 GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP); 1731 GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM); 1732 if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) && 1733 s390_has_feat(S390_FEAT_ETF3_ENH)) { 1734 hwcap |= HWCAP_S390_ETF3EH; 1735 } 1736 GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS); 1737 GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT); 1738 GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2); 1739 1740 return hwcap; 1741 } 1742 1743 const char *elf_hwcap_str(uint32_t bit) 1744 { 1745 static const char *hwcap_str[] = { 1746 [HWCAP_S390_NR_ESAN3] = "esan3", 1747 [HWCAP_S390_NR_ZARCH] = "zarch", 1748 [HWCAP_S390_NR_STFLE] = "stfle", 1749 [HWCAP_S390_NR_MSA] = "msa", 1750 [HWCAP_S390_NR_LDISP] = "ldisp", 1751 [HWCAP_S390_NR_EIMM] = "eimm", 1752 [HWCAP_S390_NR_DFP] = "dfp", 1753 [HWCAP_S390_NR_HPAGE] = "edat", 1754 [HWCAP_S390_NR_ETF3EH] = "etf3eh", 1755 [HWCAP_S390_NR_HIGH_GPRS] = "highgprs", 1756 [HWCAP_S390_NR_TE] = "te", 1757 [HWCAP_S390_NR_VXRS] = "vx", 1758 [HWCAP_S390_NR_VXRS_BCD] = "vxd", 1759 [HWCAP_S390_NR_VXRS_EXT] = "vxe", 1760 [HWCAP_S390_NR_GS] = "gs", 1761 [HWCAP_S390_NR_VXRS_EXT2] = "vxe2", 1762 [HWCAP_S390_NR_VXRS_PDE] = "vxp", 1763 [HWCAP_S390_NR_SORT] = "sort", 1764 [HWCAP_S390_NR_DFLT] = "dflt", 1765 [HWCAP_S390_NR_NNPA] = "nnpa", 1766 [HWCAP_S390_NR_PCI_MIO] = "pcimio", 1767 [HWCAP_S390_NR_SIE] = "sie", 1768 }; 1769 1770 return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; 1771 } 1772 1773 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 1774 { 1775 regs->psw.addr = infop->entry; 1776 regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ 1777 PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \ 1778 PSW_MASK_32; 1779 regs->gprs[15] = infop->start_stack; 1780 } 1781 1782 /* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs). 
*/ 1783 #define ELF_NREG 27 1784 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1785 1786 enum { 1787 TARGET_REG_PSWM = 0, 1788 TARGET_REG_PSWA = 1, 1789 TARGET_REG_GPRS = 2, 1790 TARGET_REG_ARS = 18, 1791 TARGET_REG_ORIG_R2 = 26, 1792 }; 1793 1794 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1795 const CPUS390XState *env) 1796 { 1797 int i; 1798 uint32_t *aregs; 1799 1800 (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask); 1801 (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr); 1802 for (i = 0; i < 16; i++) { 1803 (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]); 1804 } 1805 aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]); 1806 for (i = 0; i < 16; i++) { 1807 aregs[i] = tswap32(env->aregs[i]); 1808 } 1809 (*regs)[TARGET_REG_ORIG_R2] = 0; 1810 } 1811 1812 #define USE_ELF_CORE_DUMP 1813 #define ELF_EXEC_PAGESIZE 4096 1814 1815 #define VDSO_HEADER "vdso.c.inc" 1816 1817 #endif /* TARGET_S390X */ 1818 1819 #ifdef TARGET_RISCV 1820 1821 #define ELF_ARCH EM_RISCV 1822 1823 #ifdef TARGET_RISCV32 1824 #define ELF_CLASS ELFCLASS32 1825 #define VDSO_HEADER "vdso-32.c.inc" 1826 #else 1827 #define ELF_CLASS ELFCLASS64 1828 #define VDSO_HEADER "vdso-64.c.inc" 1829 #endif 1830 1831 #define ELF_HWCAP get_elf_hwcap() 1832 1833 static uint32_t get_elf_hwcap(void) 1834 { 1835 #define MISA_BIT(EXT) (1 << (EXT - 'A')) 1836 RISCVCPU *cpu = RISCV_CPU(thread_cpu); 1837 uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A') 1838 | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C') 1839 | MISA_BIT('V'); 1840 1841 return cpu->env.misa_ext & mask; 1842 #undef MISA_BIT 1843 } 1844 1845 static inline void init_thread(struct target_pt_regs *regs, 1846 struct image_info *infop) 1847 { 1848 regs->sepc = infop->entry; 1849 regs->sp = infop->start_stack; 1850 } 1851 1852 #define ELF_EXEC_PAGESIZE 4096 1853 1854 #endif /* TARGET_RISCV */ 1855 1856 #ifdef TARGET_HPPA 1857 1858 #define ELF_CLASS ELFCLASS32 1859 #define ELF_ARCH EM_PARISC 1860 #define ELF_PLATFORM "PARISC" 1861 #define STACK_GROWS_DOWN 0 1862 #define STACK_ALIGNMENT 64 1863 1864 #define VDSO_HEADER "vdso.c.inc" 1865 1866 static inline void init_thread(struct target_pt_regs *regs, 1867 struct image_info *infop) 1868 { 1869 regs->iaoq[0] = infop->entry; 1870 regs->iaoq[1] = infop->entry + 4; 1871 regs->gr[23] = 0; 1872 regs->gr[24] = infop->argv; 1873 regs->gr[25] = infop->argc; 1874 /* The top-of-stack contains a linkage buffer. */ 1875 regs->gr[30] = infop->start_stack + 64; 1876 regs->gr[31] = infop->entry; 1877 } 1878 1879 #define LO_COMMPAGE 0 1880 1881 static bool init_guest_commpage(void) 1882 { 1883 /* If reserved_va, then we have already mapped 0 page on the host. */ 1884 if (!reserved_va) { 1885 void *want, *addr; 1886 1887 want = g2h_untagged(LO_COMMPAGE); 1888 addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE, 1889 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0); 1890 if (addr == MAP_FAILED) { 1891 perror("Allocating guest commpage"); 1892 exit(EXIT_FAILURE); 1893 } 1894 if (addr != want) { 1895 return false; 1896 } 1897 } 1898 1899 /* 1900 * On Linux, page zero is normally marked execute only + gateway. 1901 * Normal read or write is supposed to fail (thus PROT_NONE above), 1902 * but specific offsets have kernel code mapped to raise permissions 1903 * and implement syscalls. Here, simply mark the page executable. 1904 * Special case the entry points during translation (see do_page_zero). 
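 * The page_set_flags() call below records the execute permission only
 * in QEMU's guest page tables; the host mapping set up above remains
 * PROT_NONE.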
1905 */ 1906 page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, 1907 PAGE_EXEC | PAGE_VALID); 1908 return true; 1909 } 1910 1911 #endif /* TARGET_HPPA */ 1912 1913 #ifdef TARGET_XTENSA 1914 1915 #define ELF_CLASS ELFCLASS32 1916 #define ELF_ARCH EM_XTENSA 1917 1918 static inline void init_thread(struct target_pt_regs *regs, 1919 struct image_info *infop) 1920 { 1921 regs->windowbase = 0; 1922 regs->windowstart = 1; 1923 regs->areg[1] = infop->start_stack; 1924 regs->pc = infop->entry; 1925 if (info_is_fdpic(infop)) { 1926 regs->areg[4] = infop->loadmap_addr; 1927 regs->areg[5] = infop->interpreter_loadmap_addr; 1928 if (infop->interpreter_loadmap_addr) { 1929 regs->areg[6] = infop->interpreter_pt_dynamic_addr; 1930 } else { 1931 regs->areg[6] = infop->pt_dynamic_addr; 1932 } 1933 } 1934 } 1935 1936 /* See linux kernel: arch/xtensa/include/asm/elf.h. */ 1937 #define ELF_NREG 128 1938 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1939 1940 enum { 1941 TARGET_REG_PC, 1942 TARGET_REG_PS, 1943 TARGET_REG_LBEG, 1944 TARGET_REG_LEND, 1945 TARGET_REG_LCOUNT, 1946 TARGET_REG_SAR, 1947 TARGET_REG_WINDOWSTART, 1948 TARGET_REG_WINDOWBASE, 1949 TARGET_REG_THREADPTR, 1950 TARGET_REG_AR0 = 64, 1951 }; 1952 1953 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1954 const CPUXtensaState *env) 1955 { 1956 unsigned i; 1957 1958 (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 1959 (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM); 1960 (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]); 1961 (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]); 1962 (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]); 1963 (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]); 1964 (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]); 1965 (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]); 1966 (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]); 1967 xtensa_sync_phys_from_window((CPUXtensaState *)env); 1968 for (i = 0; i < env->config->nareg; ++i) { 1969 (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]); 1970 } 1971 } 1972 1973 #define USE_ELF_CORE_DUMP 1974 #define ELF_EXEC_PAGESIZE 4096 1975 1976 #endif /* TARGET_XTENSA */ 1977 1978 #ifdef TARGET_HEXAGON 1979 1980 #define ELF_CLASS ELFCLASS32 1981 #define ELF_ARCH EM_HEXAGON 1982 1983 static inline void init_thread(struct target_pt_regs *regs, 1984 struct image_info *infop) 1985 { 1986 regs->sepc = infop->entry; 1987 regs->sp = infop->start_stack; 1988 } 1989 1990 #endif /* TARGET_HEXAGON */ 1991 1992 #ifndef ELF_BASE_PLATFORM 1993 #define ELF_BASE_PLATFORM (NULL) 1994 #endif 1995 1996 #ifndef ELF_PLATFORM 1997 #define ELF_PLATFORM (NULL) 1998 #endif 1999 2000 #ifndef ELF_MACHINE 2001 #define ELF_MACHINE ELF_ARCH 2002 #endif 2003 2004 #ifndef elf_check_arch 2005 #define elf_check_arch(x) ((x) == ELF_ARCH) 2006 #endif 2007 2008 #ifndef elf_check_abi 2009 #define elf_check_abi(x) (1) 2010 #endif 2011 2012 #ifndef ELF_HWCAP 2013 #define ELF_HWCAP 0 2014 #endif 2015 2016 #ifndef STACK_GROWS_DOWN 2017 #define STACK_GROWS_DOWN 1 2018 #endif 2019 2020 #ifndef STACK_ALIGNMENT 2021 #define STACK_ALIGNMENT 16 2022 #endif 2023 2024 #ifdef TARGET_ABI32 2025 #undef ELF_CLASS 2026 #define ELF_CLASS ELFCLASS32 2027 #undef bswaptls 2028 #define bswaptls(ptr) bswap32s(ptr) 2029 #endif 2030 2031 #ifndef EXSTACK_DEFAULT 2032 #define EXSTACK_DEFAULT false 2033 #endif 2034 2035 #include "elf.h" 2036 2037 /* We must delay the following stanzas until after "elf.h". 
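 * (In particular, the GNU property constants tested in the AArch64
 * block below are only available once "elf.h" has been included.)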
*/ 2038 #if defined(TARGET_AARCH64) 2039 2040 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2041 const uint32_t *data, 2042 struct image_info *info, 2043 Error **errp) 2044 { 2045 if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { 2046 if (pr_datasz != sizeof(uint32_t)) { 2047 error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND"); 2048 return false; 2049 } 2050 /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */ 2051 info->note_flags = *data; 2052 } 2053 return true; 2054 } 2055 #define ARCH_USE_GNU_PROPERTY 1 2056 2057 #else 2058 2059 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2060 const uint32_t *data, 2061 struct image_info *info, 2062 Error **errp) 2063 { 2064 g_assert_not_reached(); 2065 } 2066 #define ARCH_USE_GNU_PROPERTY 0 2067 2068 #endif 2069 2070 struct exec 2071 { 2072 unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 2073 unsigned int a_text; /* length of text, in bytes */ 2074 unsigned int a_data; /* length of data, in bytes */ 2075 unsigned int a_bss; /* length of uninitialized data area, in bytes */ 2076 unsigned int a_syms; /* length of symbol table data in file, in bytes */ 2077 unsigned int a_entry; /* start address */ 2078 unsigned int a_trsize; /* length of relocation info for text, in bytes */ 2079 unsigned int a_drsize; /* length of relocation info for data, in bytes */ 2080 }; 2081 2082 2083 #define N_MAGIC(exec) ((exec).a_info & 0xffff) 2084 #define OMAGIC 0407 2085 #define NMAGIC 0410 2086 #define ZMAGIC 0413 2087 #define QMAGIC 0314 2088 2089 #define DLINFO_ITEMS 16 2090 2091 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n) 2092 { 2093 memcpy(to, from, n); 2094 } 2095 2096 #ifdef BSWAP_NEEDED 2097 static void bswap_ehdr(struct elfhdr *ehdr) 2098 { 2099 bswap16s(&ehdr->e_type); /* Object file type */ 2100 bswap16s(&ehdr->e_machine); /* Architecture */ 2101 bswap32s(&ehdr->e_version); /* Object file version */ 2102 bswaptls(&ehdr->e_entry); /* Entry point virtual address */ 2103 bswaptls(&ehdr->e_phoff); /* Program header table file offset */ 2104 bswaptls(&ehdr->e_shoff); /* Section header table file offset */ 2105 bswap32s(&ehdr->e_flags); /* Processor-specific flags */ 2106 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ 2107 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ 2108 bswap16s(&ehdr->e_phnum); /* Program header table entry count */ 2109 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ 2110 bswap16s(&ehdr->e_shnum); /* Section header table entry count */ 2111 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ 2112 } 2113 2114 static void bswap_phdr(struct elf_phdr *phdr, int phnum) 2115 { 2116 int i; 2117 for (i = 0; i < phnum; ++i, ++phdr) { 2118 bswap32s(&phdr->p_type); /* Segment type */ 2119 bswap32s(&phdr->p_flags); /* Segment flags */ 2120 bswaptls(&phdr->p_offset); /* Segment file offset */ 2121 bswaptls(&phdr->p_vaddr); /* Segment virtual address */ 2122 bswaptls(&phdr->p_paddr); /* Segment physical address */ 2123 bswaptls(&phdr->p_filesz); /* Segment size in file */ 2124 bswaptls(&phdr->p_memsz); /* Segment size in memory */ 2125 bswaptls(&phdr->p_align); /* Segment alignment */ 2126 } 2127 } 2128 2129 static void bswap_shdr(struct elf_shdr *shdr, int shnum) 2130 { 2131 int i; 2132 for (i = 0; i < shnum; ++i, ++shdr) { 2133 bswap32s(&shdr->sh_name); 2134 bswap32s(&shdr->sh_type); 2135 bswaptls(&shdr->sh_flags); 2136 bswaptls(&shdr->sh_addr); 2137 
bswaptls(&shdr->sh_offset); 2138 bswaptls(&shdr->sh_size); 2139 bswap32s(&shdr->sh_link); 2140 bswap32s(&shdr->sh_info); 2141 bswaptls(&shdr->sh_addralign); 2142 bswaptls(&shdr->sh_entsize); 2143 } 2144 } 2145 2146 static void bswap_sym(struct elf_sym *sym) 2147 { 2148 bswap32s(&sym->st_name); 2149 bswaptls(&sym->st_value); 2150 bswaptls(&sym->st_size); 2151 bswap16s(&sym->st_shndx); 2152 } 2153 2154 #ifdef TARGET_MIPS 2155 static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) 2156 { 2157 bswap16s(&abiflags->version); 2158 bswap32s(&abiflags->ases); 2159 bswap32s(&abiflags->isa_ext); 2160 bswap32s(&abiflags->flags1); 2161 bswap32s(&abiflags->flags2); 2162 } 2163 #endif 2164 #else 2165 static inline void bswap_ehdr(struct elfhdr *ehdr) { } 2166 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { } 2167 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { } 2168 static inline void bswap_sym(struct elf_sym *sym) { } 2169 #ifdef TARGET_MIPS 2170 static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { } 2171 #endif 2172 #endif 2173 2174 #ifdef USE_ELF_CORE_DUMP 2175 static int elf_core_dump(int, const CPUArchState *); 2176 #endif /* USE_ELF_CORE_DUMP */ 2177 static void load_symbols(struct elfhdr *hdr, const ImageSource *src, 2178 abi_ulong load_bias); 2179 2180 /* Verify the portions of EHDR within E_IDENT for the target. 2181 This can be performed before bswapping the entire header. */ 2182 static bool elf_check_ident(struct elfhdr *ehdr) 2183 { 2184 return (ehdr->e_ident[EI_MAG0] == ELFMAG0 2185 && ehdr->e_ident[EI_MAG1] == ELFMAG1 2186 && ehdr->e_ident[EI_MAG2] == ELFMAG2 2187 && ehdr->e_ident[EI_MAG3] == ELFMAG3 2188 && ehdr->e_ident[EI_CLASS] == ELF_CLASS 2189 && ehdr->e_ident[EI_DATA] == ELF_DATA 2190 && ehdr->e_ident[EI_VERSION] == EV_CURRENT); 2191 } 2192 2193 /* Verify the portions of EHDR outside of E_IDENT for the target. 2194 This has to wait until after bswapping the header. */ 2195 static bool elf_check_ehdr(struct elfhdr *ehdr) 2196 { 2197 return (elf_check_arch(ehdr->e_machine) 2198 && elf_check_abi(ehdr->e_flags) 2199 && ehdr->e_ehsize == sizeof(struct elfhdr) 2200 && ehdr->e_phentsize == sizeof(struct elf_phdr) 2201 && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN)); 2202 } 2203 2204 /* 2205 * 'copy_elf_strings()' copies argument/environment strings from user 2206 * memory to free pages in kernel memory. These are in a format ready 2207 * to be put directly into the top of new user memory. 2208 * 2209 */ 2210 static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch, 2211 abi_ulong p, abi_ulong stack_limit) 2212 { 2213 char *tmp; 2214 int len, i; 2215 abi_ulong top = p; 2216 2217 if (!p) { 2218 return 0; /* bullet-proofing */ 2219 } 2220 2221 if (STACK_GROWS_DOWN) { 2222 int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1; 2223 for (i = argc - 1; i >= 0; --i) { 2224 tmp = argv[i]; 2225 if (!tmp) { 2226 fprintf(stderr, "VFS: argc is wrong"); 2227 exit(-1); 2228 } 2229 len = strlen(tmp) + 1; 2230 tmp += len; 2231 2232 if (len > (p - stack_limit)) { 2233 return 0; 2234 } 2235 while (len) { 2236 int bytes_to_copy = (len > offset) ?
offset : len; 2237 tmp -= bytes_to_copy; 2238 p -= bytes_to_copy; 2239 offset -= bytes_to_copy; 2240 len -= bytes_to_copy; 2241 2242 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy); 2243 2244 if (offset == 0) { 2245 memcpy_to_target(p, scratch, top - p); 2246 top = p; 2247 offset = TARGET_PAGE_SIZE; 2248 } 2249 } 2250 } 2251 if (p != top) { 2252 memcpy_to_target(p, scratch + offset, top - p); 2253 } 2254 } else { 2255 int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE); 2256 for (i = 0; i < argc; ++i) { 2257 tmp = argv[i]; 2258 if (!tmp) { 2259 fprintf(stderr, "VFS: argc is wrong"); 2260 exit(-1); 2261 } 2262 len = strlen(tmp) + 1; 2263 if (len > (stack_limit - p)) { 2264 return 0; 2265 } 2266 while (len) { 2267 int bytes_to_copy = (len > remaining) ? remaining : len; 2268 2269 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy); 2270 2271 tmp += bytes_to_copy; 2272 remaining -= bytes_to_copy; 2273 p += bytes_to_copy; 2274 len -= bytes_to_copy; 2275 2276 if (remaining == 0) { 2277 memcpy_to_target(top, scratch, p - top); 2278 top = p; 2279 remaining = TARGET_PAGE_SIZE; 2280 } 2281 } 2282 } 2283 if (p != top) { 2284 memcpy_to_target(top, scratch, p - top); 2285 } 2286 } 2287 2288 return p; 2289 } 2290 2291 /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of 2292 * argument/environment space. Newer kernels (>2.6.33) allow more, 2293 * dependent on stack size, but guarantee at least 32 pages for 2294 * backwards compatibility. 2295 */ 2296 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE) 2297 2298 static abi_ulong setup_arg_pages(struct linux_binprm *bprm, 2299 struct image_info *info) 2300 { 2301 abi_ulong size, error, guard; 2302 int prot; 2303 2304 size = guest_stack_size; 2305 if (size < STACK_LOWER_LIMIT) { 2306 size = STACK_LOWER_LIMIT; 2307 } 2308 2309 if (STACK_GROWS_DOWN) { 2310 guard = TARGET_PAGE_SIZE; 2311 if (guard < qemu_real_host_page_size()) { 2312 guard = qemu_real_host_page_size(); 2313 } 2314 } else { 2315 /* no guard page for hppa target where stack grows upwards. */ 2316 guard = 0; 2317 } 2318 2319 prot = PROT_READ | PROT_WRITE; 2320 if (info->exec_stack) { 2321 prot |= PROT_EXEC; 2322 } 2323 error = target_mmap(0, size + guard, prot, 2324 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 2325 if (error == -1) { 2326 perror("mmap stack"); 2327 exit(-1); 2328 } 2329 2330 /* We reserve one extra page at the top of the stack as guard. */ 2331 if (STACK_GROWS_DOWN) { 2332 target_mprotect(error, guard, PROT_NONE); 2333 info->stack_limit = error + guard; 2334 return info->stack_limit + size - sizeof(void *); 2335 } else { 2336 info->stack_limit = error + size; 2337 return error; 2338 } 2339 } 2340 2341 /** 2342 * zero_bss: 2343 * 2344 * Map and zero the bss. We need to explicitly zero any fractional pages 2345 * after the data section (i.e. bss). Return false on mapping failure. 2346 */ 2347 static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss, 2348 int prot, Error **errp) 2349 { 2350 abi_ulong align_bss; 2351 2352 /* We only expect writable bss; the code segment shouldn't need this. */ 2353 if (!(prot & PROT_WRITE)) { 2354 error_setg(errp, "PT_LOAD with non-writable bss"); 2355 return false; 2356 } 2357 2358 align_bss = TARGET_PAGE_ALIGN(start_bss); 2359 end_bss = TARGET_PAGE_ALIGN(end_bss); 2360 2361 if (start_bss < align_bss) { 2362 int flags = page_get_flags(start_bss); 2363 2364 if (!(flags & PAGE_BITS)) { 2365 /* 2366 * The whole address space of the executable was reserved 2367 * at the start, therefore all pages will be VALID. 
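 * (In QEMU's page flags, such a page is PAGE_VALID but has none of
 * the PAGE_BITS permission bits set, which is what the check above
 * tests for.)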
2368 * But assuming there are no PROT_NONE PT_LOAD segments, 2369 * a PROT_NONE page means no data all bss, and we can 2370 * simply extend the new anon mapping back to the start 2371 * of the page of bss. 2372 */ 2373 align_bss -= TARGET_PAGE_SIZE; 2374 } else { 2375 /* 2376 * The start of the bss shares a page with something. 2377 * The only thing that we expect is the data section, 2378 * which would already be marked writable. 2379 * Overlapping the RX code segment seems malformed. 2380 */ 2381 if (!(flags & PAGE_WRITE)) { 2382 error_setg(errp, "PT_LOAD with bss overlapping " 2383 "non-writable page"); 2384 return false; 2385 } 2386 2387 /* The page is already mapped and writable. */ 2388 memset(g2h_untagged(start_bss), 0, align_bss - start_bss); 2389 } 2390 } 2391 2392 if (align_bss < end_bss && 2393 target_mmap(align_bss, end_bss - align_bss, prot, 2394 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) { 2395 error_setg_errno(errp, errno, "Error mapping bss"); 2396 return false; 2397 } 2398 return true; 2399 } 2400 2401 #if defined(TARGET_ARM) 2402 static int elf_is_fdpic(struct elfhdr *exec) 2403 { 2404 return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC; 2405 } 2406 #elif defined(TARGET_XTENSA) 2407 static int elf_is_fdpic(struct elfhdr *exec) 2408 { 2409 return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC; 2410 } 2411 #else 2412 /* Default implementation, always false. */ 2413 static int elf_is_fdpic(struct elfhdr *exec) 2414 { 2415 return 0; 2416 } 2417 #endif 2418 2419 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp) 2420 { 2421 uint16_t n; 2422 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs; 2423 2424 /* elf32_fdpic_loadseg */ 2425 n = info->nsegs; 2426 while (n--) { 2427 sp -= 12; 2428 put_user_u32(loadsegs[n].addr, sp+0); 2429 put_user_u32(loadsegs[n].p_vaddr, sp+4); 2430 put_user_u32(loadsegs[n].p_memsz, sp+8); 2431 } 2432 2433 /* elf32_fdpic_loadmap */ 2434 sp -= 4; 2435 put_user_u16(0, sp+0); /* version */ 2436 put_user_u16(info->nsegs, sp+2); /* nsegs */ 2437 2438 info->personality = PER_LINUX_FDPIC; 2439 info->loadmap_addr = sp; 2440 2441 return sp; 2442 } 2443 2444 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, 2445 struct elfhdr *exec, 2446 struct image_info *info, 2447 struct image_info *interp_info, 2448 struct image_info *vdso_info) 2449 { 2450 abi_ulong sp; 2451 abi_ulong u_argc, u_argv, u_envp, u_auxv; 2452 int size; 2453 int i; 2454 abi_ulong u_rand_bytes; 2455 uint8_t k_rand_bytes[16]; 2456 abi_ulong u_platform, u_base_platform; 2457 const char *k_platform, *k_base_platform; 2458 const int n = sizeof(elf_addr_t); 2459 2460 sp = p; 2461 2462 /* Needs to be before we load the env/argc/... 
*/ 2463 if (elf_is_fdpic(exec)) { 2464 /* Need 4 byte alignment for these structs */ 2465 sp &= ~3; 2466 sp = loader_build_fdpic_loadmap(info, sp); 2467 info->other_info = interp_info; 2468 if (interp_info) { 2469 interp_info->other_info = info; 2470 sp = loader_build_fdpic_loadmap(interp_info, sp); 2471 info->interpreter_loadmap_addr = interp_info->loadmap_addr; 2472 info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr; 2473 } else { 2474 info->interpreter_loadmap_addr = 0; 2475 info->interpreter_pt_dynamic_addr = 0; 2476 } 2477 } 2478 2479 u_base_platform = 0; 2480 k_base_platform = ELF_BASE_PLATFORM; 2481 if (k_base_platform) { 2482 size_t len = strlen(k_base_platform) + 1; 2483 if (STACK_GROWS_DOWN) { 2484 sp -= (len + n - 1) & ~(n - 1); 2485 u_base_platform = sp; 2486 /* FIXME - check return value of memcpy_to_target() for failure */ 2487 memcpy_to_target(sp, k_base_platform, len); 2488 } else { 2489 memcpy_to_target(sp, k_base_platform, len); 2490 u_base_platform = sp; 2491 sp += len + 1; 2492 } 2493 } 2494 2495 u_platform = 0; 2496 k_platform = ELF_PLATFORM; 2497 if (k_platform) { 2498 size_t len = strlen(k_platform) + 1; 2499 if (STACK_GROWS_DOWN) { 2500 sp -= (len + n - 1) & ~(n - 1); 2501 u_platform = sp; 2502 /* FIXME - check return value of memcpy_to_target() for failure */ 2503 memcpy_to_target(sp, k_platform, len); 2504 } else { 2505 memcpy_to_target(sp, k_platform, len); 2506 u_platform = sp; 2507 sp += len + 1; 2508 } 2509 } 2510 2511 /* Provide 16 byte alignment for the PRNG, and basic alignment for 2512 * the argv and envp pointers. 2513 */ 2514 if (STACK_GROWS_DOWN) { 2515 sp = QEMU_ALIGN_DOWN(sp, 16); 2516 } else { 2517 sp = QEMU_ALIGN_UP(sp, 16); 2518 } 2519 2520 /* 2521 * Generate 16 random bytes for userspace PRNG seeding. 2522 */ 2523 qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes)); 2524 if (STACK_GROWS_DOWN) { 2525 sp -= 16; 2526 u_rand_bytes = sp; 2527 /* FIXME - check return value of memcpy_to_target() for failure */ 2528 memcpy_to_target(sp, k_rand_bytes, 16); 2529 } else { 2530 memcpy_to_target(sp, k_rand_bytes, 16); 2531 u_rand_bytes = sp; 2532 sp += 16; 2533 } 2534 2535 size = (DLINFO_ITEMS + 1) * 2; 2536 if (k_base_platform) { 2537 size += 2; 2538 } 2539 if (k_platform) { 2540 size += 2; 2541 } 2542 if (vdso_info) { 2543 size += 2; 2544 } 2545 #ifdef DLINFO_ARCH_ITEMS 2546 size += DLINFO_ARCH_ITEMS * 2; 2547 #endif 2548 #ifdef ELF_HWCAP2 2549 size += 2; 2550 #endif 2551 info->auxv_len = size * n; 2552 2553 size += envc + argc + 2; 2554 size += 1; /* argc itself */ 2555 size *= n; 2556 2557 /* Allocate space and finalize stack alignment for entry now. */ 2558 if (STACK_GROWS_DOWN) { 2559 u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT); 2560 sp = u_argc; 2561 } else { 2562 u_argc = sp; 2563 sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT); 2564 } 2565 2566 u_argv = u_argc + n; 2567 u_envp = u_argv + (argc + 1) * n; 2568 u_auxv = u_envp + (envc + 1) * n; 2569 info->saved_auxv = u_auxv; 2570 info->argc = argc; 2571 info->envc = envc; 2572 info->argv = u_argv; 2573 info->envp = u_envp; 2574 2575 /* This is correct because Linux defines 2576 * elf_addr_t as Elf32_Off / Elf64_Off 2577 */ 2578 #define NEW_AUX_ENT(id, val) do { \ 2579 put_user_ual(id, u_auxv); u_auxv += n; \ 2580 put_user_ual(val, u_auxv); u_auxv += n; \ 2581 } while(0) 2582 2583 #ifdef ARCH_DLINFO 2584 /* 2585 * ARCH_DLINFO must come first so platform specific code can enforce 2586 * special alignment requirements on the AUXV if necessary (eg. PPC). 
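 *
 * For reference, the block finalized below is laid out from low to
 * high addresses as:
 *
 *     argc
 *     argv[0] .. argv[argc-1], NULL
 *     envp[0] .. envp[envc-1], NULL
 *     auxv (a_type, a_val) pairs, terminated by (AT_NULL, 0)
 *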
2587 */ 2588 ARCH_DLINFO; 2589 #endif 2590 /* There must be exactly DLINFO_ITEMS entries here, or the assert 2591 * on info->auxv_len will trigger. 2592 */ 2593 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff)); 2594 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr))); 2595 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); 2596 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); 2597 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0)); 2598 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); 2599 NEW_AUX_ENT(AT_ENTRY, info->entry); 2600 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid()); 2601 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid()); 2602 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid()); 2603 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid()); 2604 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP); 2605 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK)); 2606 NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes); 2607 NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE)); 2608 NEW_AUX_ENT(AT_EXECFN, info->file_string); 2609 2610 #ifdef ELF_HWCAP2 2611 NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2); 2612 #endif 2613 2614 if (u_base_platform) { 2615 NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform); 2616 } 2617 if (u_platform) { 2618 NEW_AUX_ENT(AT_PLATFORM, u_platform); 2619 } 2620 if (vdso_info) { 2621 NEW_AUX_ENT(AT_SYSINFO_EHDR, vdso_info->load_addr); 2622 } 2623 NEW_AUX_ENT (AT_NULL, 0); 2624 #undef NEW_AUX_ENT 2625 2626 /* Check that our initial calculation of the auxv length matches how much 2627 * we actually put into it. 2628 */ 2629 assert(info->auxv_len == u_auxv - info->saved_auxv); 2630 2631 put_user_ual(argc, u_argc); 2632 2633 p = info->arg_strings; 2634 for (i = 0; i < argc; ++i) { 2635 put_user_ual(p, u_argv); 2636 u_argv += n; 2637 p += target_strlen(p) + 1; 2638 } 2639 put_user_ual(0, u_argv); 2640 2641 p = info->env_strings; 2642 for (i = 0; i < envc; ++i) { 2643 put_user_ual(p, u_envp); 2644 u_envp += n; 2645 p += target_strlen(p) + 1; 2646 } 2647 put_user_ual(0, u_envp); 2648 2649 return sp; 2650 } 2651 2652 #if defined(HI_COMMPAGE) 2653 #define LO_COMMPAGE -1 2654 #elif defined(LO_COMMPAGE) 2655 #define HI_COMMPAGE 0 2656 #else 2657 #define HI_COMMPAGE 0 2658 #define LO_COMMPAGE -1 2659 #ifndef INIT_GUEST_COMMPAGE 2660 #define init_guest_commpage() true 2661 #endif 2662 #endif 2663 2664 /** 2665 * pgb_try_mmap: 2666 * @addr: host start address 2667 * @addr_last: host last address 2668 * @keep: do not unmap the probe region 2669 * 2670 * Return 1 if [@addr, @addr_last] is not mapped in the host, 2671 * return 0 if it is not available to map, and -1 on mmap error. 2672 * If @keep, the region is left mapped on success, otherwise unmapped. 2673 */ 2674 static int pgb_try_mmap(uintptr_t addr, uintptr_t addr_last, bool keep) 2675 { 2676 size_t size = addr_last - addr + 1; 2677 void *p = mmap((void *)addr, size, PROT_NONE, 2678 MAP_ANONYMOUS | MAP_PRIVATE | 2679 MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0); 2680 int ret; 2681 2682 if (p == MAP_FAILED) { 2683 return errno == EEXIST ? 0 : -1; 2684 } 2685 ret = p == (void *)addr; 2686 if (!keep || !ret) { 2687 munmap(p, size); 2688 } 2689 return ret; 2690 } 2691 2692 /** 2693 * pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t size, uintptr_t brk) 2694 * @addr: host address 2695 * @addr_last: host last address 2696 * @brk: host brk 2697 * 2698 * Like pgb_try_mmap, but additionally reserve some memory following brk. 
2699 */ 2700 static int pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t addr_last, 2701 uintptr_t brk, bool keep) 2702 { 2703 uintptr_t brk_last = brk + 16 * MiB - 1; 2704 2705 /* Do not map anything close to the host brk. */ 2706 if (addr <= brk_last && brk <= addr_last) { 2707 return 0; 2708 } 2709 return pgb_try_mmap(addr, addr_last, keep); 2710 } 2711 2712 /** 2713 * pgb_try_mmap_set: 2714 * @ga: set of guest addrs 2715 * @base: guest_base 2716 * @brk: host brk 2717 * 2718 * Return true if all @ga can be mapped by the host at @base. 2719 * On success, retain the mapping at index 0 for reserved_va. 2720 */ 2721 2722 typedef struct PGBAddrs { 2723 uintptr_t bounds[3][2]; /* start/last pairs */ 2724 int nbounds; 2725 } PGBAddrs; 2726 2727 static bool pgb_try_mmap_set(const PGBAddrs *ga, uintptr_t base, uintptr_t brk) 2728 { 2729 for (int i = ga->nbounds - 1; i >= 0; --i) { 2730 if (pgb_try_mmap_skip_brk(ga->bounds[i][0] + base, 2731 ga->bounds[i][1] + base, 2732 brk, i == 0 && reserved_va) <= 0) { 2733 return false; 2734 } 2735 } 2736 return true; 2737 } 2738 2739 /** 2740 * pgb_addr_set: 2741 * @ga: output set of guest addrs 2742 * @guest_loaddr: guest image low address 2743 * @guest_loaddr: guest image high address 2744 * @identity: create for identity mapping 2745 * 2746 * Fill in @ga with the image, COMMPAGE and NULL page. 2747 */ 2748 static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr, 2749 abi_ulong guest_hiaddr, bool try_identity) 2750 { 2751 int n; 2752 2753 /* 2754 * With a low commpage, or a guest mapped very low, 2755 * we may not be able to use the identity map. 2756 */ 2757 if (try_identity) { 2758 if (LO_COMMPAGE != -1 && LO_COMMPAGE < mmap_min_addr) { 2759 return false; 2760 } 2761 if (guest_loaddr != 0 && guest_loaddr < mmap_min_addr) { 2762 return false; 2763 } 2764 } 2765 2766 memset(ga, 0, sizeof(*ga)); 2767 n = 0; 2768 2769 if (reserved_va) { 2770 ga->bounds[n][0] = try_identity ? mmap_min_addr : 0; 2771 ga->bounds[n][1] = reserved_va; 2772 n++; 2773 /* LO_COMMPAGE and NULL handled by reserving from 0. */ 2774 } else { 2775 /* Add any LO_COMMPAGE or NULL page. */ 2776 if (LO_COMMPAGE != -1) { 2777 ga->bounds[n][0] = 0; 2778 ga->bounds[n][1] = LO_COMMPAGE + TARGET_PAGE_SIZE - 1; 2779 n++; 2780 } else if (!try_identity) { 2781 ga->bounds[n][0] = 0; 2782 ga->bounds[n][1] = TARGET_PAGE_SIZE - 1; 2783 n++; 2784 } 2785 2786 /* Add the guest image for ET_EXEC. */ 2787 if (guest_loaddr) { 2788 ga->bounds[n][0] = guest_loaddr; 2789 ga->bounds[n][1] = guest_hiaddr; 2790 n++; 2791 } 2792 } 2793 2794 /* 2795 * Temporarily disable 2796 * "comparison is always false due to limited range of data type" 2797 * due to comparison between unsigned and (possible) 0. 2798 */ 2799 #pragma GCC diagnostic push 2800 #pragma GCC diagnostic ignored "-Wtype-limits" 2801 2802 /* Add any HI_COMMPAGE not covered by reserved_va. 
*/ 2803 if (reserved_va < HI_COMMPAGE) { 2804 ga->bounds[n][0] = HI_COMMPAGE & qemu_real_host_page_mask(); 2805 ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1; 2806 n++; 2807 } 2808 2809 #pragma GCC diagnostic pop 2810 2811 ga->nbounds = n; 2812 return true; 2813 } 2814 2815 static void pgb_fail_in_use(const char *image_name) 2816 { 2817 error_report("%s: requires virtual address space that is in use " 2818 "(omit the -B option or choose a different value)", 2819 image_name); 2820 exit(EXIT_FAILURE); 2821 } 2822 2823 static void pgb_fixed(const char *image_name, uintptr_t guest_loaddr, 2824 uintptr_t guest_hiaddr, uintptr_t align) 2825 { 2826 PGBAddrs ga; 2827 uintptr_t brk = (uintptr_t)sbrk(0); 2828 2829 if (!QEMU_IS_ALIGNED(guest_base, align)) { 2830 fprintf(stderr, "Requested guest base %p does not satisfy " 2831 "host minimum alignment (0x%" PRIxPTR ")\n", 2832 (void *)guest_base, align); 2833 exit(EXIT_FAILURE); 2834 } 2835 2836 if (!pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, !guest_base) 2837 || !pgb_try_mmap_set(&ga, guest_base, brk)) { 2838 pgb_fail_in_use(image_name); 2839 } 2840 } 2841 2842 /** 2843 * pgb_find_fallback: 2844 * 2845 * This is a fallback method for finding holes in the host address space 2846 * if we don't have the benefit of being able to access /proc/self/maps. 2847 * It can potentially take a very long time as we can only dumbly iterate 2848 * up the host address space seeing if the allocation would work. 2849 */ 2850 static uintptr_t pgb_find_fallback(const PGBAddrs *ga, uintptr_t align, 2851 uintptr_t brk) 2852 { 2853 /* TODO: come up with a better estimate of how much to skip. */ 2854 uintptr_t skip = sizeof(uintptr_t) == 4 ? MiB : GiB; 2855 2856 for (uintptr_t base = skip; ; base += skip) { 2857 base = ROUND_UP(base, align); 2858 if (pgb_try_mmap_set(ga, base, brk)) { 2859 return base; 2860 } 2861 if (base >= -skip) { 2862 return -1; 2863 } 2864 } 2865 } 2866 2867 static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base, 2868 IntervalTreeRoot *root) 2869 { 2870 for (int i = ga->nbounds - 1; i >= 0; --i) { 2871 uintptr_t s = base + ga->bounds[i][0]; 2872 uintptr_t l = base + ga->bounds[i][1]; 2873 IntervalTreeNode *n; 2874 2875 if (l < s) { 2876 /* Wraparound. Skip to advance S to mmap_min_addr. */ 2877 return mmap_min_addr - s; 2878 } 2879 2880 n = interval_tree_iter_first(root, s, l); 2881 if (n != NULL) { 2882 /* Conflict. Skip to advance S to LAST + 1. */ 2883 return n->last - s + 1; 2884 } 2885 } 2886 return 0; /* success */ 2887 } 2888 2889 static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root, 2890 uintptr_t align, uintptr_t brk) 2891 { 2892 uintptr_t last = mmap_min_addr; 2893 uintptr_t base, skip; 2894 2895 while (true) { 2896 base = ROUND_UP(last, align); 2897 if (base < last) { 2898 return -1; 2899 } 2900 2901 skip = pgb_try_itree(ga, base, root); 2902 if (skip == 0) { 2903 break; 2904 } 2905 2906 last = base + skip; 2907 if (last < base) { 2908 return -1; 2909 } 2910 } 2911 2912 /* 2913 * We've chosen 'base' based on holes in the interval tree, 2914 * but we don't yet know if it is a valid host address. 2915 * Because it is the first matching hole, if the host addresses 2916 * are invalid we know there are no further matches. 2917 */ 2918 return pgb_try_mmap_set(ga, base, brk) ? 
base : -1; 2919 } 2920 2921 static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr, 2922 uintptr_t guest_hiaddr, uintptr_t align) 2923 { 2924 IntervalTreeRoot *root; 2925 uintptr_t brk, ret; 2926 PGBAddrs ga; 2927 2928 /* Try the identity map first. */ 2929 if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) { 2930 brk = (uintptr_t)sbrk(0); 2931 if (pgb_try_mmap_set(&ga, 0, brk)) { 2932 guest_base = 0; 2933 return; 2934 } 2935 } 2936 2937 /* 2938 * Rebuild the address set for non-identity map. 2939 * This differs in the mapping of the guest NULL page. 2940 */ 2941 pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, false); 2942 2943 root = read_self_maps(); 2944 2945 /* Read brk after we've read the maps, which will malloc. */ 2946 brk = (uintptr_t)sbrk(0); 2947 2948 if (!root) { 2949 ret = pgb_find_fallback(&ga, align, brk); 2950 } else { 2951 /* 2952 * Reserve the area close to the host brk. 2953 * This will be freed with the rest of the tree. 2954 */ 2955 IntervalTreeNode *b = g_new0(IntervalTreeNode, 1); 2956 b->start = brk; 2957 b->last = brk + 16 * MiB - 1; 2958 interval_tree_insert(b, root); 2959 2960 ret = pgb_find_itree(&ga, root, align, brk); 2961 free_self_maps(root); 2962 } 2963 2964 if (ret == -1) { 2965 int w = TARGET_LONG_BITS / 4; 2966 2967 error_report("%s: Unable to find a guest_base to satisfy all " 2968 "guest address mapping requirements", image_name); 2969 2970 for (int i = 0; i < ga.nbounds; ++i) { 2971 error_printf(" %0*" PRIx64 "-%0*" PRIx64 "\n", 2972 w, (uint64_t)ga.bounds[i][0], 2973 w, (uint64_t)ga.bounds[i][1]); 2974 } 2975 exit(EXIT_FAILURE); 2976 } 2977 guest_base = ret; 2978 } 2979 2980 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, 2981 abi_ulong guest_hiaddr) 2982 { 2983 /* In order to use host shmat, we must be able to honor SHMLBA. */ 2984 uintptr_t align = MAX(SHMLBA, TARGET_PAGE_SIZE); 2985 2986 /* Sanity check the guest binary. */ 2987 if (reserved_va) { 2988 if (guest_hiaddr > reserved_va) { 2989 error_report("%s: requires more than reserved virtual " 2990 "address space (0x%" PRIx64 " > 0x%lx)", 2991 image_name, (uint64_t)guest_hiaddr, reserved_va); 2992 exit(EXIT_FAILURE); 2993 } 2994 } else { 2995 if (guest_hiaddr != (uintptr_t)guest_hiaddr) { 2996 error_report("%s: requires more virtual address space " 2997 "than the host can provide (0x%" PRIx64 ")", 2998 image_name, (uint64_t)guest_hiaddr + 1); 2999 exit(EXIT_FAILURE); 3000 } 3001 } 3002 3003 if (have_guest_base) { 3004 pgb_fixed(image_name, guest_loaddr, guest_hiaddr, align); 3005 } else { 3006 pgb_dynamic(image_name, guest_loaddr, guest_hiaddr, align); 3007 } 3008 3009 /* Reserve and initialize the commpage. */ 3010 if (!init_guest_commpage()) { 3011 /* We have already probed for the commpage being free. */ 3012 g_assert_not_reached(); 3013 } 3014 3015 assert(QEMU_IS_ALIGNED(guest_base, align)); 3016 qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space " 3017 "@ 0x%" PRIx64 "\n", (uint64_t)guest_base); 3018 } 3019 3020 enum { 3021 /* The string "GNU\0" as a magic number. */ 3022 GNU0_MAGIC = const_le32('G' | 'N' << 8 | 'U' << 16), 3023 NOTE_DATA_SZ = 1 * KiB, 3024 NOTE_NAME_SZ = 4, 3025 ELF_GNU_PROPERTY_ALIGN = ELF_CLASS == ELFCLASS32 ? 4 : 8, 3026 }; 3027 3028 /* 3029 * Process a single gnu_property entry. 3030 * Return false for error. 
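 *
 * Each entry in the note's payload has the layout:
 *
 *     uint32_t pr_type;
 *     uint32_t pr_datasz;
 *     uint8_t  pr_data[pr_datasz];  (padded to ELF_GNU_PROPERTY_ALIGN)
 *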
3031 */ 3032 static bool parse_elf_property(const uint32_t *data, int *off, int datasz, 3033 struct image_info *info, bool have_prev_type, 3034 uint32_t *prev_type, Error **errp) 3035 { 3036 uint32_t pr_type, pr_datasz, step; 3037 3038 if (*off > datasz || !QEMU_IS_ALIGNED(*off, ELF_GNU_PROPERTY_ALIGN)) { 3039 goto error_data; 3040 } 3041 datasz -= *off; 3042 data += *off / sizeof(uint32_t); 3043 3044 if (datasz < 2 * sizeof(uint32_t)) { 3045 goto error_data; 3046 } 3047 pr_type = data[0]; 3048 pr_datasz = data[1]; 3049 data += 2; 3050 datasz -= 2 * sizeof(uint32_t); 3051 step = ROUND_UP(pr_datasz, ELF_GNU_PROPERTY_ALIGN); 3052 if (step > datasz) { 3053 goto error_data; 3054 } 3055 3056 /* Properties are supposed to be unique and sorted on pr_type. */ 3057 if (have_prev_type && pr_type <= *prev_type) { 3058 if (pr_type == *prev_type) { 3059 error_setg(errp, "Duplicate property in PT_GNU_PROPERTY"); 3060 } else { 3061 error_setg(errp, "Unsorted property in PT_GNU_PROPERTY"); 3062 } 3063 return false; 3064 } 3065 *prev_type = pr_type; 3066 3067 if (!arch_parse_elf_property(pr_type, pr_datasz, data, info, errp)) { 3068 return false; 3069 } 3070 3071 *off += 2 * sizeof(uint32_t) + step; 3072 return true; 3073 3074 error_data: 3075 error_setg(errp, "Ill-formed property in PT_GNU_PROPERTY"); 3076 return false; 3077 } 3078 3079 /* Process NT_GNU_PROPERTY_TYPE_0. */ 3080 static bool parse_elf_properties(const ImageSource *src, 3081 struct image_info *info, 3082 const struct elf_phdr *phdr, 3083 Error **errp) 3084 { 3085 union { 3086 struct elf_note nhdr; 3087 uint32_t data[NOTE_DATA_SZ / sizeof(uint32_t)]; 3088 } note; 3089 3090 int n, off, datasz; 3091 bool have_prev_type; 3092 uint32_t prev_type; 3093 3094 /* Unless the arch requires properties, ignore them. */ 3095 if (!ARCH_USE_GNU_PROPERTY) { 3096 return true; 3097 } 3098 3099 /* If the properties are crazy large, that's too bad. */ 3100 n = phdr->p_filesz; 3101 if (n > sizeof(note)) { 3102 error_setg(errp, "PT_GNU_PROPERTY too large"); 3103 return false; 3104 } 3105 if (n < sizeof(note.nhdr)) { 3106 error_setg(errp, "PT_GNU_PROPERTY too small"); 3107 return false; 3108 } 3109 3110 if (!imgsrc_read(¬e, phdr->p_offset, n, src, errp)) { 3111 return false; 3112 } 3113 3114 /* 3115 * The contents of a valid PT_GNU_PROPERTY is a sequence 3116 * of uint32_t -- swap them all now. 3117 */ 3118 #ifdef BSWAP_NEEDED 3119 for (int i = 0; i < n / 4; i++) { 3120 bswap32s(note.data + i); 3121 } 3122 #endif 3123 3124 /* 3125 * Note that nhdr is 3 words, and that the "name" described by namesz 3126 * immediately follows nhdr and is thus at the 4th word. Further, all 3127 * of the inputs to the kernel's round_up are multiples of 4. 3128 */ 3129 if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 || 3130 note.nhdr.n_namesz != NOTE_NAME_SZ || 3131 note.data[3] != GNU0_MAGIC) { 3132 error_setg(errp, "Invalid note in PT_GNU_PROPERTY"); 3133 return false; 3134 } 3135 off = sizeof(note.nhdr) + NOTE_NAME_SZ; 3136 3137 datasz = note.nhdr.n_descsz + off; 3138 if (datasz > n) { 3139 error_setg(errp, "Invalid note size in PT_GNU_PROPERTY"); 3140 return false; 3141 } 3142 3143 have_prev_type = false; 3144 prev_type = 0; 3145 while (1) { 3146 if (off == datasz) { 3147 return true; /* end, exit ok */ 3148 } 3149 if (!parse_elf_property(note.data, &off, datasz, info, 3150 have_prev_type, &prev_type, errp)) { 3151 return false; 3152 } 3153 have_prev_type = true; 3154 } 3155 } 3156 3157 /** 3158 * load_elf_image: Load an ELF image into the address space. 
3159 * @image_name: the filename of the image, to use in error messages. 3160 * @src: the ImageSource from which to read. 3161 * @info: info collected from the loaded image. 3162 * @ehdr: the ELF header, not yet bswapped. 3163 * @pinterp_name: record any PT_INTERP string found. 3164 * 3165 * On return: @info values will be filled in, as necessary or available. 3166 */ 3167 3168 static void load_elf_image(const char *image_name, const ImageSource *src, 3169 struct image_info *info, struct elfhdr *ehdr, 3170 char **pinterp_name) 3171 { 3172 g_autofree struct elf_phdr *phdr = NULL; 3173 abi_ulong load_addr, load_bias, loaddr, hiaddr, error; 3174 int i, prot_exec; 3175 Error *err = NULL; 3176 3177 /* 3178 * First of all, some simple consistency checks. 3179 * Note that we rely on the bswapped ehdr staying in bprm_buf, 3180 * for later use by load_elf_binary and create_elf_tables. 3181 */ 3182 if (!imgsrc_read(ehdr, 0, sizeof(*ehdr), src, &err)) { 3183 goto exit_errmsg; 3184 } 3185 if (!elf_check_ident(ehdr)) { 3186 error_setg(&err, "Invalid ELF image for this architecture"); 3187 goto exit_errmsg; 3188 } 3189 bswap_ehdr(ehdr); 3190 if (!elf_check_ehdr(ehdr)) { 3191 error_setg(&err, "Invalid ELF image for this architecture"); 3192 goto exit_errmsg; 3193 } 3194 3195 phdr = imgsrc_read_alloc(ehdr->e_phoff, 3196 ehdr->e_phnum * sizeof(struct elf_phdr), 3197 src, &err); 3198 if (phdr == NULL) { 3199 goto exit_errmsg; 3200 } 3201 bswap_phdr(phdr, ehdr->e_phnum); 3202 3203 info->nsegs = 0; 3204 info->pt_dynamic_addr = 0; 3205 3206 mmap_lock(); 3207 3208 /* 3209 * Find the maximum size of the image and allocate an appropriate 3210 * amount of memory to handle that. Locate the interpreter, if any. 3211 */ 3212 loaddr = -1, hiaddr = 0; 3213 info->alignment = 0; 3214 info->exec_stack = EXSTACK_DEFAULT; 3215 for (i = 0; i < ehdr->e_phnum; ++i) { 3216 struct elf_phdr *eppnt = phdr + i; 3217 if (eppnt->p_type == PT_LOAD) { 3218 abi_ulong a = eppnt->p_vaddr & TARGET_PAGE_MASK; 3219 if (a < loaddr) { 3220 loaddr = a; 3221 } 3222 a = eppnt->p_vaddr + eppnt->p_memsz - 1; 3223 if (a > hiaddr) { 3224 hiaddr = a; 3225 } 3226 ++info->nsegs; 3227 info->alignment |= eppnt->p_align; 3228 } else if (eppnt->p_type == PT_INTERP && pinterp_name) { 3229 g_autofree char *interp_name = NULL; 3230 3231 if (*pinterp_name) { 3232 error_setg(&err, "Multiple PT_INTERP entries"); 3233 goto exit_errmsg; 3234 } 3235 3236 interp_name = imgsrc_read_alloc(eppnt->p_offset, eppnt->p_filesz, 3237 src, &err); 3238 if (interp_name == NULL) { 3239 goto exit_errmsg; 3240 } 3241 if (interp_name[eppnt->p_filesz - 1] != 0) { 3242 error_setg(&err, "Invalid PT_INTERP entry"); 3243 goto exit_errmsg; 3244 } 3245 *pinterp_name = g_steal_pointer(&interp_name); 3246 } else if (eppnt->p_type == PT_GNU_PROPERTY) { 3247 if (!parse_elf_properties(src, info, eppnt, &err)) { 3248 goto exit_errmsg; 3249 } 3250 } else if (eppnt->p_type == PT_GNU_STACK) { 3251 info->exec_stack = eppnt->p_flags & PF_X; 3252 } 3253 } 3254 3255 load_addr = loaddr; 3256 3257 if (pinterp_name != NULL) { 3258 if (ehdr->e_type == ET_EXEC) { 3259 /* 3260 * Make sure that the low address does not conflict with 3261 * MMAP_MIN_ADDR or the QEMU application itself. 3262 */ 3263 probe_guest_base(image_name, loaddr, hiaddr); 3264 } else { 3265 abi_ulong align; 3266 3267 /* 3268 * The binary is dynamic, but we still need to 3269 * select guest_base. In this case we pass a size. 
3270 */ 3271 probe_guest_base(image_name, 0, hiaddr - loaddr); 3272 3273 /* 3274 * Avoid collision with the loader by providing a different 3275 * default load address. 3276 */ 3277 load_addr += elf_et_dyn_base; 3278 3279 /* 3280 * TODO: Better support for mmap alignment is desirable. 3281 * Since we do not have complete control over the guest 3282 * address space, we prefer the kernel to choose some address 3283 * rather than force the use of LOAD_ADDR via MAP_FIXED. 3284 * But without MAP_FIXED we cannot guarantee alignment, 3285 * only suggest it. 3286 */ 3287 align = pow2ceil(info->alignment); 3288 if (align) { 3289 load_addr &= -align; 3290 } 3291 } 3292 } 3293 3294 /* 3295 * Reserve address space for all of this. 3296 * 3297 * In the case of ET_EXEC, we supply MAP_FIXED_NOREPLACE so that we get 3298 * exactly the address range that is required. Without reserved_va, 3299 * the guest address space is not isolated. We have attempted to avoid 3300 * conflict with the host program itself via probe_guest_base, but using 3301 * MAP_FIXED_NOREPLACE instead of MAP_FIXED provides an extra check. 3302 * 3303 * Otherwise this is ET_DYN, and we are searching for a location 3304 * that can hold the memory space required. If the image is 3305 * pre-linked, LOAD_ADDR will be non-zero, and the kernel should 3306 * honor that address if it happens to be free. 3307 * 3308 * In both cases, we will overwrite pages in this range with mappings 3309 * from the executable. 3310 */ 3311 load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE, 3312 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | 3313 (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0), 3314 -1, 0); 3315 if (load_addr == -1) { 3316 goto exit_mmap; 3317 } 3318 load_bias = load_addr - loaddr; 3319 3320 if (elf_is_fdpic(ehdr)) { 3321 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs = 3322 g_malloc(sizeof(*loadsegs) * info->nsegs); 3323 3324 for (i = 0; i < ehdr->e_phnum; ++i) { 3325 switch (phdr[i].p_type) { 3326 case PT_DYNAMIC: 3327 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias; 3328 break; 3329 case PT_LOAD: 3330 loadsegs->addr = phdr[i].p_vaddr + load_bias; 3331 loadsegs->p_vaddr = phdr[i].p_vaddr; 3332 loadsegs->p_memsz = phdr[i].p_memsz; 3333 ++loadsegs; 3334 break; 3335 } 3336 } 3337 } 3338 3339 info->load_bias = load_bias; 3340 info->code_offset = load_bias; 3341 info->data_offset = load_bias; 3342 info->load_addr = load_addr; 3343 info->entry = ehdr->e_entry + load_bias; 3344 info->start_code = -1; 3345 info->end_code = 0; 3346 info->start_data = -1; 3347 info->end_data = 0; 3348 /* Usual start for brk is after all sections of the main executable. */ 3349 info->brk = TARGET_PAGE_ALIGN(hiaddr + load_bias); 3350 info->elf_flags = ehdr->e_flags; 3351 3352 prot_exec = PROT_EXEC; 3353 #ifdef TARGET_AARCH64 3354 /* 3355 * If the BTI feature is present, this indicates that the executable 3356 * pages of the startup binary should be mapped with PROT_BTI, so that 3357 * branch targets are enforced. 3358 * 3359 * The startup binary is either the interpreter or the static executable. 3360 * The interpreter is responsible for all pages of a dynamic executable. 3361 * 3362 * Elf notes are backward compatible to older cpus. 3363 * Do not enable BTI unless it is supported. 
3364 */ 3365 if ((info->note_flags & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) 3366 && (pinterp_name == NULL || *pinterp_name == 0) 3367 && cpu_isar_feature(aa64_bti, ARM_CPU(thread_cpu))) { 3368 prot_exec |= TARGET_PROT_BTI; 3369 } 3370 #endif 3371 3372 for (i = 0; i < ehdr->e_phnum; i++) { 3373 struct elf_phdr *eppnt = phdr + i; 3374 if (eppnt->p_type == PT_LOAD) { 3375 abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em; 3376 int elf_prot = 0; 3377 3378 if (eppnt->p_flags & PF_R) { 3379 elf_prot |= PROT_READ; 3380 } 3381 if (eppnt->p_flags & PF_W) { 3382 elf_prot |= PROT_WRITE; 3383 } 3384 if (eppnt->p_flags & PF_X) { 3385 elf_prot |= prot_exec; 3386 } 3387 3388 vaddr = load_bias + eppnt->p_vaddr; 3389 vaddr_po = vaddr & ~TARGET_PAGE_MASK; 3390 vaddr_ps = vaddr & TARGET_PAGE_MASK; 3391 3392 vaddr_ef = vaddr + eppnt->p_filesz; 3393 vaddr_em = vaddr + eppnt->p_memsz; 3394 3395 /* 3396 * Some segments may be completely empty, with a non-zero p_memsz 3397 * but no backing file segment. 3398 */ 3399 if (eppnt->p_filesz != 0) { 3400 error = imgsrc_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po, 3401 elf_prot, MAP_PRIVATE | MAP_FIXED, 3402 src, eppnt->p_offset - vaddr_po); 3403 if (error == -1) { 3404 goto exit_mmap; 3405 } 3406 } 3407 3408 /* If the load segment requests extra zeros (e.g. bss), map it. */ 3409 if (vaddr_ef < vaddr_em && 3410 !zero_bss(vaddr_ef, vaddr_em, elf_prot, &err)) { 3411 goto exit_errmsg; 3412 } 3413 3414 /* Find the full program boundaries. */ 3415 if (elf_prot & PROT_EXEC) { 3416 if (vaddr < info->start_code) { 3417 info->start_code = vaddr; 3418 } 3419 if (vaddr_ef > info->end_code) { 3420 info->end_code = vaddr_ef; 3421 } 3422 } 3423 if (elf_prot & PROT_WRITE) { 3424 if (vaddr < info->start_data) { 3425 info->start_data = vaddr; 3426 } 3427 if (vaddr_ef > info->end_data) { 3428 info->end_data = vaddr_ef; 3429 } 3430 } 3431 #ifdef TARGET_MIPS 3432 } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) { 3433 Mips_elf_abiflags_v0 abiflags; 3434 3435 if (!imgsrc_read(&abiflags, eppnt->p_offset, sizeof(abiflags), 3436 src, &err)) { 3437 goto exit_errmsg; 3438 } 3439 bswap_mips_abiflags(&abiflags); 3440 info->fp_abi = abiflags.fp_abi; 3441 #endif 3442 } 3443 } 3444 3445 if (info->end_data == 0) { 3446 info->start_data = info->end_code; 3447 info->end_data = info->end_code; 3448 } 3449 3450 if (qemu_log_enabled()) { 3451 load_symbols(ehdr, src, load_bias); 3452 } 3453 3454 debuginfo_report_elf(image_name, src->fd, load_bias); 3455 3456 mmap_unlock(); 3457 3458 close(src->fd); 3459 return; 3460 3461 exit_mmap: 3462 error_setg_errno(&err, errno, "Error mapping file"); 3463 goto exit_errmsg; 3464 exit_errmsg: 3465 error_reportf_err(err, "%s: ", image_name); 3466 exit(-1); 3467 } 3468 3469 static void load_elf_interp(const char *filename, struct image_info *info, 3470 char bprm_buf[BPRM_BUF_SIZE]) 3471 { 3472 struct elfhdr ehdr; 3473 ImageSource src; 3474 int fd, retval; 3475 Error *err = NULL; 3476 3477 fd = open(path(filename), O_RDONLY); 3478 if (fd < 0) { 3479 error_setg_file_open(&err, errno, filename); 3480 error_report_err(err); 3481 exit(-1); 3482 } 3483 3484 retval = read(fd, bprm_buf, BPRM_BUF_SIZE); 3485 if (retval < 0) { 3486 error_setg_errno(&err, errno, "Error reading file header"); 3487 error_reportf_err(err, "%s: ", filename); 3488 exit(-1); 3489 } 3490 3491 src.fd = fd; 3492 src.cache = bprm_buf; 3493 src.cache_size = retval; 3494 3495 load_elf_image(filename, &src, info, &ehdr, NULL); 3496 } 3497 3498 #ifdef VDSO_HEADER 3499 #include VDSO_HEADER 3500 #define 
vdso_image_info() &vdso_image_info 3501 #else 3502 #define vdso_image_info() NULL 3503 #endif 3504 3505 static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso) 3506 { 3507 ImageSource src; 3508 struct elfhdr ehdr; 3509 abi_ulong load_bias, load_addr; 3510 3511 src.fd = -1; 3512 src.cache = vdso->image; 3513 src.cache_size = vdso->image_size; 3514 3515 load_elf_image("<internal-vdso>", &src, info, &ehdr, NULL); 3516 load_addr = info->load_addr; 3517 load_bias = info->load_bias; 3518 3519 /* 3520 * We need to relocate the VDSO image. The one built into the kernel 3521 * is built for a fixed address. The one built for QEMU is not, since 3522 * that requires close control of the guest address space. 3523 * We pre-processed the image to locate all of the addresses that need 3524 * to be updated. 3525 */ 3526 for (unsigned i = 0, n = vdso->reloc_count; i < n; i++) { 3527 abi_ulong *addr = g2h_untagged(load_addr + vdso->relocs[i]); 3528 *addr = tswapal(tswapal(*addr) + load_bias); 3529 } 3530 3531 /* Install signal trampolines, if present. */ 3532 if (vdso->sigreturn_ofs) { 3533 default_sigreturn = load_addr + vdso->sigreturn_ofs; 3534 } 3535 if (vdso->rt_sigreturn_ofs) { 3536 default_rt_sigreturn = load_addr + vdso->rt_sigreturn_ofs; 3537 } 3538 3539 /* Remove write from VDSO segment. */ 3540 target_mprotect(info->start_data, info->end_data - info->start_data, 3541 PROT_READ | PROT_EXEC); 3542 } 3543 3544 static int symfind(const void *s0, const void *s1) 3545 { 3546 struct elf_sym *sym = (struct elf_sym *)s1; 3547 __typeof(sym->st_value) addr = *(uint64_t *)s0; 3548 int result = 0; 3549 3550 if (addr < sym->st_value) { 3551 result = -1; 3552 } else if (addr >= sym->st_value + sym->st_size) { 3553 result = 1; 3554 } 3555 return result; 3556 } 3557 3558 static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr) 3559 { 3560 #if ELF_CLASS == ELFCLASS32 3561 struct elf_sym *syms = s->disas_symtab.elf32; 3562 #else 3563 struct elf_sym *syms = s->disas_symtab.elf64; 3564 #endif 3565 3566 // binary search 3567 struct elf_sym *sym; 3568 3569 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind); 3570 if (sym != NULL) { 3571 return s->disas_strtab + sym->st_name; 3572 } 3573 3574 return ""; 3575 } 3576 3577 /* FIXME: This should use elf_ops.h.inc */ 3578 static int symcmp(const void *s0, const void *s1) 3579 { 3580 struct elf_sym *sym0 = (struct elf_sym *)s0; 3581 struct elf_sym *sym1 = (struct elf_sym *)s1; 3582 return (sym0->st_value < sym1->st_value) 3583 ? -1 3584 : ((sym0->st_value > sym1->st_value) ? 1 : 0); 3585 } 3586 3587 /* Best attempt to load symbols from this ELF object. */ 3588 static void load_symbols(struct elfhdr *hdr, const ImageSource *src, 3589 abi_ulong load_bias) 3590 { 3591 int i, shnum, nsyms, sym_idx = 0, str_idx = 0; 3592 g_autofree struct elf_shdr *shdr = NULL; 3593 char *strings = NULL; 3594 struct elf_sym *syms = NULL; 3595 struct elf_sym *new_syms; 3596 uint64_t segsz; 3597 3598 shnum = hdr->e_shnum; 3599 shdr = imgsrc_read_alloc(hdr->e_shoff, shnum * sizeof(struct elf_shdr), 3600 src, NULL); 3601 if (shdr == NULL) { 3602 return; 3603 } 3604 3605 bswap_shdr(shdr, shnum); 3606 for (i = 0; i < shnum; ++i) { 3607 if (shdr[i].sh_type == SHT_SYMTAB) { 3608 sym_idx = i; 3609 str_idx = shdr[i].sh_link; 3610 goto found; 3611 } 3612 } 3613 3614 /* There will be no symbol table if the file was stripped. */ 3615 return; 3616 3617 found: 3618 /* Now know where the strtab and symtab are. Snarf them. 
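 * Both tables are pulled into host memory; the loop below then keeps
 * only defined STT_FUNC symbols, which is all lookup_symbolxx() needs
 * to map a guest PC back to a symbol name.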
*/ 3619 3620 segsz = shdr[str_idx].sh_size; 3621 strings = g_try_malloc(segsz); 3622 if (!strings) { 3623 goto give_up; 3624 } 3625 if (!imgsrc_read(strings, shdr[str_idx].sh_offset, segsz, src, NULL)) { 3626 goto give_up; 3627 } 3628 3629 segsz = shdr[sym_idx].sh_size; 3630 if (segsz / sizeof(struct elf_sym) > INT_MAX) { 3631 /* 3632 * Implausibly large symbol table: give up rather than ploughing 3633 * on with the number of symbols calculation overflowing. 3634 */ 3635 goto give_up; 3636 } 3637 nsyms = segsz / sizeof(struct elf_sym); 3638 syms = g_try_malloc(segsz); 3639 if (!syms) { 3640 goto give_up; 3641 } 3642 if (!imgsrc_read(syms, shdr[sym_idx].sh_offset, segsz, src, NULL)) { 3643 goto give_up; 3644 } 3645 3646 for (i = 0; i < nsyms; ) { 3647 bswap_sym(syms + i); 3648 /* Throw away entries which we do not need. */ 3649 if (syms[i].st_shndx == SHN_UNDEF 3650 || syms[i].st_shndx >= SHN_LORESERVE 3651 || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { 3652 if (i < --nsyms) { 3653 syms[i] = syms[nsyms]; 3654 } 3655 } else { 3656 #if defined(TARGET_ARM) || defined (TARGET_MIPS) 3657 /* The bottom address bit marks a Thumb or MIPS16 symbol. */ 3658 syms[i].st_value &= ~(target_ulong)1; 3659 #endif 3660 syms[i].st_value += load_bias; 3661 i++; 3662 } 3663 } 3664 3665 /* No "useful" symbol. */ 3666 if (nsyms == 0) { 3667 goto give_up; 3668 } 3669 3670 /* 3671 * Attempt to free the storage associated with the local symbols 3672 * that we threw away. Whether or not this has any effect on the 3673 * memory allocation depends on the malloc implementation and how 3674 * many symbols we managed to discard. 3675 */ 3676 new_syms = g_try_renew(struct elf_sym, syms, nsyms); 3677 if (new_syms == NULL) { 3678 goto give_up; 3679 } 3680 syms = new_syms; 3681 3682 qsort(syms, nsyms, sizeof(*syms), symcmp); 3683 3684 { 3685 struct syminfo *s = g_new(struct syminfo, 1); 3686 3687 s->disas_strtab = strings; 3688 s->disas_num_syms = nsyms; 3689 #if ELF_CLASS == ELFCLASS32 3690 s->disas_symtab.elf32 = syms; 3691 #else 3692 s->disas_symtab.elf64 = syms; 3693 #endif 3694 s->lookup_symbol = lookup_symbolxx; 3695 s->next = syminfos; 3696 syminfos = s; 3697 } 3698 return; 3699 3700 give_up: 3701 g_free(strings); 3702 g_free(syms); 3703 } 3704 3705 uint32_t get_elf_eflags(int fd) 3706 { 3707 struct elfhdr ehdr; 3708 off_t offset; 3709 int ret; 3710 3711 /* Read ELF header */ 3712 offset = lseek(fd, 0, SEEK_SET); 3713 if (offset == (off_t) -1) { 3714 return 0; 3715 } 3716 ret = read(fd, &ehdr, sizeof(ehdr)); 3717 if (ret < sizeof(ehdr)) { 3718 return 0; 3719 } 3720 offset = lseek(fd, offset, SEEK_SET); 3721 if (offset == (off_t) -1) { 3722 return 0; 3723 } 3724 3725 /* Check ELF signature */ 3726 if (!elf_check_ident(&ehdr)) { 3727 return 0; 3728 } 3729 3730 /* check header */ 3731 bswap_ehdr(&ehdr); 3732 if (!elf_check_ehdr(&ehdr)) { 3733 return 0; 3734 } 3735 3736 /* return architecture id */ 3737 return ehdr.e_flags; 3738 } 3739 3740 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) 3741 { 3742 /* 3743 * We need a copy of the elf header for passing to create_elf_tables. 3744 * We will have overwritten the original when we re-use bprm->buf 3745 * while loading the interpreter. Allocate the storage for this now 3746 * and let elf_load_image do any swapping that may be required. 
3747 */ 3748 struct elfhdr ehdr; 3749 struct image_info interp_info, vdso_info; 3750 char *elf_interpreter = NULL; 3751 char *scratch; 3752 3753 memset(&interp_info, 0, sizeof(interp_info)); 3754 #ifdef TARGET_MIPS 3755 interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN; 3756 #endif 3757 3758 load_elf_image(bprm->filename, &bprm->src, info, &ehdr, &elf_interpreter); 3759 3760 /* Do this so that we can load the interpreter, if need be. We will 3761 change some of these later */ 3762 bprm->p = setup_arg_pages(bprm, info); 3763 3764 scratch = g_new0(char, TARGET_PAGE_SIZE); 3765 if (STACK_GROWS_DOWN) { 3766 bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 3767 bprm->p, info->stack_limit); 3768 info->file_string = bprm->p; 3769 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 3770 bprm->p, info->stack_limit); 3771 info->env_strings = bprm->p; 3772 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 3773 bprm->p, info->stack_limit); 3774 info->arg_strings = bprm->p; 3775 } else { 3776 info->arg_strings = bprm->p; 3777 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 3778 bprm->p, info->stack_limit); 3779 info->env_strings = bprm->p; 3780 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 3781 bprm->p, info->stack_limit); 3782 info->file_string = bprm->p; 3783 bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 3784 bprm->p, info->stack_limit); 3785 } 3786 3787 g_free(scratch); 3788 3789 if (!bprm->p) { 3790 fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG)); 3791 exit(-1); 3792 } 3793 3794 if (elf_interpreter) { 3795 load_elf_interp(elf_interpreter, &interp_info, bprm->buf); 3796 3797 /* 3798 * While unusual because of ELF_ET_DYN_BASE, if we are unlucky 3799 * with the mappings the interpreter can be loaded above but 3800 * near the main executable, which can leave very little room 3801 * for the heap. 3802 * If the current brk has less than 16MB, use the end of the 3803 * interpreter. 3804 */ 3805 if (interp_info.brk > info->brk && 3806 interp_info.load_bias - info->brk < 16 * MiB) { 3807 info->brk = interp_info.brk; 3808 } 3809 3810 /* If the program interpreter is one of these two, then assume 3811 an iBCS2 image. Otherwise assume a native linux image. */ 3812 3813 if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 3814 || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) { 3815 info->personality = PER_SVR4; 3816 3817 /* Why this, you ask??? Well SVr4 maps page 0 as read-only, 3818 and some applications "depend" upon this behavior. Since 3819 we do not have the power to recompile these, we emulate 3820 the SVr4 behavior. Sigh. */ 3821 target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC, 3822 MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS, 3823 -1, 0); 3824 } 3825 #ifdef TARGET_MIPS 3826 info->interp_fp_abi = interp_info.fp_abi; 3827 #endif 3828 } 3829 3830 /* 3831 * Load a vdso if available, which will amongst other things contain the 3832 * signal trampolines. Otherwise, allocate a separate page for them. 
3833 */ 3834 const VdsoImageInfo *vdso = vdso_image_info(); 3835 if (vdso) { 3836 load_elf_vdso(&vdso_info, vdso); 3837 info->vdso = vdso_info.load_bias; 3838 } else if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) { 3839 abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE, 3840 PROT_READ | PROT_WRITE, 3841 MAP_PRIVATE | MAP_ANON, -1, 0); 3842 if (tramp_page == -1) { 3843 return -errno; 3844 } 3845 3846 setup_sigtramp(tramp_page); 3847 target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC); 3848 } 3849 3850 bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &ehdr, info, 3851 elf_interpreter ? &interp_info : NULL, 3852 vdso ? &vdso_info : NULL); 3853 info->start_stack = bprm->p; 3854 3855 /* If we have an interpreter, set that as the program's entry point. 3856 Copy the load_bias as well, to help PPC64 interpret the entry 3857 point as a function descriptor. Do this after creating elf tables 3858 so that we copy the original program entry point into the AUXV. */ 3859 if (elf_interpreter) { 3860 info->load_bias = interp_info.load_bias; 3861 info->entry = interp_info.entry; 3862 g_free(elf_interpreter); 3863 } 3864 3865 #ifdef USE_ELF_CORE_DUMP 3866 bprm->core_dump = &elf_core_dump; 3867 #endif 3868 3869 return 0; 3870 } 3871 3872 #ifdef USE_ELF_CORE_DUMP 3873 #include "exec/translate-all.h" 3874 3875 /* 3876 * Definitions to generate Intel SVR4-like core files. 3877 * These mostly have the same names as the SVR4 types with "target_elf_" 3878 * tacked on the front to prevent clashes with linux definitions, 3879 * and the typedef forms have been avoided. This is mostly like 3880 * the SVR4 structure, but more Linuxy, with things that Linux does 3881 * not support and which gdb doesn't really use excluded. 3882 * 3883 * Fields we don't dump (their contents is zero) in linux-user qemu 3884 * are marked with XXX. 3885 * 3886 * Core dump code is copied from linux kernel (fs/binfmt_elf.c). 3887 * 3888 * Porting ELF coredump for target is (quite) simple process. First you 3889 * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for 3890 * the target resides): 3891 * 3892 * #define USE_ELF_CORE_DUMP 3893 * 3894 * Next you define type of register set used for dumping. ELF specification 3895 * says that it needs to be array of elf_greg_t that has size of ELF_NREG. 3896 * 3897 * typedef <target_regtype> target_elf_greg_t; 3898 * #define ELF_NREG <number of registers> 3899 * typedef taret_elf_greg_t target_elf_gregset_t[ELF_NREG]; 3900 * 3901 * Last step is to implement target specific function that copies registers 3902 * from given cpu into just specified register set. Prototype is: 3903 * 3904 * static void elf_core_copy_regs(taret_elf_gregset_t *regs, 3905 * const CPUArchState *env); 3906 * 3907 * Parameters: 3908 * regs - copy register values into here (allocated and zeroed by caller) 3909 * env - copy registers from here 3910 * 3911 * Example for ARM target is provided in this file. 
3912 */ 3913 3914 struct target_elf_siginfo { 3915 abi_int si_signo; /* signal number */ 3916 abi_int si_code; /* extra code */ 3917 abi_int si_errno; /* errno */ 3918 }; 3919 3920 struct target_elf_prstatus { 3921 struct target_elf_siginfo pr_info; /* Info associated with signal */ 3922 abi_short pr_cursig; /* Current signal */ 3923 abi_ulong pr_sigpend; /* XXX */ 3924 abi_ulong pr_sighold; /* XXX */ 3925 target_pid_t pr_pid; 3926 target_pid_t pr_ppid; 3927 target_pid_t pr_pgrp; 3928 target_pid_t pr_sid; 3929 struct target_timeval pr_utime; /* XXX User time */ 3930 struct target_timeval pr_stime; /* XXX System time */ 3931 struct target_timeval pr_cutime; /* XXX Cumulative user time */ 3932 struct target_timeval pr_cstime; /* XXX Cumulative system time */ 3933 target_elf_gregset_t pr_reg; /* GP registers */ 3934 abi_int pr_fpvalid; /* XXX */ 3935 }; 3936 3937 #define ELF_PRARGSZ (80) /* Number of chars for args */ 3938 3939 struct target_elf_prpsinfo { 3940 char pr_state; /* numeric process state */ 3941 char pr_sname; /* char for pr_state */ 3942 char pr_zomb; /* zombie */ 3943 char pr_nice; /* nice val */ 3944 abi_ulong pr_flag; /* flags */ 3945 target_uid_t pr_uid; 3946 target_gid_t pr_gid; 3947 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; 3948 /* Lots missing */ 3949 char pr_fname[16] QEMU_NONSTRING; /* filename of executable */ 3950 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ 3951 }; 3952 3953 #ifdef BSWAP_NEEDED 3954 static void bswap_prstatus(struct target_elf_prstatus *prstatus) 3955 { 3956 prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo); 3957 prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code); 3958 prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno); 3959 prstatus->pr_cursig = tswap16(prstatus->pr_cursig); 3960 prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend); 3961 prstatus->pr_sighold = tswapal(prstatus->pr_sighold); 3962 prstatus->pr_pid = tswap32(prstatus->pr_pid); 3963 prstatus->pr_ppid = tswap32(prstatus->pr_ppid); 3964 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp); 3965 prstatus->pr_sid = tswap32(prstatus->pr_sid); 3966 /* cpu times are not filled, so we skip them */ 3967 /* regs should be in correct format already */ 3968 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid); 3969 } 3970 3971 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo) 3972 { 3973 psinfo->pr_flag = tswapal(psinfo->pr_flag); 3974 psinfo->pr_uid = tswap16(psinfo->pr_uid); 3975 psinfo->pr_gid = tswap16(psinfo->pr_gid); 3976 psinfo->pr_pid = tswap32(psinfo->pr_pid); 3977 psinfo->pr_ppid = tswap32(psinfo->pr_ppid); 3978 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp); 3979 psinfo->pr_sid = tswap32(psinfo->pr_sid); 3980 } 3981 3982 static void bswap_note(struct elf_note *en) 3983 { 3984 bswap32s(&en->n_namesz); 3985 bswap32s(&en->n_descsz); 3986 bswap32s(&en->n_type); 3987 } 3988 #else 3989 static inline void bswap_prstatus(struct target_elf_prstatus *p) { } 3990 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {} 3991 static inline void bswap_note(struct elf_note *en) { } 3992 #endif /* BSWAP_NEEDED */ 3993 3994 /* 3995 * Calculate file (dump) size of given memory region. 3996 */ 3997 static size_t vma_dump_size(target_ulong start, target_ulong end, 3998 unsigned long flags) 3999 { 4000 /* The area must be readable. 
/*
 * Calculate the file (dump) size of a given memory region.
 */
static size_t vma_dump_size(target_ulong start, target_ulong end,
                            unsigned long flags)
{
    /* The area must be readable. */
    if (!(flags & PAGE_READ)) {
        return 0;
    }

    /*
     * Usually we don't dump executable pages as they contain
     * non-writable code that the debugger can read directly from
     * the target library etc.  If the region does not start with
     * an ELF header, dump it anyway.
     */
    if (!(flags & PAGE_WRITE_ORG) &&
        (flags & PAGE_EXEC) &&
        memcmp(g2h_untagged(start), ELFMAG, SELFMAG) == 0) {
        return 0;
    }

    return end - start;
}

static size_t size_note(const char *name, size_t datasz)
{
    size_t namesz = strlen(name) + 1;

    namesz = ROUND_UP(namesz, 4);
    datasz = ROUND_UP(datasz, 4);

    return sizeof(struct elf_note) + namesz + datasz;
}

static void *fill_note(void **pptr, int type, const char *name, size_t datasz)
{
    void *ptr = *pptr;
    struct elf_note *n = ptr;
    size_t namesz = strlen(name) + 1;

    n->n_namesz = namesz;
    n->n_descsz = datasz;
    n->n_type = type;
    bswap_note(n);

    ptr += sizeof(*n);
    memcpy(ptr, name, namesz);

    namesz = ROUND_UP(namesz, 4);
    datasz = ROUND_UP(datasz, 4);

    *pptr = ptr + namesz + datasz;
    return ptr + namesz;
}

static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
                            uint32_t flags)
{
    memcpy(elf->e_ident, ELFMAG, SELFMAG);

    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;

    elf->e_type = ET_CORE;
    elf->e_machine = machine;
    elf->e_version = EV_CURRENT;
    elf->e_phoff = sizeof(struct elfhdr);
    elf->e_flags = flags;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;

    bswap_ehdr(elf);
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_filesz = sz;

    bswap_phdr(phdr, 1);
}

static void fill_prstatus_note(void *data, const TaskState *ts,
                               CPUState *cpu, int signr)
{
    /*
     * Because note memory is only aligned to 4, and target_elf_prstatus
     * may well have higher alignment requirements, fill locally and
     * memcpy to the destination afterward.
     */
    struct target_elf_prstatus prstatus = {
        .pr_info.si_signo = signr,
        .pr_cursig = signr,
        .pr_pid = ts->ts_tid,
        .pr_ppid = getppid(),
        .pr_pgrp = getpgrp(),
        .pr_sid = getsid(0),
    };

    elf_core_copy_regs(&prstatus.pr_reg, cpu_env(cpu));
    bswap_prstatus(&prstatus);
    memcpy(data, &prstatus, sizeof(prstatus));
}
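/*
 * fill_note() and the fill_*_note() helpers are meant to be used in
 * pairs, as elf_core_dump() does below; a minimal sketch:
 *
 *     void *hptr = header, *dptr;
 *
 *     dptr = fill_note(&hptr, NT_PRSTATUS, "CORE",
 *                      sizeof(struct target_elf_prstatus));
 *     fill_prstatus_note(dptr, ts, cpu, signr);
 *
 * fill_note() writes the note header and name at *hptr, advances
 * *hptr past the entire (padded) note, and returns the address of
 * the descriptor so the caller can fill in the payload.
 */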
4109 */ 4110 struct target_elf_prpsinfo psinfo = { 4111 .pr_pid = getpid(), 4112 .pr_ppid = getppid(), 4113 .pr_pgrp = getpgrp(), 4114 .pr_sid = getsid(0), 4115 .pr_uid = getuid(), 4116 .pr_gid = getgid(), 4117 }; 4118 char *base_filename; 4119 size_t len; 4120 4121 len = ts->info->env_strings - ts->info->arg_strings; 4122 len = MIN(len, ELF_PRARGSZ); 4123 memcpy(&psinfo.pr_psargs, g2h_untagged(ts->info->arg_strings), len); 4124 for (size_t i = 0; i < len; i++) { 4125 if (psinfo.pr_psargs[i] == 0) { 4126 psinfo.pr_psargs[i] = ' '; 4127 } 4128 } 4129 4130 base_filename = g_path_get_basename(ts->bprm->filename); 4131 /* 4132 * Using strncpy here is fine: at max-length, 4133 * this field is not NUL-terminated. 4134 */ 4135 strncpy(psinfo.pr_fname, base_filename, sizeof(psinfo.pr_fname)); 4136 g_free(base_filename); 4137 4138 bswap_psinfo(&psinfo); 4139 memcpy(data, &psinfo, sizeof(psinfo)); 4140 } 4141 4142 static void fill_auxv_note(void *data, const TaskState *ts) 4143 { 4144 memcpy(data, g2h_untagged(ts->info->saved_auxv), ts->info->auxv_len); 4145 } 4146 4147 /* 4148 * Constructs name of coredump file. We have following convention 4149 * for the name: 4150 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core 4151 * 4152 * Returns the filename 4153 */ 4154 static char *core_dump_filename(const TaskState *ts) 4155 { 4156 g_autoptr(GDateTime) now = g_date_time_new_now_local(); 4157 g_autofree char *nowstr = g_date_time_format(now, "%Y%m%d-%H%M%S"); 4158 g_autofree char *base_filename = g_path_get_basename(ts->bprm->filename); 4159 4160 return g_strdup_printf("qemu_%s_%s_%d.core", 4161 base_filename, nowstr, (int)getpid()); 4162 } 4163 4164 static int dump_write(int fd, const void *ptr, size_t size) 4165 { 4166 const char *bufp = (const char *)ptr; 4167 ssize_t bytes_written, bytes_left; 4168 4169 bytes_written = 0; 4170 bytes_left = size; 4171 4172 /* 4173 * In normal conditions, single write(2) should do but 4174 * in case of socket etc. this mechanism is more portable. 
4175 */ 4176 do { 4177 bytes_written = write(fd, bufp, bytes_left); 4178 if (bytes_written < 0) { 4179 if (errno == EINTR) 4180 continue; 4181 return (-1); 4182 } else if (bytes_written == 0) { /* eof */ 4183 return (-1); 4184 } 4185 bufp += bytes_written; 4186 bytes_left -= bytes_written; 4187 } while (bytes_left > 0); 4188 4189 return (0); 4190 } 4191 4192 static int wmr_page_unprotect_regions(void *opaque, target_ulong start, 4193 target_ulong end, unsigned long flags) 4194 { 4195 if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) { 4196 size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size()); 4197 4198 while (1) { 4199 page_unprotect(start, 0); 4200 if (end - start <= step) { 4201 break; 4202 } 4203 start += step; 4204 } 4205 } 4206 return 0; 4207 } 4208 4209 typedef struct { 4210 unsigned count; 4211 size_t size; 4212 } CountAndSizeRegions; 4213 4214 static int wmr_count_and_size_regions(void *opaque, target_ulong start, 4215 target_ulong end, unsigned long flags) 4216 { 4217 CountAndSizeRegions *css = opaque; 4218 4219 css->count++; 4220 css->size += vma_dump_size(start, end, flags); 4221 return 0; 4222 } 4223 4224 typedef struct { 4225 struct elf_phdr *phdr; 4226 off_t offset; 4227 } FillRegionPhdr; 4228 4229 static int wmr_fill_region_phdr(void *opaque, target_ulong start, 4230 target_ulong end, unsigned long flags) 4231 { 4232 FillRegionPhdr *d = opaque; 4233 struct elf_phdr *phdr = d->phdr; 4234 4235 phdr->p_type = PT_LOAD; 4236 phdr->p_vaddr = start; 4237 phdr->p_paddr = 0; 4238 phdr->p_filesz = vma_dump_size(start, end, flags); 4239 phdr->p_offset = d->offset; 4240 d->offset += phdr->p_filesz; 4241 phdr->p_memsz = end - start; 4242 phdr->p_flags = (flags & PAGE_READ ? PF_R : 0) 4243 | (flags & PAGE_WRITE_ORG ? PF_W : 0) 4244 | (flags & PAGE_EXEC ? PF_X : 0); 4245 phdr->p_align = ELF_EXEC_PAGESIZE; 4246 4247 bswap_phdr(phdr, 1); 4248 d->phdr = phdr + 1; 4249 return 0; 4250 } 4251 4252 static int wmr_write_region(void *opaque, target_ulong start, 4253 target_ulong end, unsigned long flags) 4254 { 4255 int fd = *(int *)opaque; 4256 size_t size = vma_dump_size(start, end, flags); 4257 4258 if (!size) { 4259 return 0; 4260 } 4261 return dump_write(fd, g2h_untagged(start), size); 4262 } 4263 4264 /* 4265 * Write out ELF coredump. 4266 * 4267 * See documentation of ELF object file format in: 4268 * http://www.caldera.com/developers/devspecs/gabi41.pdf 4269 * 4270 * Coredump format in linux is following: 4271 * 4272 * 0 +----------------------+ \ 4273 * | ELF header | ET_CORE | 4274 * +----------------------+ | 4275 * | ELF program headers | |--- headers 4276 * | - NOTE section | | 4277 * | - PT_LOAD sections | | 4278 * +----------------------+ / 4279 * | NOTEs: | 4280 * | - NT_PRSTATUS | 4281 * | - NT_PRSINFO | 4282 * | - NT_AUXV | 4283 * +----------------------+ <-- aligned to target page 4284 * | Process memory dump | 4285 * : : 4286 * . . 4287 * : : 4288 * | | 4289 * +----------------------+ 4290 * 4291 * NT_PRSTATUS -> struct elf_prstatus (per thread) 4292 * NT_PRSINFO -> struct elf_prpsinfo 4293 * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()). 4294 * 4295 * Format follows System V format as close as possible. Current 4296 * version limitations are as follows: 4297 * - no floating point registers are dumped 4298 * 4299 * Function returns 0 in case of success, negative errno otherwise. 4300 * 4301 * TODO: make this work also during runtime: it should be 4302 * possible to force coredump from running process and then 4303 * continue processing. 
/*
 * Write out an ELF coredump.
 *
 * See documentation of the ELF object file format in:
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
 *
 * The coredump format in Linux is the following:
 *
 * 0   +----------------------+         \
 *     | ELF header           | ET_CORE  |
 *     +----------------------+          |
 *     | ELF program headers  |          |--- headers
 *     | - NOTE section       |          |
 *     | - PT_LOAD sections   |          |
 *     +----------------------+         /
 *     | NOTEs:               |
 *     | - NT_PRSTATUS        |
 *     | - NT_PRPSINFO        |
 *     | - NT_AUXV            |
 *     +----------------------+ <-- aligned to target page
 *     | Process memory dump  |
 *     :                      :
 *     .                      .
 *     :                      :
 *     |                      |
 *     +----------------------+
 *
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
 *
 * The format follows the System V format as closely as possible.
 * Current version limitations are as follows:
 * - no floating point registers are dumped
 *
 * The function returns 0 in case of success, negative errno otherwise.
 *
 * TODO: make this work also during runtime: it should be possible to
 * force a coredump from a running process and then continue processing.
 * For example, QEMU could set up a SIGUSR2 handler (provided that the
 * target process hasn't registered a handler for that signal) that
 * does the dump when the signal is received.
 */
static int elf_core_dump(int signr, const CPUArchState *env)
{
    const CPUState *cpu = env_cpu((CPUArchState *)env);
    const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
    struct rlimit dumpsize;
    CountAndSizeRegions css;
    off_t offset, note_offset, data_offset;
    size_t note_size;
    int cpus, ret;
    int fd = -1;
    CPUState *cpu_iter;

    if (prctl(PR_GET_DUMPABLE) == 0) {
        return 0;
    }

    if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) {
        return 0;
    }

    cpu_list_lock();
    mmap_lock();

    /* By unprotecting, we merge vmas that might be split. */
    walk_memory_regions(NULL, wmr_page_unprotect_regions);

    /*
     * Walk through the target process memory mappings and
     * record them in css.
     */
    memset(&css, 0, sizeof(css));
    walk_memory_regions(&css, wmr_count_and_size_regions);

    cpus = 0;
    CPU_FOREACH(cpu_iter) {
        cpus++;
    }

    offset = sizeof(struct elfhdr);
    offset += (css.count + 1) * sizeof(struct elf_phdr);
    note_offset = offset;

    offset += size_note("CORE", ts->info->auxv_len);
    offset += size_note("CORE", sizeof(struct target_elf_prpsinfo));
    offset += size_note("CORE", sizeof(struct target_elf_prstatus)) * cpus;
    note_size = offset - note_offset;
    data_offset = ROUND_UP(offset, ELF_EXEC_PAGESIZE);

    /* Do not dump if the corefile size exceeds the limit. */
    if (dumpsize.rlim_cur != RLIM_INFINITY
        && dumpsize.rlim_cur < data_offset + css.size) {
        errno = 0;
        goto out;
    }

    {
        g_autofree char *corefile = core_dump_filename(ts);
        fd = open(corefile, O_WRONLY | O_CREAT | O_TRUNC,
                  S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
    }
    if (fd < 0) {
        goto out;
    }

    /*
     * There is a fair amount of alignment padding within the notes
     * as well as preceding the process memory.  Allocate a zeroed
     * block to hold it all.  Write all of the headers directly into
     * this buffer and then write it out as a block.
     */
    {
        g_autofree void *header = g_malloc0(data_offset);
        FillRegionPhdr frp;
        void *hptr, *dptr;

        /* Create the elf file header. */
        hptr = header;
        fill_elf_header(hptr, css.count + 1, ELF_MACHINE, 0);
        hptr += sizeof(struct elfhdr);

        /* Create the elf program headers. */
        fill_elf_note_phdr(hptr, note_size, note_offset);
        hptr += sizeof(struct elf_phdr);

        frp.phdr = hptr;
        frp.offset = data_offset;
        walk_memory_regions(&frp, wmr_fill_region_phdr);
        hptr = frp.phdr;

        /* Create the notes. */
        dptr = fill_note(&hptr, NT_AUXV, "CORE", ts->info->auxv_len);
        fill_auxv_note(dptr, ts);

        dptr = fill_note(&hptr, NT_PRPSINFO, "CORE",
                         sizeof(struct target_elf_prpsinfo));
        fill_prpsinfo_note(dptr, ts);

        CPU_FOREACH(cpu_iter) {
            dptr = fill_note(&hptr, NT_PRSTATUS, "CORE",
                             sizeof(struct target_elf_prstatus));
            fill_prstatus_note(dptr, ts, cpu_iter,
                               cpu_iter == cpu ? signr : 0);
        }

        if (dump_write(fd, header, data_offset) < 0) {
            goto out;
        }
    }

    /* Finally, write the process memory into the corefile as well. */
4418 */ 4419 if (walk_memory_regions(&fd, wmr_write_region) < 0) { 4420 goto out; 4421 } 4422 errno = 0; 4423 4424 out: 4425 ret = -errno; 4426 mmap_unlock(); 4427 cpu_list_unlock(); 4428 if (fd >= 0) { 4429 close(fd); 4430 } 4431 return ret; 4432 } 4433 #endif /* USE_ELF_CORE_DUMP */ 4434 4435 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop) 4436 { 4437 init_thread(regs, infop); 4438 } 4439