1 /* This is the Linux kernel elf-loading code, ported into user space */ 2 #include "qemu/osdep.h" 3 #include <sys/param.h> 4 5 #include <sys/prctl.h> 6 #include <sys/resource.h> 7 #include <sys/shm.h> 8 9 #include "qemu.h" 10 #include "user/tswap-target.h" 11 #include "user-internals.h" 12 #include "signal-common.h" 13 #include "loader.h" 14 #include "user-mmap.h" 15 #include "disas/disas.h" 16 #include "qemu/bitops.h" 17 #include "qemu/path.h" 18 #include "qemu/queue.h" 19 #include "qemu/guest-random.h" 20 #include "qemu/units.h" 21 #include "qemu/selfmap.h" 22 #include "qemu/lockable.h" 23 #include "qapi/error.h" 24 #include "qemu/error-report.h" 25 #include "target_signal.h" 26 #include "tcg/debuginfo.h" 27 28 #ifdef TARGET_ARM 29 #include "target/arm/cpu-features.h" 30 #endif 31 32 #ifdef _ARCH_PPC64 33 #undef ARCH_DLINFO 34 #undef ELF_PLATFORM 35 #undef ELF_HWCAP 36 #undef ELF_HWCAP2 37 #undef ELF_CLASS 38 #undef ELF_DATA 39 #undef ELF_ARCH 40 #endif 41 42 #ifndef TARGET_ARCH_HAS_SIGTRAMP_PAGE 43 #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 44 #endif 45 46 typedef struct { 47 const uint8_t *image; 48 const uint32_t *relocs; 49 unsigned image_size; 50 unsigned reloc_count; 51 unsigned sigreturn_ofs; 52 unsigned rt_sigreturn_ofs; 53 } VdsoImageInfo; 54 55 #define ELF_OSABI ELFOSABI_SYSV 56 57 /* from personality.h */ 58 59 /* 60 * Flags for bug emulation. 61 * 62 * These occupy the top three bytes. 63 */ 64 enum { 65 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ 66 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to 67 descriptors (signal handling) */ 68 MMAP_PAGE_ZERO = 0x0100000, 69 ADDR_COMPAT_LAYOUT = 0x0200000, 70 READ_IMPLIES_EXEC = 0x0400000, 71 ADDR_LIMIT_32BIT = 0x0800000, 72 SHORT_INODE = 0x1000000, 73 WHOLE_SECONDS = 0x2000000, 74 STICKY_TIMEOUTS = 0x4000000, 75 ADDR_LIMIT_3GB = 0x8000000, 76 }; 77 78 /* 79 * Personality types. 80 * 81 * These go in the low byte. Avoid using the top bit, it will 82 * conflict with error returns. 83 */ 84 enum { 85 PER_LINUX = 0x0000, 86 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, 87 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, 88 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, 89 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, 90 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE, 91 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, 92 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, 93 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, 94 PER_BSD = 0x0006, 95 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, 96 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, 97 PER_LINUX32 = 0x0008, 98 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, 99 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */ 100 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */ 101 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */ 102 PER_RISCOS = 0x000c, 103 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, 104 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, 105 PER_OSF4 = 0x000f, /* OSF/1 v4 */ 106 PER_HPUX = 0x0010, 107 PER_MASK = 0x00ff, 108 }; 109 110 /* 111 * Return the base personality without flags. 
112 */ 113 #define personality(pers) (pers & PER_MASK) 114 115 int info_is_fdpic(struct image_info *info) 116 { 117 return info->personality == PER_LINUX_FDPIC; 118 } 119 120 /* this flag is uneffective under linux too, should be deleted */ 121 #ifndef MAP_DENYWRITE 122 #define MAP_DENYWRITE 0 123 #endif 124 125 /* should probably go in elf.h */ 126 #ifndef ELIBBAD 127 #define ELIBBAD 80 128 #endif 129 130 #if TARGET_BIG_ENDIAN 131 #define ELF_DATA ELFDATA2MSB 132 #else 133 #define ELF_DATA ELFDATA2LSB 134 #endif 135 136 #ifdef TARGET_ABI_MIPSN32 137 typedef abi_ullong target_elf_greg_t; 138 #define tswapreg(ptr) tswap64(ptr) 139 #else 140 typedef abi_ulong target_elf_greg_t; 141 #define tswapreg(ptr) tswapal(ptr) 142 #endif 143 144 #ifdef USE_UID16 145 typedef abi_ushort target_uid_t; 146 typedef abi_ushort target_gid_t; 147 #else 148 typedef abi_uint target_uid_t; 149 typedef abi_uint target_gid_t; 150 #endif 151 typedef abi_int target_pid_t; 152 153 #ifdef TARGET_I386 154 155 #define ELF_HWCAP get_elf_hwcap() 156 157 static uint32_t get_elf_hwcap(void) 158 { 159 X86CPU *cpu = X86_CPU(thread_cpu); 160 161 return cpu->env.features[FEAT_1_EDX]; 162 } 163 164 #ifdef TARGET_X86_64 165 #define ELF_CLASS ELFCLASS64 166 #define ELF_ARCH EM_X86_64 167 168 #define ELF_PLATFORM "x86_64" 169 170 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 171 { 172 regs->rax = 0; 173 regs->rsp = infop->start_stack; 174 regs->rip = infop->entry; 175 } 176 177 #define ELF_NREG 27 178 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 179 180 /* 181 * Note that ELF_NREG should be 29 as there should be place for 182 * TRAPNO and ERR "registers" as well but linux doesn't dump 183 * those. 184 * 185 * See linux kernel: arch/x86/include/asm/elf.h 186 */ 187 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env) 188 { 189 (*regs)[0] = tswapreg(env->regs[15]); 190 (*regs)[1] = tswapreg(env->regs[14]); 191 (*regs)[2] = tswapreg(env->regs[13]); 192 (*regs)[3] = tswapreg(env->regs[12]); 193 (*regs)[4] = tswapreg(env->regs[R_EBP]); 194 (*regs)[5] = tswapreg(env->regs[R_EBX]); 195 (*regs)[6] = tswapreg(env->regs[11]); 196 (*regs)[7] = tswapreg(env->regs[10]); 197 (*regs)[8] = tswapreg(env->regs[9]); 198 (*regs)[9] = tswapreg(env->regs[8]); 199 (*regs)[10] = tswapreg(env->regs[R_EAX]); 200 (*regs)[11] = tswapreg(env->regs[R_ECX]); 201 (*regs)[12] = tswapreg(env->regs[R_EDX]); 202 (*regs)[13] = tswapreg(env->regs[R_ESI]); 203 (*regs)[14] = tswapreg(env->regs[R_EDI]); 204 (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */ 205 (*regs)[16] = tswapreg(env->eip); 206 (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff); 207 (*regs)[18] = tswapreg(env->eflags); 208 (*regs)[19] = tswapreg(env->regs[R_ESP]); 209 (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff); 210 (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff); 211 (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff); 212 (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff); 213 (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff); 214 (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff); 215 (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff); 216 } 217 218 #if ULONG_MAX > UINT32_MAX 219 #define INIT_GUEST_COMMPAGE 220 static bool init_guest_commpage(void) 221 { 222 /* 223 * The vsyscall page is at a high negative address aka kernel space, 224 * which means that we cannot actually allocate it with target_mmap. 
225 * We still should be able to use page_set_flags, unless the user 226 * has specified -R reserved_va, which would trigger an assert(). 227 */ 228 if (reserved_va != 0 && 229 TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) { 230 error_report("Cannot allocate vsyscall page"); 231 exit(EXIT_FAILURE); 232 } 233 page_set_flags(TARGET_VSYSCALL_PAGE, 234 TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK, 235 PAGE_EXEC | PAGE_VALID); 236 return true; 237 } 238 #endif 239 #else 240 241 /* 242 * This is used to ensure we don't load something for the wrong architecture. 243 */ 244 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) ) 245 246 /* 247 * These are used to set parameters in the core dumps. 248 */ 249 #define ELF_CLASS ELFCLASS32 250 #define ELF_ARCH EM_386 251 252 #define ELF_PLATFORM get_elf_platform() 253 #define EXSTACK_DEFAULT true 254 255 static const char *get_elf_platform(void) 256 { 257 static char elf_platform[] = "i386"; 258 int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL); 259 if (family > 6) { 260 family = 6; 261 } 262 if (family >= 3) { 263 elf_platform[1] = '0' + family; 264 } 265 return elf_platform; 266 } 267 268 static inline void init_thread(struct target_pt_regs *regs, 269 struct image_info *infop) 270 { 271 regs->esp = infop->start_stack; 272 regs->eip = infop->entry; 273 274 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program 275 starts %edx contains a pointer to a function which might be 276 registered using `atexit'. This provides a mean for the 277 dynamic linker to call DT_FINI functions for shared libraries 278 that have been loaded before the code runs. 279 280 A value of 0 tells we have no such handler. */ 281 regs->edx = 0; 282 } 283 284 #define ELF_NREG 17 285 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 286 287 /* 288 * Note that ELF_NREG should be 19 as there should be place for 289 * TRAPNO and ERR "registers" as well but linux doesn't dump 290 * those. 291 * 292 * See linux kernel: arch/x86/include/asm/elf.h 293 */ 294 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env) 295 { 296 (*regs)[0] = tswapreg(env->regs[R_EBX]); 297 (*regs)[1] = tswapreg(env->regs[R_ECX]); 298 (*regs)[2] = tswapreg(env->regs[R_EDX]); 299 (*regs)[3] = tswapreg(env->regs[R_ESI]); 300 (*regs)[4] = tswapreg(env->regs[R_EDI]); 301 (*regs)[5] = tswapreg(env->regs[R_EBP]); 302 (*regs)[6] = tswapreg(env->regs[R_EAX]); 303 (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff); 304 (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff); 305 (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff); 306 (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff); 307 (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */ 308 (*regs)[12] = tswapreg(env->eip); 309 (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff); 310 (*regs)[14] = tswapreg(env->eflags); 311 (*regs)[15] = tswapreg(env->regs[R_ESP]); 312 (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff); 313 } 314 315 /* 316 * i386 is the only target which supplies AT_SYSINFO for the vdso. 317 * All others only supply AT_SYSINFO_EHDR. 
318 */ 319 #define DLINFO_ARCH_ITEMS (vdso_info != NULL) 320 #define ARCH_DLINFO \ 321 do { \ 322 if (vdso_info) { \ 323 NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry); \ 324 } \ 325 } while (0) 326 327 #endif /* TARGET_X86_64 */ 328 329 #define VDSO_HEADER "vdso.c.inc" 330 331 #define USE_ELF_CORE_DUMP 332 #define ELF_EXEC_PAGESIZE 4096 333 334 #endif /* TARGET_I386 */ 335 336 #ifdef TARGET_ARM 337 338 #ifndef TARGET_AARCH64 339 /* 32 bit ARM definitions */ 340 341 #define ELF_ARCH EM_ARM 342 #define ELF_CLASS ELFCLASS32 343 #define EXSTACK_DEFAULT true 344 345 static inline void init_thread(struct target_pt_regs *regs, 346 struct image_info *infop) 347 { 348 abi_long stack = infop->start_stack; 349 memset(regs, 0, sizeof(*regs)); 350 351 regs->uregs[16] = ARM_CPU_MODE_USR; 352 if (infop->entry & 1) { 353 regs->uregs[16] |= CPSR_T; 354 } 355 regs->uregs[15] = infop->entry & 0xfffffffe; 356 regs->uregs[13] = infop->start_stack; 357 /* FIXME - what to for failure of get_user()? */ 358 get_user_ual(regs->uregs[2], stack + 8); /* envp */ 359 get_user_ual(regs->uregs[1], stack + 4); /* envp */ 360 /* XXX: it seems that r0 is zeroed after ! */ 361 regs->uregs[0] = 0; 362 /* For uClinux PIC binaries. */ 363 /* XXX: Linux does this only on ARM with no MMU (do we care ?) */ 364 regs->uregs[10] = infop->start_data; 365 366 /* Support ARM FDPIC. */ 367 if (info_is_fdpic(infop)) { 368 /* As described in the ABI document, r7 points to the loadmap info 369 * prepared by the kernel. If an interpreter is needed, r8 points 370 * to the interpreter loadmap and r9 points to the interpreter 371 * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and 372 * r9 points to the main program PT_DYNAMIC info. 373 */ 374 regs->uregs[7] = infop->loadmap_addr; 375 if (infop->interpreter_loadmap_addr) { 376 /* Executable is dynamically loaded. 
*/ 377 regs->uregs[8] = infop->interpreter_loadmap_addr; 378 regs->uregs[9] = infop->interpreter_pt_dynamic_addr; 379 } else { 380 regs->uregs[8] = 0; 381 regs->uregs[9] = infop->pt_dynamic_addr; 382 } 383 } 384 } 385 386 #define ELF_NREG 18 387 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 388 389 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env) 390 { 391 (*regs)[0] = tswapreg(env->regs[0]); 392 (*regs)[1] = tswapreg(env->regs[1]); 393 (*regs)[2] = tswapreg(env->regs[2]); 394 (*regs)[3] = tswapreg(env->regs[3]); 395 (*regs)[4] = tswapreg(env->regs[4]); 396 (*regs)[5] = tswapreg(env->regs[5]); 397 (*regs)[6] = tswapreg(env->regs[6]); 398 (*regs)[7] = tswapreg(env->regs[7]); 399 (*regs)[8] = tswapreg(env->regs[8]); 400 (*regs)[9] = tswapreg(env->regs[9]); 401 (*regs)[10] = tswapreg(env->regs[10]); 402 (*regs)[11] = tswapreg(env->regs[11]); 403 (*regs)[12] = tswapreg(env->regs[12]); 404 (*regs)[13] = tswapreg(env->regs[13]); 405 (*regs)[14] = tswapreg(env->regs[14]); 406 (*regs)[15] = tswapreg(env->regs[15]); 407 408 (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env)); 409 (*regs)[17] = tswapreg(env->regs[0]); /* XXX */ 410 } 411 412 #define USE_ELF_CORE_DUMP 413 #define ELF_EXEC_PAGESIZE 4096 414 415 enum 416 { 417 ARM_HWCAP_ARM_SWP = 1 << 0, 418 ARM_HWCAP_ARM_HALF = 1 << 1, 419 ARM_HWCAP_ARM_THUMB = 1 << 2, 420 ARM_HWCAP_ARM_26BIT = 1 << 3, 421 ARM_HWCAP_ARM_FAST_MULT = 1 << 4, 422 ARM_HWCAP_ARM_FPA = 1 << 5, 423 ARM_HWCAP_ARM_VFP = 1 << 6, 424 ARM_HWCAP_ARM_EDSP = 1 << 7, 425 ARM_HWCAP_ARM_JAVA = 1 << 8, 426 ARM_HWCAP_ARM_IWMMXT = 1 << 9, 427 ARM_HWCAP_ARM_CRUNCH = 1 << 10, 428 ARM_HWCAP_ARM_THUMBEE = 1 << 11, 429 ARM_HWCAP_ARM_NEON = 1 << 12, 430 ARM_HWCAP_ARM_VFPv3 = 1 << 13, 431 ARM_HWCAP_ARM_VFPv3D16 = 1 << 14, 432 ARM_HWCAP_ARM_TLS = 1 << 15, 433 ARM_HWCAP_ARM_VFPv4 = 1 << 16, 434 ARM_HWCAP_ARM_IDIVA = 1 << 17, 435 ARM_HWCAP_ARM_IDIVT = 1 << 18, 436 ARM_HWCAP_ARM_VFPD32 = 1 << 19, 437 ARM_HWCAP_ARM_LPAE = 1 << 20, 438 ARM_HWCAP_ARM_EVTSTRM = 1 << 21, 439 ARM_HWCAP_ARM_FPHP = 1 << 22, 440 ARM_HWCAP_ARM_ASIMDHP = 1 << 23, 441 ARM_HWCAP_ARM_ASIMDDP = 1 << 24, 442 ARM_HWCAP_ARM_ASIMDFHM = 1 << 25, 443 ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26, 444 ARM_HWCAP_ARM_I8MM = 1 << 27, 445 }; 446 447 enum { 448 ARM_HWCAP2_ARM_AES = 1 << 0, 449 ARM_HWCAP2_ARM_PMULL = 1 << 1, 450 ARM_HWCAP2_ARM_SHA1 = 1 << 2, 451 ARM_HWCAP2_ARM_SHA2 = 1 << 3, 452 ARM_HWCAP2_ARM_CRC32 = 1 << 4, 453 ARM_HWCAP2_ARM_SB = 1 << 5, 454 ARM_HWCAP2_ARM_SSBS = 1 << 6, 455 }; 456 457 /* The commpage only exists for 32 bit kernels */ 458 459 #define HI_COMMPAGE (intptr_t)0xffff0f00u 460 461 static bool init_guest_commpage(void) 462 { 463 ARMCPU *cpu = ARM_CPU(thread_cpu); 464 int host_page_size = qemu_real_host_page_size(); 465 abi_ptr commpage; 466 void *want; 467 void *addr; 468 469 /* 470 * M-profile allocates maximum of 2GB address space, so can never 471 * allocate the commpage. Skip it. 472 */ 473 if (arm_feature(&cpu->env, ARM_FEATURE_M)) { 474 return true; 475 } 476 477 commpage = HI_COMMPAGE & -host_page_size; 478 want = g2h_untagged(commpage); 479 addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE, 480 MAP_ANONYMOUS | MAP_PRIVATE | 481 (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE), 482 -1, 0); 483 484 if (addr == MAP_FAILED) { 485 perror("Allocating guest commpage"); 486 exit(EXIT_FAILURE); 487 } 488 if (addr != want) { 489 return false; 490 } 491 492 /* Set kernel helper versions; rest of page is 0. 
*/ 493 __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu)); 494 495 if (mprotect(addr, host_page_size, PROT_READ)) { 496 perror("Protecting guest commpage"); 497 exit(EXIT_FAILURE); 498 } 499 500 page_set_flags(commpage, commpage | (host_page_size - 1), 501 PAGE_READ | PAGE_EXEC | PAGE_VALID); 502 return true; 503 } 504 505 #define ELF_HWCAP get_elf_hwcap() 506 #define ELF_HWCAP2 get_elf_hwcap2() 507 508 uint32_t get_elf_hwcap(void) 509 { 510 ARMCPU *cpu = ARM_CPU(thread_cpu); 511 uint32_t hwcaps = 0; 512 513 hwcaps |= ARM_HWCAP_ARM_SWP; 514 hwcaps |= ARM_HWCAP_ARM_HALF; 515 hwcaps |= ARM_HWCAP_ARM_THUMB; 516 hwcaps |= ARM_HWCAP_ARM_FAST_MULT; 517 518 /* probe for the extra features */ 519 #define GET_FEATURE(feat, hwcap) \ 520 do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) 521 522 #define GET_FEATURE_ID(feat, hwcap) \ 523 do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) 524 525 /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */ 526 GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); 527 GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT); 528 GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE); 529 GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON); 530 GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); 531 GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE); 532 GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA); 533 GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT); 534 GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP); 535 536 if (cpu_isar_feature(aa32_fpsp_v3, cpu) || 537 cpu_isar_feature(aa32_fpdp_v3, cpu)) { 538 hwcaps |= ARM_HWCAP_ARM_VFPv3; 539 if (cpu_isar_feature(aa32_simd_r32, cpu)) { 540 hwcaps |= ARM_HWCAP_ARM_VFPD32; 541 } else { 542 hwcaps |= ARM_HWCAP_ARM_VFPv3D16; 543 } 544 } 545 GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4); 546 /* 547 * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same 548 * isar_feature function for both. The kernel reports them as two hwcaps. 
549 */ 550 GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP); 551 GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP); 552 GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP); 553 GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM); 554 GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16); 555 GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM); 556 557 return hwcaps; 558 } 559 560 uint64_t get_elf_hwcap2(void) 561 { 562 ARMCPU *cpu = ARM_CPU(thread_cpu); 563 uint64_t hwcaps = 0; 564 565 GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES); 566 GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL); 567 GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1); 568 GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2); 569 GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32); 570 GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB); 571 GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS); 572 return hwcaps; 573 } 574 575 const char *elf_hwcap_str(uint32_t bit) 576 { 577 static const char *hwcap_str[] = { 578 [__builtin_ctz(ARM_HWCAP_ARM_SWP )] = "swp", 579 [__builtin_ctz(ARM_HWCAP_ARM_HALF )] = "half", 580 [__builtin_ctz(ARM_HWCAP_ARM_THUMB )] = "thumb", 581 [__builtin_ctz(ARM_HWCAP_ARM_26BIT )] = "26bit", 582 [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult", 583 [__builtin_ctz(ARM_HWCAP_ARM_FPA )] = "fpa", 584 [__builtin_ctz(ARM_HWCAP_ARM_VFP )] = "vfp", 585 [__builtin_ctz(ARM_HWCAP_ARM_EDSP )] = "edsp", 586 [__builtin_ctz(ARM_HWCAP_ARM_JAVA )] = "java", 587 [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT )] = "iwmmxt", 588 [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH )] = "crunch", 589 [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE )] = "thumbee", 590 [__builtin_ctz(ARM_HWCAP_ARM_NEON )] = "neon", 591 [__builtin_ctz(ARM_HWCAP_ARM_VFPv3 )] = "vfpv3", 592 [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16", 593 [__builtin_ctz(ARM_HWCAP_ARM_TLS )] = "tls", 594 [__builtin_ctz(ARM_HWCAP_ARM_VFPv4 )] = "vfpv4", 595 [__builtin_ctz(ARM_HWCAP_ARM_IDIVA )] = "idiva", 596 [__builtin_ctz(ARM_HWCAP_ARM_IDIVT )] = "idivt", 597 [__builtin_ctz(ARM_HWCAP_ARM_VFPD32 )] = "vfpd32", 598 [__builtin_ctz(ARM_HWCAP_ARM_LPAE )] = "lpae", 599 [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM )] = "evtstrm", 600 [__builtin_ctz(ARM_HWCAP_ARM_FPHP )] = "fphp", 601 [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP )] = "asimdhp", 602 [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP )] = "asimddp", 603 [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm", 604 [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16", 605 [__builtin_ctz(ARM_HWCAP_ARM_I8MM )] = "i8mm", 606 }; 607 608 return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; 609 } 610 611 const char *elf_hwcap2_str(uint32_t bit) 612 { 613 static const char *hwcap_str[] = { 614 [__builtin_ctz(ARM_HWCAP2_ARM_AES )] = "aes", 615 [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull", 616 [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1", 617 [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2", 618 [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32", 619 [__builtin_ctz(ARM_HWCAP2_ARM_SB )] = "sb", 620 [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs", 621 }; 622 623 return bit < ARRAY_SIZE(hwcap_str) ? 
hwcap_str[bit] : NULL; 624 } 625 626 #undef GET_FEATURE 627 #undef GET_FEATURE_ID 628 629 #define ELF_PLATFORM get_elf_platform() 630 631 static const char *get_elf_platform(void) 632 { 633 CPUARMState *env = cpu_env(thread_cpu); 634 635 #if TARGET_BIG_ENDIAN 636 # define END "b" 637 #else 638 # define END "l" 639 #endif 640 641 if (arm_feature(env, ARM_FEATURE_V8)) { 642 return "v8" END; 643 } else if (arm_feature(env, ARM_FEATURE_V7)) { 644 if (arm_feature(env, ARM_FEATURE_M)) { 645 return "v7m" END; 646 } else { 647 return "v7" END; 648 } 649 } else if (arm_feature(env, ARM_FEATURE_V6)) { 650 return "v6" END; 651 } else if (arm_feature(env, ARM_FEATURE_V5)) { 652 return "v5" END; 653 } else { 654 return "v4" END; 655 } 656 657 #undef END 658 } 659 660 #else 661 /* 64 bit ARM definitions */ 662 663 #define ELF_ARCH EM_AARCH64 664 #define ELF_CLASS ELFCLASS64 665 #if TARGET_BIG_ENDIAN 666 # define ELF_PLATFORM "aarch64_be" 667 #else 668 # define ELF_PLATFORM "aarch64" 669 #endif 670 671 static inline void init_thread(struct target_pt_regs *regs, 672 struct image_info *infop) 673 { 674 abi_long stack = infop->start_stack; 675 memset(regs, 0, sizeof(*regs)); 676 677 regs->pc = infop->entry & ~0x3ULL; 678 regs->sp = stack; 679 } 680 681 #define ELF_NREG 34 682 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 683 684 static void elf_core_copy_regs(target_elf_gregset_t *regs, 685 const CPUARMState *env) 686 { 687 int i; 688 689 for (i = 0; i < 32; i++) { 690 (*regs)[i] = tswapreg(env->xregs[i]); 691 } 692 (*regs)[32] = tswapreg(env->pc); 693 (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env)); 694 } 695 696 #define USE_ELF_CORE_DUMP 697 #define ELF_EXEC_PAGESIZE 4096 698 699 enum { 700 ARM_HWCAP_A64_FP = 1 << 0, 701 ARM_HWCAP_A64_ASIMD = 1 << 1, 702 ARM_HWCAP_A64_EVTSTRM = 1 << 2, 703 ARM_HWCAP_A64_AES = 1 << 3, 704 ARM_HWCAP_A64_PMULL = 1 << 4, 705 ARM_HWCAP_A64_SHA1 = 1 << 5, 706 ARM_HWCAP_A64_SHA2 = 1 << 6, 707 ARM_HWCAP_A64_CRC32 = 1 << 7, 708 ARM_HWCAP_A64_ATOMICS = 1 << 8, 709 ARM_HWCAP_A64_FPHP = 1 << 9, 710 ARM_HWCAP_A64_ASIMDHP = 1 << 10, 711 ARM_HWCAP_A64_CPUID = 1 << 11, 712 ARM_HWCAP_A64_ASIMDRDM = 1 << 12, 713 ARM_HWCAP_A64_JSCVT = 1 << 13, 714 ARM_HWCAP_A64_FCMA = 1 << 14, 715 ARM_HWCAP_A64_LRCPC = 1 << 15, 716 ARM_HWCAP_A64_DCPOP = 1 << 16, 717 ARM_HWCAP_A64_SHA3 = 1 << 17, 718 ARM_HWCAP_A64_SM3 = 1 << 18, 719 ARM_HWCAP_A64_SM4 = 1 << 19, 720 ARM_HWCAP_A64_ASIMDDP = 1 << 20, 721 ARM_HWCAP_A64_SHA512 = 1 << 21, 722 ARM_HWCAP_A64_SVE = 1 << 22, 723 ARM_HWCAP_A64_ASIMDFHM = 1 << 23, 724 ARM_HWCAP_A64_DIT = 1 << 24, 725 ARM_HWCAP_A64_USCAT = 1 << 25, 726 ARM_HWCAP_A64_ILRCPC = 1 << 26, 727 ARM_HWCAP_A64_FLAGM = 1 << 27, 728 ARM_HWCAP_A64_SSBS = 1 << 28, 729 ARM_HWCAP_A64_SB = 1 << 29, 730 ARM_HWCAP_A64_PACA = 1 << 30, 731 ARM_HWCAP_A64_PACG = 1UL << 31, 732 733 ARM_HWCAP2_A64_DCPODP = 1 << 0, 734 ARM_HWCAP2_A64_SVE2 = 1 << 1, 735 ARM_HWCAP2_A64_SVEAES = 1 << 2, 736 ARM_HWCAP2_A64_SVEPMULL = 1 << 3, 737 ARM_HWCAP2_A64_SVEBITPERM = 1 << 4, 738 ARM_HWCAP2_A64_SVESHA3 = 1 << 5, 739 ARM_HWCAP2_A64_SVESM4 = 1 << 6, 740 ARM_HWCAP2_A64_FLAGM2 = 1 << 7, 741 ARM_HWCAP2_A64_FRINT = 1 << 8, 742 ARM_HWCAP2_A64_SVEI8MM = 1 << 9, 743 ARM_HWCAP2_A64_SVEF32MM = 1 << 10, 744 ARM_HWCAP2_A64_SVEF64MM = 1 << 11, 745 ARM_HWCAP2_A64_SVEBF16 = 1 << 12, 746 ARM_HWCAP2_A64_I8MM = 1 << 13, 747 ARM_HWCAP2_A64_BF16 = 1 << 14, 748 ARM_HWCAP2_A64_DGH = 1 << 15, 749 ARM_HWCAP2_A64_RNG = 1 << 16, 750 ARM_HWCAP2_A64_BTI = 1 << 17, 751 ARM_HWCAP2_A64_MTE = 1 << 18, 752 ARM_HWCAP2_A64_ECV = 1 << 19, 753 
ARM_HWCAP2_A64_AFP = 1 << 20, 754 ARM_HWCAP2_A64_RPRES = 1 << 21, 755 ARM_HWCAP2_A64_MTE3 = 1 << 22, 756 ARM_HWCAP2_A64_SME = 1 << 23, 757 ARM_HWCAP2_A64_SME_I16I64 = 1 << 24, 758 ARM_HWCAP2_A64_SME_F64F64 = 1 << 25, 759 ARM_HWCAP2_A64_SME_I8I32 = 1 << 26, 760 ARM_HWCAP2_A64_SME_F16F32 = 1 << 27, 761 ARM_HWCAP2_A64_SME_B16F32 = 1 << 28, 762 ARM_HWCAP2_A64_SME_F32F32 = 1 << 29, 763 ARM_HWCAP2_A64_SME_FA64 = 1 << 30, 764 ARM_HWCAP2_A64_WFXT = 1ULL << 31, 765 ARM_HWCAP2_A64_EBF16 = 1ULL << 32, 766 ARM_HWCAP2_A64_SVE_EBF16 = 1ULL << 33, 767 ARM_HWCAP2_A64_CSSC = 1ULL << 34, 768 ARM_HWCAP2_A64_RPRFM = 1ULL << 35, 769 ARM_HWCAP2_A64_SVE2P1 = 1ULL << 36, 770 ARM_HWCAP2_A64_SME2 = 1ULL << 37, 771 ARM_HWCAP2_A64_SME2P1 = 1ULL << 38, 772 ARM_HWCAP2_A64_SME_I16I32 = 1ULL << 39, 773 ARM_HWCAP2_A64_SME_BI32I32 = 1ULL << 40, 774 ARM_HWCAP2_A64_SME_B16B16 = 1ULL << 41, 775 ARM_HWCAP2_A64_SME_F16F16 = 1ULL << 42, 776 ARM_HWCAP2_A64_MOPS = 1ULL << 43, 777 ARM_HWCAP2_A64_HBC = 1ULL << 44, 778 }; 779 780 #define ELF_HWCAP get_elf_hwcap() 781 #define ELF_HWCAP2 get_elf_hwcap2() 782 783 #define GET_FEATURE_ID(feat, hwcap) \ 784 do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) 785 786 uint32_t get_elf_hwcap(void) 787 { 788 ARMCPU *cpu = ARM_CPU(thread_cpu); 789 uint32_t hwcaps = 0; 790 791 hwcaps |= ARM_HWCAP_A64_FP; 792 hwcaps |= ARM_HWCAP_A64_ASIMD; 793 hwcaps |= ARM_HWCAP_A64_CPUID; 794 795 /* probe for the extra features */ 796 797 GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES); 798 GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL); 799 GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1); 800 GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2); 801 GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512); 802 GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32); 803 GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3); 804 GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3); 805 GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4); 806 GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); 807 GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS); 808 GET_FEATURE_ID(aa64_lse2, ARM_HWCAP_A64_USCAT); 809 GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM); 810 GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP); 811 GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA); 812 GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE); 813 GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG); 814 GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM); 815 GET_FEATURE_ID(aa64_dit, ARM_HWCAP_A64_DIT); 816 GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT); 817 GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB); 818 GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM); 819 GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP); 820 GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC); 821 GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC); 822 823 return hwcaps; 824 } 825 826 uint64_t get_elf_hwcap2(void) 827 { 828 ARMCPU *cpu = ARM_CPU(thread_cpu); 829 uint64_t hwcaps = 0; 830 831 GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP); 832 GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2); 833 GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES); 834 GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL); 835 GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM); 836 GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3); 837 GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4); 838 GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2); 839 GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT); 840 GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM); 841 
GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM); 842 GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM); 843 GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16); 844 GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM); 845 GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16); 846 GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG); 847 GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI); 848 GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE); 849 GET_FEATURE_ID(aa64_mte3, ARM_HWCAP2_A64_MTE3); 850 GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME | 851 ARM_HWCAP2_A64_SME_F32F32 | 852 ARM_HWCAP2_A64_SME_B16F32 | 853 ARM_HWCAP2_A64_SME_F16F32 | 854 ARM_HWCAP2_A64_SME_I8I32)); 855 GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64); 856 GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64); 857 GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); 858 GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC); 859 GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS); 860 861 return hwcaps; 862 } 863 864 const char *elf_hwcap_str(uint32_t bit) 865 { 866 static const char *hwcap_str[] = { 867 [__builtin_ctz(ARM_HWCAP_A64_FP )] = "fp", 868 [__builtin_ctz(ARM_HWCAP_A64_ASIMD )] = "asimd", 869 [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm", 870 [__builtin_ctz(ARM_HWCAP_A64_AES )] = "aes", 871 [__builtin_ctz(ARM_HWCAP_A64_PMULL )] = "pmull", 872 [__builtin_ctz(ARM_HWCAP_A64_SHA1 )] = "sha1", 873 [__builtin_ctz(ARM_HWCAP_A64_SHA2 )] = "sha2", 874 [__builtin_ctz(ARM_HWCAP_A64_CRC32 )] = "crc32", 875 [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics", 876 [__builtin_ctz(ARM_HWCAP_A64_FPHP )] = "fphp", 877 [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp", 878 [__builtin_ctz(ARM_HWCAP_A64_CPUID )] = "cpuid", 879 [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm", 880 [__builtin_ctz(ARM_HWCAP_A64_JSCVT )] = "jscvt", 881 [__builtin_ctz(ARM_HWCAP_A64_FCMA )] = "fcma", 882 [__builtin_ctz(ARM_HWCAP_A64_LRCPC )] = "lrcpc", 883 [__builtin_ctz(ARM_HWCAP_A64_DCPOP )] = "dcpop", 884 [__builtin_ctz(ARM_HWCAP_A64_SHA3 )] = "sha3", 885 [__builtin_ctz(ARM_HWCAP_A64_SM3 )] = "sm3", 886 [__builtin_ctz(ARM_HWCAP_A64_SM4 )] = "sm4", 887 [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp", 888 [__builtin_ctz(ARM_HWCAP_A64_SHA512 )] = "sha512", 889 [__builtin_ctz(ARM_HWCAP_A64_SVE )] = "sve", 890 [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm", 891 [__builtin_ctz(ARM_HWCAP_A64_DIT )] = "dit", 892 [__builtin_ctz(ARM_HWCAP_A64_USCAT )] = "uscat", 893 [__builtin_ctz(ARM_HWCAP_A64_ILRCPC )] = "ilrcpc", 894 [__builtin_ctz(ARM_HWCAP_A64_FLAGM )] = "flagm", 895 [__builtin_ctz(ARM_HWCAP_A64_SSBS )] = "ssbs", 896 [__builtin_ctz(ARM_HWCAP_A64_SB )] = "sb", 897 [__builtin_ctz(ARM_HWCAP_A64_PACA )] = "paca", 898 [__builtin_ctz(ARM_HWCAP_A64_PACG )] = "pacg", 899 }; 900 901 return bit < ARRAY_SIZE(hwcap_str) ? 
hwcap_str[bit] : NULL; 902 } 903 904 const char *elf_hwcap2_str(uint32_t bit) 905 { 906 static const char *hwcap_str[] = { 907 [__builtin_ctz(ARM_HWCAP2_A64_DCPODP )] = "dcpodp", 908 [__builtin_ctz(ARM_HWCAP2_A64_SVE2 )] = "sve2", 909 [__builtin_ctz(ARM_HWCAP2_A64_SVEAES )] = "sveaes", 910 [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL )] = "svepmull", 911 [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM )] = "svebitperm", 912 [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3 )] = "svesha3", 913 [__builtin_ctz(ARM_HWCAP2_A64_SVESM4 )] = "svesm4", 914 [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2 )] = "flagm2", 915 [__builtin_ctz(ARM_HWCAP2_A64_FRINT )] = "frint", 916 [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM )] = "svei8mm", 917 [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM )] = "svef32mm", 918 [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM )] = "svef64mm", 919 [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16 )] = "svebf16", 920 [__builtin_ctz(ARM_HWCAP2_A64_I8MM )] = "i8mm", 921 [__builtin_ctz(ARM_HWCAP2_A64_BF16 )] = "bf16", 922 [__builtin_ctz(ARM_HWCAP2_A64_DGH )] = "dgh", 923 [__builtin_ctz(ARM_HWCAP2_A64_RNG )] = "rng", 924 [__builtin_ctz(ARM_HWCAP2_A64_BTI )] = "bti", 925 [__builtin_ctz(ARM_HWCAP2_A64_MTE )] = "mte", 926 [__builtin_ctz(ARM_HWCAP2_A64_ECV )] = "ecv", 927 [__builtin_ctz(ARM_HWCAP2_A64_AFP )] = "afp", 928 [__builtin_ctz(ARM_HWCAP2_A64_RPRES )] = "rpres", 929 [__builtin_ctz(ARM_HWCAP2_A64_MTE3 )] = "mte3", 930 [__builtin_ctz(ARM_HWCAP2_A64_SME )] = "sme", 931 [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "smei16i64", 932 [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "smef64f64", 933 [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32 )] = "smei8i32", 934 [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "smef16f32", 935 [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32", 936 [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32", 937 [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64 )] = "smefa64", 938 [__builtin_ctz(ARM_HWCAP2_A64_WFXT )] = "wfxt", 939 [__builtin_ctzll(ARM_HWCAP2_A64_EBF16 )] = "ebf16", 940 [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16 )] = "sveebf16", 941 [__builtin_ctzll(ARM_HWCAP2_A64_CSSC )] = "cssc", 942 [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM )] = "rprfm", 943 [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1 )] = "sve2p1", 944 [__builtin_ctzll(ARM_HWCAP2_A64_SME2 )] = "sme2", 945 [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1 )] = "sme2p1", 946 [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32", 947 [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32", 948 [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16", 949 [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16", 950 [__builtin_ctzll(ARM_HWCAP2_A64_MOPS )] = "mops", 951 [__builtin_ctzll(ARM_HWCAP2_A64_HBC )] = "hbc", 952 }; 953 954 return bit < ARRAY_SIZE(hwcap_str) ? 
hwcap_str[bit] : NULL; 955 } 956 957 #undef GET_FEATURE_ID 958 959 #endif /* not TARGET_AARCH64 */ 960 961 #if TARGET_BIG_ENDIAN 962 # define VDSO_HEADER "vdso-be.c.inc" 963 #else 964 # define VDSO_HEADER "vdso-le.c.inc" 965 #endif 966 967 #endif /* TARGET_ARM */ 968 969 #ifdef TARGET_SPARC 970 #ifdef TARGET_SPARC64 971 972 #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \ 973 | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9) 974 #ifndef TARGET_ABI32 975 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS ) 976 #else 977 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC ) 978 #endif 979 980 #define ELF_CLASS ELFCLASS64 981 #define ELF_ARCH EM_SPARCV9 982 #else 983 #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \ 984 | HWCAP_SPARC_MULDIV) 985 #define ELF_CLASS ELFCLASS32 986 #define ELF_ARCH EM_SPARC 987 #endif /* TARGET_SPARC64 */ 988 989 static inline void init_thread(struct target_pt_regs *regs, 990 struct image_info *infop) 991 { 992 /* Note that target_cpu_copy_regs does not read psr/tstate. */ 993 regs->pc = infop->entry; 994 regs->npc = regs->pc + 4; 995 regs->y = 0; 996 regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong) 997 - TARGET_STACK_BIAS); 998 } 999 #endif /* TARGET_SPARC */ 1000 1001 #ifdef TARGET_PPC 1002 1003 #define ELF_MACHINE PPC_ELF_MACHINE 1004 1005 #if defined(TARGET_PPC64) 1006 1007 #define elf_check_arch(x) ( (x) == EM_PPC64 ) 1008 1009 #define ELF_CLASS ELFCLASS64 1010 1011 #else 1012 1013 #define ELF_CLASS ELFCLASS32 1014 #define EXSTACK_DEFAULT true 1015 1016 #endif 1017 1018 #define ELF_ARCH EM_PPC 1019 1020 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP). 1021 See arch/powerpc/include/asm/cputable.h. */ 1022 enum { 1023 QEMU_PPC_FEATURE_32 = 0x80000000, 1024 QEMU_PPC_FEATURE_64 = 0x40000000, 1025 QEMU_PPC_FEATURE_601_INSTR = 0x20000000, 1026 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000, 1027 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000, 1028 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000, 1029 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000, 1030 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000, 1031 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000, 1032 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000, 1033 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000, 1034 QEMU_PPC_FEATURE_NO_TB = 0x00100000, 1035 QEMU_PPC_FEATURE_POWER4 = 0x00080000, 1036 QEMU_PPC_FEATURE_POWER5 = 0x00040000, 1037 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000, 1038 QEMU_PPC_FEATURE_CELL = 0x00010000, 1039 QEMU_PPC_FEATURE_BOOKE = 0x00008000, 1040 QEMU_PPC_FEATURE_SMT = 0x00004000, 1041 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000, 1042 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000, 1043 QEMU_PPC_FEATURE_PA6T = 0x00000800, 1044 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400, 1045 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200, 1046 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100, 1047 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080, 1048 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040, 1049 1050 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002, 1051 QEMU_PPC_FEATURE_PPC_LE = 0x00000001, 1052 1053 /* Feature definitions in AT_HWCAP2. 
*/ 1054 QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */ 1055 QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */ 1056 QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */ 1057 QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */ 1058 QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */ 1059 QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */ 1060 QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000, 1061 QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000, 1062 QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */ 1063 QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */ 1064 QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */ 1065 QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */ 1066 QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */ 1067 QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */ 1068 QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */ 1069 }; 1070 1071 #define ELF_HWCAP get_elf_hwcap() 1072 1073 static uint32_t get_elf_hwcap(void) 1074 { 1075 PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); 1076 uint32_t features = 0; 1077 1078 /* We don't have to be terribly complete here; the high points are 1079 Altivec/FP/SPE support. Anything else is just a bonus. */ 1080 #define GET_FEATURE(flag, feature) \ 1081 do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) 1082 #define GET_FEATURE2(flags, feature) \ 1083 do { \ 1084 if ((cpu->env.insns_flags2 & flags) == flags) { \ 1085 features |= feature; \ 1086 } \ 1087 } while (0) 1088 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64); 1089 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU); 1090 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC); 1091 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE); 1092 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE); 1093 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE); 1094 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE); 1095 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC); 1096 GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP); 1097 GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX); 1098 GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | 1099 PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206), 1100 QEMU_PPC_FEATURE_ARCH_2_06); 1101 #undef GET_FEATURE 1102 #undef GET_FEATURE2 1103 1104 return features; 1105 } 1106 1107 #define ELF_HWCAP2 get_elf_hwcap2() 1108 1109 static uint32_t get_elf_hwcap2(void) 1110 { 1111 PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); 1112 uint32_t features = 0; 1113 1114 #define GET_FEATURE(flag, feature) \ 1115 do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) 1116 #define GET_FEATURE2(flag, feature) \ 1117 do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0) 1118 1119 GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL); 1120 GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR); 1121 GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | 1122 PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 | 1123 QEMU_PPC_FEATURE2_VEC_CRYPTO); 1124 GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 | 1125 QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128); 1126 GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 | 1127 QEMU_PPC_FEATURE2_MMA); 1128 1129 #undef GET_FEATURE 1130 #undef GET_FEATURE2 1131 1132 return features; 1133 } 1134 1135 /* 1136 * The requirements here are: 1137 * - keep the final alignment of sp (sp & 
0xf) 1138 * - make sure the 32-bit value at the first 16 byte aligned position of 1139 * AUXV is greater than 16 for glibc compatibility. 1140 * AT_IGNOREPPC is used for that. 1141 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC, 1142 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined. 1143 */ 1144 #define DLINFO_ARCH_ITEMS 5 1145 #define ARCH_DLINFO \ 1146 do { \ 1147 PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); \ 1148 /* \ 1149 * Handle glibc compatibility: these magic entries must \ 1150 * be at the lowest addresses in the final auxv. \ 1151 */ \ 1152 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ 1153 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ 1154 NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \ 1155 NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \ 1156 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \ 1157 } while (0) 1158 1159 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop) 1160 { 1161 _regs->gpr[1] = infop->start_stack; 1162 #if defined(TARGET_PPC64) 1163 if (get_ppc64_abi(infop) < 2) { 1164 uint64_t val; 1165 get_user_u64(val, infop->entry + 8); 1166 _regs->gpr[2] = val + infop->load_bias; 1167 get_user_u64(val, infop->entry); 1168 infop->entry = val + infop->load_bias; 1169 } else { 1170 _regs->gpr[12] = infop->entry; /* r12 set to global entry address */ 1171 } 1172 #endif 1173 _regs->nip = infop->entry; 1174 } 1175 1176 /* See linux kernel: arch/powerpc/include/asm/elf.h. */ 1177 #define ELF_NREG 48 1178 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1179 1180 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env) 1181 { 1182 int i; 1183 target_ulong ccr = 0; 1184 1185 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { 1186 (*regs)[i] = tswapreg(env->gpr[i]); 1187 } 1188 1189 (*regs)[32] = tswapreg(env->nip); 1190 (*regs)[33] = tswapreg(env->msr); 1191 (*regs)[35] = tswapreg(env->ctr); 1192 (*regs)[36] = tswapreg(env->lr); 1193 (*regs)[37] = tswapreg(cpu_read_xer(env)); 1194 1195 ccr = ppc_get_cr(env); 1196 (*regs)[38] = tswapreg(ccr); 1197 } 1198 1199 #define USE_ELF_CORE_DUMP 1200 #define ELF_EXEC_PAGESIZE 4096 1201 1202 #ifndef TARGET_PPC64 1203 # define VDSO_HEADER "vdso-32.c.inc" 1204 #elif TARGET_BIG_ENDIAN 1205 # define VDSO_HEADER "vdso-64.c.inc" 1206 #else 1207 # define VDSO_HEADER "vdso-64le.c.inc" 1208 #endif 1209 1210 #endif 1211 1212 #ifdef TARGET_LOONGARCH64 1213 1214 #define ELF_CLASS ELFCLASS64 1215 #define ELF_ARCH EM_LOONGARCH 1216 #define EXSTACK_DEFAULT true 1217 1218 #define elf_check_arch(x) ((x) == EM_LOONGARCH) 1219 1220 #define VDSO_HEADER "vdso.c.inc" 1221 1222 static inline void init_thread(struct target_pt_regs *regs, 1223 struct image_info *infop) 1224 { 1225 /*Set crmd PG,DA = 1,0 */ 1226 regs->csr.crmd = 2 << 3; 1227 regs->csr.era = infop->entry; 1228 regs->regs[3] = infop->start_stack; 1229 } 1230 1231 /* See linux kernel: arch/loongarch/include/asm/elf.h */ 1232 #define ELF_NREG 45 1233 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1234 1235 enum { 1236 TARGET_EF_R0 = 0, 1237 TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33, 1238 TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34, 1239 }; 1240 1241 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1242 const CPULoongArchState *env) 1243 { 1244 int i; 1245 1246 (*regs)[TARGET_EF_R0] = 0; 1247 1248 for (i = 1; i < ARRAY_SIZE(env->gpr); i++) { 1249 (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]); 1250 } 1251 1252 (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc); 1253 (*regs)[TARGET_EF_CSR_BADV] = 
tswapreg(env->CSR_BADV); 1254 } 1255 1256 #define USE_ELF_CORE_DUMP 1257 #define ELF_EXEC_PAGESIZE 4096 1258 1259 #define ELF_HWCAP get_elf_hwcap() 1260 1261 /* See arch/loongarch/include/uapi/asm/hwcap.h */ 1262 enum { 1263 HWCAP_LOONGARCH_CPUCFG = (1 << 0), 1264 HWCAP_LOONGARCH_LAM = (1 << 1), 1265 HWCAP_LOONGARCH_UAL = (1 << 2), 1266 HWCAP_LOONGARCH_FPU = (1 << 3), 1267 HWCAP_LOONGARCH_LSX = (1 << 4), 1268 HWCAP_LOONGARCH_LASX = (1 << 5), 1269 HWCAP_LOONGARCH_CRC32 = (1 << 6), 1270 HWCAP_LOONGARCH_COMPLEX = (1 << 7), 1271 HWCAP_LOONGARCH_CRYPTO = (1 << 8), 1272 HWCAP_LOONGARCH_LVZ = (1 << 9), 1273 HWCAP_LOONGARCH_LBT_X86 = (1 << 10), 1274 HWCAP_LOONGARCH_LBT_ARM = (1 << 11), 1275 HWCAP_LOONGARCH_LBT_MIPS = (1 << 12), 1276 }; 1277 1278 static uint32_t get_elf_hwcap(void) 1279 { 1280 LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu); 1281 uint32_t hwcaps = 0; 1282 1283 hwcaps |= HWCAP_LOONGARCH_CRC32; 1284 1285 if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) { 1286 hwcaps |= HWCAP_LOONGARCH_UAL; 1287 } 1288 1289 if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) { 1290 hwcaps |= HWCAP_LOONGARCH_FPU; 1291 } 1292 1293 if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) { 1294 hwcaps |= HWCAP_LOONGARCH_LAM; 1295 } 1296 1297 if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) { 1298 hwcaps |= HWCAP_LOONGARCH_LSX; 1299 } 1300 1301 if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) { 1302 hwcaps |= HWCAP_LOONGARCH_LASX; 1303 } 1304 1305 return hwcaps; 1306 } 1307 1308 #define ELF_PLATFORM "loongarch" 1309 1310 #endif /* TARGET_LOONGARCH64 */ 1311 1312 #ifdef TARGET_MIPS 1313 1314 #ifdef TARGET_MIPS64 1315 #define ELF_CLASS ELFCLASS64 1316 #else 1317 #define ELF_CLASS ELFCLASS32 1318 #endif 1319 #define ELF_ARCH EM_MIPS 1320 #define EXSTACK_DEFAULT true 1321 1322 #ifdef TARGET_ABI_MIPSN32 1323 #define elf_check_abi(x) ((x) & EF_MIPS_ABI2) 1324 #else 1325 #define elf_check_abi(x) (!((x) & EF_MIPS_ABI2)) 1326 #endif 1327 1328 #define ELF_BASE_PLATFORM get_elf_base_platform() 1329 1330 #define MATCH_PLATFORM_INSN(_flags, _base_platform) \ 1331 do { if ((cpu->env.insn_flags & (_flags)) == _flags) \ 1332 { return _base_platform; } } while (0) 1333 1334 static const char *get_elf_base_platform(void) 1335 { 1336 MIPSCPU *cpu = MIPS_CPU(thread_cpu); 1337 1338 /* 64 bit ISAs goes first */ 1339 MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6"); 1340 MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5"); 1341 MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2"); 1342 MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64"); 1343 MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5"); 1344 MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4"); 1345 MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3"); 1346 1347 /* 32 bit ISAs */ 1348 MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6"); 1349 MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5"); 1350 MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2"); 1351 MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32"); 1352 MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2"); 1353 1354 /* Fallback */ 1355 return "mips"; 1356 } 1357 #undef MATCH_PLATFORM_INSN 1358 1359 static inline void init_thread(struct target_pt_regs *regs, 1360 struct image_info *infop) 1361 { 1362 regs->cp0_status = 2 << CP0St_KSU; 1363 regs->cp0_epc = infop->entry; 1364 regs->regs[29] = infop->start_stack; 1365 } 1366 1367 /* See linux kernel: arch/mips/include/asm/elf.h. */ 1368 #define ELF_NREG 45 1369 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1370 1371 /* See linux kernel: arch/mips/include/asm/reg.h. 
*/ 1372 enum { 1373 #ifdef TARGET_MIPS64 1374 TARGET_EF_R0 = 0, 1375 #else 1376 TARGET_EF_R0 = 6, 1377 #endif 1378 TARGET_EF_R26 = TARGET_EF_R0 + 26, 1379 TARGET_EF_R27 = TARGET_EF_R0 + 27, 1380 TARGET_EF_LO = TARGET_EF_R0 + 32, 1381 TARGET_EF_HI = TARGET_EF_R0 + 33, 1382 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34, 1383 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35, 1384 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36, 1385 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37 1386 }; 1387 1388 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ 1389 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env) 1390 { 1391 int i; 1392 1393 for (i = 0; i < TARGET_EF_R0; i++) { 1394 (*regs)[i] = 0; 1395 } 1396 (*regs)[TARGET_EF_R0] = 0; 1397 1398 for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) { 1399 (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]); 1400 } 1401 1402 (*regs)[TARGET_EF_R26] = 0; 1403 (*regs)[TARGET_EF_R27] = 0; 1404 (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]); 1405 (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]); 1406 (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC); 1407 (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr); 1408 (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status); 1409 (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause); 1410 } 1411 1412 #define USE_ELF_CORE_DUMP 1413 #define ELF_EXEC_PAGESIZE 4096 1414 1415 /* See arch/mips/include/uapi/asm/hwcap.h. */ 1416 enum { 1417 HWCAP_MIPS_R6 = (1 << 0), 1418 HWCAP_MIPS_MSA = (1 << 1), 1419 HWCAP_MIPS_CRC32 = (1 << 2), 1420 HWCAP_MIPS_MIPS16 = (1 << 3), 1421 HWCAP_MIPS_MDMX = (1 << 4), 1422 HWCAP_MIPS_MIPS3D = (1 << 5), 1423 HWCAP_MIPS_SMARTMIPS = (1 << 6), 1424 HWCAP_MIPS_DSP = (1 << 7), 1425 HWCAP_MIPS_DSP2 = (1 << 8), 1426 HWCAP_MIPS_DSP3 = (1 << 9), 1427 HWCAP_MIPS_MIPS16E2 = (1 << 10), 1428 HWCAP_LOONGSON_MMI = (1 << 11), 1429 HWCAP_LOONGSON_EXT = (1 << 12), 1430 HWCAP_LOONGSON_EXT2 = (1 << 13), 1431 HWCAP_LOONGSON_CPUCFG = (1 << 14), 1432 }; 1433 1434 #define ELF_HWCAP get_elf_hwcap() 1435 1436 #define GET_FEATURE_INSN(_flag, _hwcap) \ 1437 do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0) 1438 1439 #define GET_FEATURE_REG_SET(_reg, _mask, _hwcap) \ 1440 do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0) 1441 1442 #define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap) \ 1443 do { \ 1444 if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) { \ 1445 hwcaps |= _hwcap; \ 1446 } \ 1447 } while (0) 1448 1449 static uint32_t get_elf_hwcap(void) 1450 { 1451 MIPSCPU *cpu = MIPS_CPU(thread_cpu); 1452 uint32_t hwcaps = 0; 1453 1454 GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH, 1455 2, HWCAP_MIPS_R6); 1456 GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA); 1457 GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI); 1458 GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT); 1459 1460 return hwcaps; 1461 } 1462 1463 #undef GET_FEATURE_REG_EQU 1464 #undef GET_FEATURE_REG_SET 1465 #undef GET_FEATURE_INSN 1466 1467 #endif /* TARGET_MIPS */ 1468 1469 #ifdef TARGET_MICROBLAZE 1470 1471 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD) 1472 1473 #define ELF_CLASS ELFCLASS32 1474 #define ELF_ARCH EM_MICROBLAZE 1475 1476 static inline void init_thread(struct target_pt_regs *regs, 1477 struct image_info *infop) 1478 { 1479 regs->pc = infop->entry; 1480 regs->r1 = infop->start_stack; 1481 1482 } 1483 1484 #define ELF_EXEC_PAGESIZE 4096 1485 1486 #define 
USE_ELF_CORE_DUMP 1487 #define ELF_NREG 38 1488 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1489 1490 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ 1491 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env) 1492 { 1493 int i, pos = 0; 1494 1495 for (i = 0; i < 32; i++) { 1496 (*regs)[pos++] = tswapreg(env->regs[i]); 1497 } 1498 1499 (*regs)[pos++] = tswapreg(env->pc); 1500 (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env)); 1501 (*regs)[pos++] = 0; 1502 (*regs)[pos++] = tswapreg(env->ear); 1503 (*regs)[pos++] = 0; 1504 (*regs)[pos++] = tswapreg(env->esr); 1505 } 1506 1507 #endif /* TARGET_MICROBLAZE */ 1508 1509 #ifdef TARGET_OPENRISC 1510 1511 #define ELF_ARCH EM_OPENRISC 1512 #define ELF_CLASS ELFCLASS32 1513 #define ELF_DATA ELFDATA2MSB 1514 1515 static inline void init_thread(struct target_pt_regs *regs, 1516 struct image_info *infop) 1517 { 1518 regs->pc = infop->entry; 1519 regs->gpr[1] = infop->start_stack; 1520 } 1521 1522 #define USE_ELF_CORE_DUMP 1523 #define ELF_EXEC_PAGESIZE 8192 1524 1525 /* See linux kernel arch/openrisc/include/asm/elf.h. */ 1526 #define ELF_NREG 34 /* gprs and pc, sr */ 1527 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1528 1529 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1530 const CPUOpenRISCState *env) 1531 { 1532 int i; 1533 1534 for (i = 0; i < 32; i++) { 1535 (*regs)[i] = tswapreg(cpu_get_gpr(env, i)); 1536 } 1537 (*regs)[32] = tswapreg(env->pc); 1538 (*regs)[33] = tswapreg(cpu_get_sr(env)); 1539 } 1540 #define ELF_HWCAP 0 1541 #define ELF_PLATFORM NULL 1542 1543 #endif /* TARGET_OPENRISC */ 1544 1545 #ifdef TARGET_SH4 1546 1547 #define ELF_CLASS ELFCLASS32 1548 #define ELF_ARCH EM_SH 1549 1550 static inline void init_thread(struct target_pt_regs *regs, 1551 struct image_info *infop) 1552 { 1553 /* Check other registers XXXXX */ 1554 regs->pc = infop->entry; 1555 regs->regs[15] = infop->start_stack; 1556 } 1557 1558 /* See linux kernel: arch/sh/include/asm/elf.h. */ 1559 #define ELF_NREG 23 1560 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1561 1562 /* See linux kernel: arch/sh/include/asm/ptrace.h. 
*/ 1563 enum { 1564 TARGET_REG_PC = 16, 1565 TARGET_REG_PR = 17, 1566 TARGET_REG_SR = 18, 1567 TARGET_REG_GBR = 19, 1568 TARGET_REG_MACH = 20, 1569 TARGET_REG_MACL = 21, 1570 TARGET_REG_SYSCALL = 22 1571 }; 1572 1573 static inline void elf_core_copy_regs(target_elf_gregset_t *regs, 1574 const CPUSH4State *env) 1575 { 1576 int i; 1577 1578 for (i = 0; i < 16; i++) { 1579 (*regs)[i] = tswapreg(env->gregs[i]); 1580 } 1581 1582 (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 1583 (*regs)[TARGET_REG_PR] = tswapreg(env->pr); 1584 (*regs)[TARGET_REG_SR] = tswapreg(env->sr); 1585 (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr); 1586 (*regs)[TARGET_REG_MACH] = tswapreg(env->mach); 1587 (*regs)[TARGET_REG_MACL] = tswapreg(env->macl); 1588 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */ 1589 } 1590 1591 #define USE_ELF_CORE_DUMP 1592 #define ELF_EXEC_PAGESIZE 4096 1593 1594 enum { 1595 SH_CPU_HAS_FPU = 0x0001, /* Hardware FPU support */ 1596 SH_CPU_HAS_P2_FLUSH_BUG = 0x0002, /* Need to flush the cache in P2 area */ 1597 SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */ 1598 SH_CPU_HAS_DSP = 0x0008, /* SH-DSP: DSP support */ 1599 SH_CPU_HAS_PERF_COUNTER = 0x0010, /* Hardware performance counters */ 1600 SH_CPU_HAS_PTEA = 0x0020, /* PTEA register */ 1601 SH_CPU_HAS_LLSC = 0x0040, /* movli.l/movco.l */ 1602 SH_CPU_HAS_L2_CACHE = 0x0080, /* Secondary cache / URAM */ 1603 SH_CPU_HAS_OP32 = 0x0100, /* 32-bit instruction support */ 1604 SH_CPU_HAS_PTEAEX = 0x0200, /* PTE ASID Extension support */ 1605 }; 1606 1607 #define ELF_HWCAP get_elf_hwcap() 1608 1609 static uint32_t get_elf_hwcap(void) 1610 { 1611 SuperHCPU *cpu = SUPERH_CPU(thread_cpu); 1612 uint32_t hwcap = 0; 1613 1614 hwcap |= SH_CPU_HAS_FPU; 1615 1616 if (cpu->env.features & SH_FEATURE_SH4A) { 1617 hwcap |= SH_CPU_HAS_LLSC; 1618 } 1619 1620 return hwcap; 1621 } 1622 1623 #endif 1624 1625 #ifdef TARGET_CRIS 1626 1627 #define ELF_CLASS ELFCLASS32 1628 #define ELF_ARCH EM_CRIS 1629 1630 static inline void init_thread(struct target_pt_regs *regs, 1631 struct image_info *infop) 1632 { 1633 regs->erp = infop->entry; 1634 } 1635 1636 #define ELF_EXEC_PAGESIZE 8192 1637 1638 #endif 1639 1640 #ifdef TARGET_M68K 1641 1642 #define ELF_CLASS ELFCLASS32 1643 #define ELF_ARCH EM_68K 1644 1645 /* ??? Does this need to do anything? 1646 #define ELF_PLAT_INIT(_r) */ 1647 1648 static inline void init_thread(struct target_pt_regs *regs, 1649 struct image_info *infop) 1650 { 1651 regs->usp = infop->start_stack; 1652 regs->sr = 0; 1653 regs->pc = infop->entry; 1654 } 1655 1656 /* See linux kernel: arch/m68k/include/asm/elf.h. 
*/ 1657 #define ELF_NREG 20 1658 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1659 1660 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env) 1661 { 1662 (*regs)[0] = tswapreg(env->dregs[1]); 1663 (*regs)[1] = tswapreg(env->dregs[2]); 1664 (*regs)[2] = tswapreg(env->dregs[3]); 1665 (*regs)[3] = tswapreg(env->dregs[4]); 1666 (*regs)[4] = tswapreg(env->dregs[5]); 1667 (*regs)[5] = tswapreg(env->dregs[6]); 1668 (*regs)[6] = tswapreg(env->dregs[7]); 1669 (*regs)[7] = tswapreg(env->aregs[0]); 1670 (*regs)[8] = tswapreg(env->aregs[1]); 1671 (*regs)[9] = tswapreg(env->aregs[2]); 1672 (*regs)[10] = tswapreg(env->aregs[3]); 1673 (*regs)[11] = tswapreg(env->aregs[4]); 1674 (*regs)[12] = tswapreg(env->aregs[5]); 1675 (*regs)[13] = tswapreg(env->aregs[6]); 1676 (*regs)[14] = tswapreg(env->dregs[0]); 1677 (*regs)[15] = tswapreg(env->aregs[7]); 1678 (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */ 1679 (*regs)[17] = tswapreg(env->sr); 1680 (*regs)[18] = tswapreg(env->pc); 1681 (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */ 1682 } 1683 1684 #define USE_ELF_CORE_DUMP 1685 #define ELF_EXEC_PAGESIZE 8192 1686 1687 #endif 1688 1689 #ifdef TARGET_ALPHA 1690 1691 #define ELF_CLASS ELFCLASS64 1692 #define ELF_ARCH EM_ALPHA 1693 1694 static inline void init_thread(struct target_pt_regs *regs, 1695 struct image_info *infop) 1696 { 1697 regs->pc = infop->entry; 1698 regs->ps = 8; 1699 regs->usp = infop->start_stack; 1700 } 1701 1702 #define ELF_EXEC_PAGESIZE 8192 1703 1704 #endif /* TARGET_ALPHA */ 1705 1706 #ifdef TARGET_S390X 1707 1708 #define ELF_CLASS ELFCLASS64 1709 #define ELF_DATA ELFDATA2MSB 1710 #define ELF_ARCH EM_S390 1711 1712 #include "elf.h" 1713 1714 #define ELF_HWCAP get_elf_hwcap() 1715 1716 #define GET_FEATURE(_feat, _hwcap) \ 1717 do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0) 1718 1719 uint32_t get_elf_hwcap(void) 1720 { 1721 /* 1722 * Let's assume we always have esan3 and zarch. 1723 * 31-bit processes can use 64-bit registers (high gprs). 
1724 */ 1725 uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS; 1726 1727 GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE); 1728 GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA); 1729 GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP); 1730 GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM); 1731 if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) && 1732 s390_has_feat(S390_FEAT_ETF3_ENH)) { 1733 hwcap |= HWCAP_S390_ETF3EH; 1734 } 1735 GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS); 1736 GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT); 1737 GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2); 1738 1739 return hwcap; 1740 } 1741 1742 const char *elf_hwcap_str(uint32_t bit) 1743 { 1744 static const char *hwcap_str[] = { 1745 [HWCAP_S390_NR_ESAN3] = "esan3", 1746 [HWCAP_S390_NR_ZARCH] = "zarch", 1747 [HWCAP_S390_NR_STFLE] = "stfle", 1748 [HWCAP_S390_NR_MSA] = "msa", 1749 [HWCAP_S390_NR_LDISP] = "ldisp", 1750 [HWCAP_S390_NR_EIMM] = "eimm", 1751 [HWCAP_S390_NR_DFP] = "dfp", 1752 [HWCAP_S390_NR_HPAGE] = "edat", 1753 [HWCAP_S390_NR_ETF3EH] = "etf3eh", 1754 [HWCAP_S390_NR_HIGH_GPRS] = "highgprs", 1755 [HWCAP_S390_NR_TE] = "te", 1756 [HWCAP_S390_NR_VXRS] = "vx", 1757 [HWCAP_S390_NR_VXRS_BCD] = "vxd", 1758 [HWCAP_S390_NR_VXRS_EXT] = "vxe", 1759 [HWCAP_S390_NR_GS] = "gs", 1760 [HWCAP_S390_NR_VXRS_EXT2] = "vxe2", 1761 [HWCAP_S390_NR_VXRS_PDE] = "vxp", 1762 [HWCAP_S390_NR_SORT] = "sort", 1763 [HWCAP_S390_NR_DFLT] = "dflt", 1764 [HWCAP_S390_NR_NNPA] = "nnpa", 1765 [HWCAP_S390_NR_PCI_MIO] = "pcimio", 1766 [HWCAP_S390_NR_SIE] = "sie", 1767 }; 1768 1769 return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; 1770 } 1771 1772 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 1773 { 1774 regs->psw.addr = infop->entry; 1775 regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ 1776 PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \ 1777 PSW_MASK_32; 1778 regs->gprs[15] = infop->start_stack; 1779 } 1780 1781 /* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs). 
*/ 1782 #define ELF_NREG 27 1783 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1784 1785 enum { 1786 TARGET_REG_PSWM = 0, 1787 TARGET_REG_PSWA = 1, 1788 TARGET_REG_GPRS = 2, 1789 TARGET_REG_ARS = 18, 1790 TARGET_REG_ORIG_R2 = 26, 1791 }; 1792 1793 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1794 const CPUS390XState *env) 1795 { 1796 int i; 1797 uint32_t *aregs; 1798 1799 (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask); 1800 (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr); 1801 for (i = 0; i < 16; i++) { 1802 (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]); 1803 } 1804 aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]); 1805 for (i = 0; i < 16; i++) { 1806 aregs[i] = tswap32(env->aregs[i]); 1807 } 1808 (*regs)[TARGET_REG_ORIG_R2] = 0; 1809 } 1810 1811 #define USE_ELF_CORE_DUMP 1812 #define ELF_EXEC_PAGESIZE 4096 1813 1814 #define VDSO_HEADER "vdso.c.inc" 1815 1816 #endif /* TARGET_S390X */ 1817 1818 #ifdef TARGET_RISCV 1819 1820 #define ELF_ARCH EM_RISCV 1821 1822 #ifdef TARGET_RISCV32 1823 #define ELF_CLASS ELFCLASS32 1824 #define VDSO_HEADER "vdso-32.c.inc" 1825 #else 1826 #define ELF_CLASS ELFCLASS64 1827 #define VDSO_HEADER "vdso-64.c.inc" 1828 #endif 1829 1830 #define ELF_HWCAP get_elf_hwcap() 1831 1832 static uint32_t get_elf_hwcap(void) 1833 { 1834 #define MISA_BIT(EXT) (1 << (EXT - 'A')) 1835 RISCVCPU *cpu = RISCV_CPU(thread_cpu); 1836 uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A') 1837 | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C') 1838 | MISA_BIT('V'); 1839 1840 return cpu->env.misa_ext & mask; 1841 #undef MISA_BIT 1842 } 1843 1844 static inline void init_thread(struct target_pt_regs *regs, 1845 struct image_info *infop) 1846 { 1847 regs->sepc = infop->entry; 1848 regs->sp = infop->start_stack; 1849 } 1850 1851 #define ELF_EXEC_PAGESIZE 4096 1852 1853 #endif /* TARGET_RISCV */ 1854 1855 #ifdef TARGET_HPPA 1856 1857 #define ELF_CLASS ELFCLASS32 1858 #define ELF_ARCH EM_PARISC 1859 #define ELF_PLATFORM "PARISC" 1860 #define STACK_GROWS_DOWN 0 1861 #define STACK_ALIGNMENT 64 1862 1863 #define VDSO_HEADER "vdso.c.inc" 1864 1865 static inline void init_thread(struct target_pt_regs *regs, 1866 struct image_info *infop) 1867 { 1868 regs->iaoq[0] = infop->entry; 1869 regs->iaoq[1] = infop->entry + 4; 1870 regs->gr[23] = 0; 1871 regs->gr[24] = infop->argv; 1872 regs->gr[25] = infop->argc; 1873 /* The top-of-stack contains a linkage buffer. */ 1874 regs->gr[30] = infop->start_stack + 64; 1875 regs->gr[31] = infop->entry; 1876 } 1877 1878 #define LO_COMMPAGE 0 1879 1880 static bool init_guest_commpage(void) 1881 { 1882 /* If reserved_va, then we have already mapped 0 page on the host. */ 1883 if (!reserved_va) { 1884 void *want, *addr; 1885 1886 want = g2h_untagged(LO_COMMPAGE); 1887 addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE, 1888 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0); 1889 if (addr == MAP_FAILED) { 1890 perror("Allocating guest commpage"); 1891 exit(EXIT_FAILURE); 1892 } 1893 if (addr != want) { 1894 return false; 1895 } 1896 } 1897 1898 /* 1899 * On Linux, page zero is normally marked execute only + gateway. 1900 * Normal read or write is supposed to fail (thus PROT_NONE above), 1901 * but specific offsets have kernel code mapped to raise permissions 1902 * and implement syscalls. Here, simply mark the page executable. 1903 * Special case the entry points during translation (see do_page_zero). 
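 * Marking the page executable lets guest branches into it reach that
 * special-case code instead of faulting on the fetch.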
1904 */ 1905 page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, 1906 PAGE_EXEC | PAGE_VALID); 1907 return true; 1908 } 1909 1910 #endif /* TARGET_HPPA */ 1911 1912 #ifdef TARGET_XTENSA 1913 1914 #define ELF_CLASS ELFCLASS32 1915 #define ELF_ARCH EM_XTENSA 1916 1917 static inline void init_thread(struct target_pt_regs *regs, 1918 struct image_info *infop) 1919 { 1920 regs->windowbase = 0; 1921 regs->windowstart = 1; 1922 regs->areg[1] = infop->start_stack; 1923 regs->pc = infop->entry; 1924 if (info_is_fdpic(infop)) { 1925 regs->areg[4] = infop->loadmap_addr; 1926 regs->areg[5] = infop->interpreter_loadmap_addr; 1927 if (infop->interpreter_loadmap_addr) { 1928 regs->areg[6] = infop->interpreter_pt_dynamic_addr; 1929 } else { 1930 regs->areg[6] = infop->pt_dynamic_addr; 1931 } 1932 } 1933 } 1934 1935 /* See linux kernel: arch/xtensa/include/asm/elf.h. */ 1936 #define ELF_NREG 128 1937 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1938 1939 enum { 1940 TARGET_REG_PC, 1941 TARGET_REG_PS, 1942 TARGET_REG_LBEG, 1943 TARGET_REG_LEND, 1944 TARGET_REG_LCOUNT, 1945 TARGET_REG_SAR, 1946 TARGET_REG_WINDOWSTART, 1947 TARGET_REG_WINDOWBASE, 1948 TARGET_REG_THREADPTR, 1949 TARGET_REG_AR0 = 64, 1950 }; 1951 1952 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1953 const CPUXtensaState *env) 1954 { 1955 unsigned i; 1956 1957 (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 1958 (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM); 1959 (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]); 1960 (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]); 1961 (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]); 1962 (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]); 1963 (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]); 1964 (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]); 1965 (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]); 1966 xtensa_sync_phys_from_window((CPUXtensaState *)env); 1967 for (i = 0; i < env->config->nareg; ++i) { 1968 (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]); 1969 } 1970 } 1971 1972 #define USE_ELF_CORE_DUMP 1973 #define ELF_EXEC_PAGESIZE 4096 1974 1975 #endif /* TARGET_XTENSA */ 1976 1977 #ifdef TARGET_HEXAGON 1978 1979 #define ELF_CLASS ELFCLASS32 1980 #define ELF_ARCH EM_HEXAGON 1981 1982 static inline void init_thread(struct target_pt_regs *regs, 1983 struct image_info *infop) 1984 { 1985 regs->sepc = infop->entry; 1986 regs->sp = infop->start_stack; 1987 } 1988 1989 #endif /* TARGET_HEXAGON */ 1990 1991 #ifndef ELF_BASE_PLATFORM 1992 #define ELF_BASE_PLATFORM (NULL) 1993 #endif 1994 1995 #ifndef ELF_PLATFORM 1996 #define ELF_PLATFORM (NULL) 1997 #endif 1998 1999 #ifndef ELF_MACHINE 2000 #define ELF_MACHINE ELF_ARCH 2001 #endif 2002 2003 #ifndef elf_check_arch 2004 #define elf_check_arch(x) ((x) == ELF_ARCH) 2005 #endif 2006 2007 #ifndef elf_check_abi 2008 #define elf_check_abi(x) (1) 2009 #endif 2010 2011 #ifndef ELF_HWCAP 2012 #define ELF_HWCAP 0 2013 #endif 2014 2015 #ifndef STACK_GROWS_DOWN 2016 #define STACK_GROWS_DOWN 1 2017 #endif 2018 2019 #ifndef STACK_ALIGNMENT 2020 #define STACK_ALIGNMENT 16 2021 #endif 2022 2023 #ifdef TARGET_ABI32 2024 #undef ELF_CLASS 2025 #define ELF_CLASS ELFCLASS32 2026 #undef bswaptls 2027 #define bswaptls(ptr) bswap32s(ptr) 2028 #endif 2029 2030 #ifndef EXSTACK_DEFAULT 2031 #define EXSTACK_DEFAULT false 2032 #endif 2033 2034 #include "elf.h" 2035 2036 /* We must delay the following stanzas until after "elf.h". 
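 * In particular, "elf.h" selects the elf32/elf64 structure and constant
 * definitions (struct elfhdr, elf_phdr, ...) from the ELF_CLASS fixed above.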
*/ 2037 #if defined(TARGET_AARCH64) 2038 2039 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2040 const uint32_t *data, 2041 struct image_info *info, 2042 Error **errp) 2043 { 2044 if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { 2045 if (pr_datasz != sizeof(uint32_t)) { 2046 error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND"); 2047 return false; 2048 } 2049 /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */ 2050 info->note_flags = *data; 2051 } 2052 return true; 2053 } 2054 #define ARCH_USE_GNU_PROPERTY 1 2055 2056 #else 2057 2058 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2059 const uint32_t *data, 2060 struct image_info *info, 2061 Error **errp) 2062 { 2063 g_assert_not_reached(); 2064 } 2065 #define ARCH_USE_GNU_PROPERTY 0 2066 2067 #endif 2068 2069 struct exec 2070 { 2071 unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 2072 unsigned int a_text; /* length of text, in bytes */ 2073 unsigned int a_data; /* length of data, in bytes */ 2074 unsigned int a_bss; /* length of uninitialized data area, in bytes */ 2075 unsigned int a_syms; /* length of symbol table data in file, in bytes */ 2076 unsigned int a_entry; /* start address */ 2077 unsigned int a_trsize; /* length of relocation info for text, in bytes */ 2078 unsigned int a_drsize; /* length of relocation info for data, in bytes */ 2079 }; 2080 2081 2082 #define N_MAGIC(exec) ((exec).a_info & 0xffff) 2083 #define OMAGIC 0407 2084 #define NMAGIC 0410 2085 #define ZMAGIC 0413 2086 #define QMAGIC 0314 2087 2088 #define DLINFO_ITEMS 16 2089 2090 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n) 2091 { 2092 memcpy(to, from, n); 2093 } 2094 2095 #ifdef BSWAP_NEEDED 2096 static void bswap_ehdr(struct elfhdr *ehdr) 2097 { 2098 bswap16s(&ehdr->e_type); /* Object file type */ 2099 bswap16s(&ehdr->e_machine); /* Architecture */ 2100 bswap32s(&ehdr->e_version); /* Object file version */ 2101 bswaptls(&ehdr->e_entry); /* Entry point virtual address */ 2102 bswaptls(&ehdr->e_phoff); /* Program header table file offset */ 2103 bswaptls(&ehdr->e_shoff); /* Section header table file offset */ 2104 bswap32s(&ehdr->e_flags); /* Processor-specific flags */ 2105 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ 2106 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ 2107 bswap16s(&ehdr->e_phnum); /* Program header table entry count */ 2108 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ 2109 bswap16s(&ehdr->e_shnum); /* Section header table entry count */ 2110 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ 2111 } 2112 2113 static void bswap_phdr(struct elf_phdr *phdr, int phnum) 2114 { 2115 int i; 2116 for (i = 0; i < phnum; ++i, ++phdr) { 2117 bswap32s(&phdr->p_type); /* Segment type */ 2118 bswap32s(&phdr->p_flags); /* Segment flags */ 2119 bswaptls(&phdr->p_offset); /* Segment file offset */ 2120 bswaptls(&phdr->p_vaddr); /* Segment virtual address */ 2121 bswaptls(&phdr->p_paddr); /* Segment physical address */ 2122 bswaptls(&phdr->p_filesz); /* Segment size in file */ 2123 bswaptls(&phdr->p_memsz); /* Segment size in memory */ 2124 bswaptls(&phdr->p_align); /* Segment alignment */ 2125 } 2126 } 2127 2128 static void bswap_shdr(struct elf_shdr *shdr, int shnum) 2129 { 2130 int i; 2131 for (i = 0; i < shnum; ++i, ++shdr) { 2132 bswap32s(&shdr->sh_name); 2133 bswap32s(&shdr->sh_type); 2134 bswaptls(&shdr->sh_flags); 2135 bswaptls(&shdr->sh_addr); 2136 
bswaptls(&shdr->sh_offset); 2137 bswaptls(&shdr->sh_size); 2138 bswap32s(&shdr->sh_link); 2139 bswap32s(&shdr->sh_info); 2140 bswaptls(&shdr->sh_addralign); 2141 bswaptls(&shdr->sh_entsize); 2142 } 2143 } 2144 2145 static void bswap_sym(struct elf_sym *sym) 2146 { 2147 bswap32s(&sym->st_name); 2148 bswaptls(&sym->st_value); 2149 bswaptls(&sym->st_size); 2150 bswap16s(&sym->st_shndx); 2151 } 2152 2153 #ifdef TARGET_MIPS 2154 static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) 2155 { 2156 bswap16s(&abiflags->version); 2157 bswap32s(&abiflags->ases); 2158 bswap32s(&abiflags->isa_ext); 2159 bswap32s(&abiflags->flags1); 2160 bswap32s(&abiflags->flags2); 2161 } 2162 #endif 2163 #else 2164 static inline void bswap_ehdr(struct elfhdr *ehdr) { } 2165 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { } 2166 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { } 2167 static inline void bswap_sym(struct elf_sym *sym) { } 2168 #ifdef TARGET_MIPS 2169 static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { } 2170 #endif 2171 #endif 2172 2173 #ifdef USE_ELF_CORE_DUMP 2174 static int elf_core_dump(int, const CPUArchState *); 2175 #endif /* USE_ELF_CORE_DUMP */ 2176 static void load_symbols(struct elfhdr *hdr, const ImageSource *src, 2177 abi_ulong load_bias); 2178 2179 /* Verify the portions of EHDR within E_IDENT for the target. 2180 This can be performed before bswapping the entire header. */ 2181 static bool elf_check_ident(struct elfhdr *ehdr) 2182 { 2183 return (ehdr->e_ident[EI_MAG0] == ELFMAG0 2184 && ehdr->e_ident[EI_MAG1] == ELFMAG1 2185 && ehdr->e_ident[EI_MAG2] == ELFMAG2 2186 && ehdr->e_ident[EI_MAG3] == ELFMAG3 2187 && ehdr->e_ident[EI_CLASS] == ELF_CLASS 2188 && ehdr->e_ident[EI_DATA] == ELF_DATA 2189 && ehdr->e_ident[EI_VERSION] == EV_CURRENT); 2190 } 2191 2192 /* Verify the portions of EHDR outside of E_IDENT for the target. 2193 This has to wait until after bswapping the header. */ 2194 static bool elf_check_ehdr(struct elfhdr *ehdr) 2195 { 2196 return (elf_check_arch(ehdr->e_machine) 2197 && elf_check_abi(ehdr->e_flags) 2198 && ehdr->e_ehsize == sizeof(struct elfhdr) 2199 && ehdr->e_phentsize == sizeof(struct elf_phdr) 2200 && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN)); 2201 } 2202 2203 /* 2204 * 'copy_elf_strings()' copies argument/envelope strings from user 2205 * memory to free pages in kernel mem. These are in a format ready 2206 * to be put directly into the top of new user memory. 2207 * 2208 */ 2209 static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch, 2210 abi_ulong p, abi_ulong stack_limit) 2211 { 2212 char *tmp; 2213 int len, i; 2214 abi_ulong top = p; 2215 2216 if (!p) { 2217 return 0; /* bullet-proofing */ 2218 } 2219 2220 if (STACK_GROWS_DOWN) { 2221 int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1; 2222 for (i = argc - 1; i >= 0; --i) { 2223 tmp = argv[i]; 2224 if (!tmp) { 2225 fprintf(stderr, "VFS: argc is wrong"); 2226 exit(-1); 2227 } 2228 len = strlen(tmp) + 1; 2229 tmp += len; 2230 2231 if (len > (p - stack_limit)) { 2232 return 0; 2233 } 2234 while (len) { 2235 int bytes_to_copy = (len > offset) ? 
offset : len; 2236 tmp -= bytes_to_copy; 2237 p -= bytes_to_copy; 2238 offset -= bytes_to_copy; 2239 len -= bytes_to_copy; 2240 2241 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy); 2242 2243 if (offset == 0) { 2244 memcpy_to_target(p, scratch, top - p); 2245 top = p; 2246 offset = TARGET_PAGE_SIZE; 2247 } 2248 } 2249 } 2250 if (p != top) { 2251 memcpy_to_target(p, scratch + offset, top - p); 2252 } 2253 } else { 2254 int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE); 2255 for (i = 0; i < argc; ++i) { 2256 tmp = argv[i]; 2257 if (!tmp) { 2258 fprintf(stderr, "VFS: argc is wrong"); 2259 exit(-1); 2260 } 2261 len = strlen(tmp) + 1; 2262 if (len > (stack_limit - p)) { 2263 return 0; 2264 } 2265 while (len) { 2266 int bytes_to_copy = (len > remaining) ? remaining : len; 2267 2268 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy); 2269 2270 tmp += bytes_to_copy; 2271 remaining -= bytes_to_copy; 2272 p += bytes_to_copy; 2273 len -= bytes_to_copy; 2274 2275 if (remaining == 0) { 2276 memcpy_to_target(top, scratch, p - top); 2277 top = p; 2278 remaining = TARGET_PAGE_SIZE; 2279 } 2280 } 2281 } 2282 if (p != top) { 2283 memcpy_to_target(top, scratch, p - top); 2284 } 2285 } 2286 2287 return p; 2288 } 2289 2290 /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of 2291 * argument/environment space. Newer kernels (>2.6.33) allow more, 2292 * dependent on stack size, but guarantee at least 32 pages for 2293 * backwards compatibility. 2294 */ 2295 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE) 2296 2297 static abi_ulong setup_arg_pages(struct linux_binprm *bprm, 2298 struct image_info *info) 2299 { 2300 abi_ulong size, error, guard; 2301 int prot; 2302 2303 size = guest_stack_size; 2304 if (size < STACK_LOWER_LIMIT) { 2305 size = STACK_LOWER_LIMIT; 2306 } 2307 2308 if (STACK_GROWS_DOWN) { 2309 guard = TARGET_PAGE_SIZE; 2310 if (guard < qemu_real_host_page_size()) { 2311 guard = qemu_real_host_page_size(); 2312 } 2313 } else { 2314 /* no guard page for hppa target where stack grows upwards. */ 2315 guard = 0; 2316 } 2317 2318 prot = PROT_READ | PROT_WRITE; 2319 if (info->exec_stack) { 2320 prot |= PROT_EXEC; 2321 } 2322 error = target_mmap(0, size + guard, prot, 2323 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 2324 if (error == -1) { 2325 perror("mmap stack"); 2326 exit(-1); 2327 } 2328 2329 /* We reserve one extra page at the top of the stack as guard. */ 2330 if (STACK_GROWS_DOWN) { 2331 target_mprotect(error, guard, PROT_NONE); 2332 info->stack_limit = error + guard; 2333 return info->stack_limit + size - sizeof(void *); 2334 } else { 2335 info->stack_limit = error + size; 2336 return error; 2337 } 2338 } 2339 2340 /** 2341 * zero_bss: 2342 * 2343 * Map and zero the bss. We need to explicitly zero any fractional pages 2344 * after the data section (i.e. bss). Return false on mapping failure. 2345 */ 2346 static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss, 2347 int prot, Error **errp) 2348 { 2349 abi_ulong align_bss; 2350 2351 /* We only expect writable bss; the code segment shouldn't need this. */ 2352 if (!(prot & PROT_WRITE)) { 2353 error_setg(errp, "PT_LOAD with non-writable bss"); 2354 return false; 2355 } 2356 2357 align_bss = TARGET_PAGE_ALIGN(start_bss); 2358 end_bss = TARGET_PAGE_ALIGN(end_bss); 2359 2360 if (start_bss < align_bss) { 2361 int flags = page_get_flags(start_bss); 2362 2363 if (!(flags & PAGE_BITS)) { 2364 /* 2365 * The whole address space of the executable was reserved 2366 * at the start, therefore all pages will be VALID. 
2367 * But assuming there are no PROT_NONE PT_LOAD segments, 2368 * a PROT_NONE page means no data all bss, and we can 2369 * simply extend the new anon mapping back to the start 2370 * of the page of bss. 2371 */ 2372 align_bss -= TARGET_PAGE_SIZE; 2373 } else { 2374 /* 2375 * The start of the bss shares a page with something. 2376 * The only thing that we expect is the data section, 2377 * which would already be marked writable. 2378 * Overlapping the RX code segment seems malformed. 2379 */ 2380 if (!(flags & PAGE_WRITE)) { 2381 error_setg(errp, "PT_LOAD with bss overlapping " 2382 "non-writable page"); 2383 return false; 2384 } 2385 2386 /* The page is already mapped and writable. */ 2387 memset(g2h_untagged(start_bss), 0, align_bss - start_bss); 2388 } 2389 } 2390 2391 if (align_bss < end_bss && 2392 target_mmap(align_bss, end_bss - align_bss, prot, 2393 MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) { 2394 error_setg_errno(errp, errno, "Error mapping bss"); 2395 return false; 2396 } 2397 return true; 2398 } 2399 2400 #if defined(TARGET_ARM) 2401 static int elf_is_fdpic(struct elfhdr *exec) 2402 { 2403 return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC; 2404 } 2405 #elif defined(TARGET_XTENSA) 2406 static int elf_is_fdpic(struct elfhdr *exec) 2407 { 2408 return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC; 2409 } 2410 #else 2411 /* Default implementation, always false. */ 2412 static int elf_is_fdpic(struct elfhdr *exec) 2413 { 2414 return 0; 2415 } 2416 #endif 2417 2418 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp) 2419 { 2420 uint16_t n; 2421 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs; 2422 2423 /* elf32_fdpic_loadseg */ 2424 n = info->nsegs; 2425 while (n--) { 2426 sp -= 12; 2427 put_user_u32(loadsegs[n].addr, sp+0); 2428 put_user_u32(loadsegs[n].p_vaddr, sp+4); 2429 put_user_u32(loadsegs[n].p_memsz, sp+8); 2430 } 2431 2432 /* elf32_fdpic_loadmap */ 2433 sp -= 4; 2434 put_user_u16(0, sp+0); /* version */ 2435 put_user_u16(info->nsegs, sp+2); /* nsegs */ 2436 2437 info->personality = PER_LINUX_FDPIC; 2438 info->loadmap_addr = sp; 2439 2440 return sp; 2441 } 2442 2443 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, 2444 struct elfhdr *exec, 2445 struct image_info *info, 2446 struct image_info *interp_info, 2447 struct image_info *vdso_info) 2448 { 2449 abi_ulong sp; 2450 abi_ulong u_argc, u_argv, u_envp, u_auxv; 2451 int size; 2452 int i; 2453 abi_ulong u_rand_bytes; 2454 uint8_t k_rand_bytes[16]; 2455 abi_ulong u_platform, u_base_platform; 2456 const char *k_platform, *k_base_platform; 2457 const int n = sizeof(elf_addr_t); 2458 2459 sp = p; 2460 2461 /* Needs to be before we load the env/argc/... 
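 * The loadmap address recorded in image_info here is later handed to
 * the guest in registers by init_thread.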
*/ 2462 if (elf_is_fdpic(exec)) { 2463 /* Need 4 byte alignment for these structs */ 2464 sp &= ~3; 2465 sp = loader_build_fdpic_loadmap(info, sp); 2466 info->other_info = interp_info; 2467 if (interp_info) { 2468 interp_info->other_info = info; 2469 sp = loader_build_fdpic_loadmap(interp_info, sp); 2470 info->interpreter_loadmap_addr = interp_info->loadmap_addr; 2471 info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr; 2472 } else { 2473 info->interpreter_loadmap_addr = 0; 2474 info->interpreter_pt_dynamic_addr = 0; 2475 } 2476 } 2477 2478 u_base_platform = 0; 2479 k_base_platform = ELF_BASE_PLATFORM; 2480 if (k_base_platform) { 2481 size_t len = strlen(k_base_platform) + 1; 2482 if (STACK_GROWS_DOWN) { 2483 sp -= (len + n - 1) & ~(n - 1); 2484 u_base_platform = sp; 2485 /* FIXME - check return value of memcpy_to_target() for failure */ 2486 memcpy_to_target(sp, k_base_platform, len); 2487 } else { 2488 memcpy_to_target(sp, k_base_platform, len); 2489 u_base_platform = sp; 2490 sp += len + 1; 2491 } 2492 } 2493 2494 u_platform = 0; 2495 k_platform = ELF_PLATFORM; 2496 if (k_platform) { 2497 size_t len = strlen(k_platform) + 1; 2498 if (STACK_GROWS_DOWN) { 2499 sp -= (len + n - 1) & ~(n - 1); 2500 u_platform = sp; 2501 /* FIXME - check return value of memcpy_to_target() for failure */ 2502 memcpy_to_target(sp, k_platform, len); 2503 } else { 2504 memcpy_to_target(sp, k_platform, len); 2505 u_platform = sp; 2506 sp += len + 1; 2507 } 2508 } 2509 2510 /* Provide 16 byte alignment for the PRNG, and basic alignment for 2511 * the argv and envp pointers. 2512 */ 2513 if (STACK_GROWS_DOWN) { 2514 sp = QEMU_ALIGN_DOWN(sp, 16); 2515 } else { 2516 sp = QEMU_ALIGN_UP(sp, 16); 2517 } 2518 2519 /* 2520 * Generate 16 random bytes for userspace PRNG seeding. 2521 */ 2522 qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes)); 2523 if (STACK_GROWS_DOWN) { 2524 sp -= 16; 2525 u_rand_bytes = sp; 2526 /* FIXME - check return value of memcpy_to_target() for failure */ 2527 memcpy_to_target(sp, k_rand_bytes, 16); 2528 } else { 2529 memcpy_to_target(sp, k_rand_bytes, 16); 2530 u_rand_bytes = sp; 2531 sp += 16; 2532 } 2533 2534 size = (DLINFO_ITEMS + 1) * 2; 2535 if (k_base_platform) { 2536 size += 2; 2537 } 2538 if (k_platform) { 2539 size += 2; 2540 } 2541 if (vdso_info) { 2542 size += 2; 2543 } 2544 #ifdef DLINFO_ARCH_ITEMS 2545 size += DLINFO_ARCH_ITEMS * 2; 2546 #endif 2547 #ifdef ELF_HWCAP2 2548 size += 2; 2549 #endif 2550 info->auxv_len = size * n; 2551 2552 size += envc + argc + 2; 2553 size += 1; /* argc itself */ 2554 size *= n; 2555 2556 /* Allocate space and finalize stack alignment for entry now. */ 2557 if (STACK_GROWS_DOWN) { 2558 u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT); 2559 sp = u_argc; 2560 } else { 2561 u_argc = sp; 2562 sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT); 2563 } 2564 2565 u_argv = u_argc + n; 2566 u_envp = u_argv + (argc + 1) * n; 2567 u_auxv = u_envp + (envc + 1) * n; 2568 info->saved_auxv = u_auxv; 2569 info->argc = argc; 2570 info->envc = envc; 2571 info->argv = u_argv; 2572 info->envp = u_envp; 2573 2574 /* This is correct because Linux defines 2575 * elf_addr_t as Elf32_Off / Elf64_Off 2576 */ 2577 #define NEW_AUX_ENT(id, val) do { \ 2578 put_user_ual(id, u_auxv); u_auxv += n; \ 2579 put_user_ual(val, u_auxv); u_auxv += n; \ 2580 } while(0) 2581 2582 #ifdef ARCH_DLINFO 2583 /* 2584 * ARCH_DLINFO must come first so platform specific code can enforce 2585 * special alignment requirements on the AUXV if necessary (eg. PPC). 
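 * Any entries it emits are accounted for by DLINFO_ARCH_ITEMS when the
 * auxv size is computed above.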
2586 */ 2587 ARCH_DLINFO; 2588 #endif 2589 /* There must be exactly DLINFO_ITEMS entries here, or the assert 2590 * on info->auxv_len will trigger. 2591 */ 2592 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff)); 2593 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr))); 2594 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); 2595 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); 2596 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0)); 2597 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); 2598 NEW_AUX_ENT(AT_ENTRY, info->entry); 2599 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid()); 2600 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid()); 2601 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid()); 2602 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid()); 2603 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP); 2604 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK)); 2605 NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes); 2606 NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE)); 2607 NEW_AUX_ENT(AT_EXECFN, info->file_string); 2608 2609 #ifdef ELF_HWCAP2 2610 NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2); 2611 #endif 2612 2613 if (u_base_platform) { 2614 NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform); 2615 } 2616 if (u_platform) { 2617 NEW_AUX_ENT(AT_PLATFORM, u_platform); 2618 } 2619 if (vdso_info) { 2620 NEW_AUX_ENT(AT_SYSINFO_EHDR, vdso_info->load_addr); 2621 } 2622 NEW_AUX_ENT (AT_NULL, 0); 2623 #undef NEW_AUX_ENT 2624 2625 /* Check that our initial calculation of the auxv length matches how much 2626 * we actually put into it. 2627 */ 2628 assert(info->auxv_len == u_auxv - info->saved_auxv); 2629 2630 put_user_ual(argc, u_argc); 2631 2632 p = info->arg_strings; 2633 for (i = 0; i < argc; ++i) { 2634 put_user_ual(p, u_argv); 2635 u_argv += n; 2636 p += target_strlen(p) + 1; 2637 } 2638 put_user_ual(0, u_argv); 2639 2640 p = info->env_strings; 2641 for (i = 0; i < envc; ++i) { 2642 put_user_ual(p, u_envp); 2643 u_envp += n; 2644 p += target_strlen(p) + 1; 2645 } 2646 put_user_ual(0, u_envp); 2647 2648 return sp; 2649 } 2650 2651 #if defined(HI_COMMPAGE) 2652 #define LO_COMMPAGE -1 2653 #elif defined(LO_COMMPAGE) 2654 #define HI_COMMPAGE 0 2655 #else 2656 #define HI_COMMPAGE 0 2657 #define LO_COMMPAGE -1 2658 #ifndef INIT_GUEST_COMMPAGE 2659 #define init_guest_commpage() true 2660 #endif 2661 #endif 2662 2663 /** 2664 * pgb_try_mmap: 2665 * @addr: host start address 2666 * @addr_last: host last address 2667 * @keep: do not unmap the probe region 2668 * 2669 * Return 1 if [@addr, @addr_last] is not mapped in the host, 2670 * return 0 if it is not available to map, and -1 on mmap error. 2671 * If @keep, the region is left mapped on success, otherwise unmapped. 2672 */ 2673 static int pgb_try_mmap(uintptr_t addr, uintptr_t addr_last, bool keep) 2674 { 2675 size_t size = addr_last - addr + 1; 2676 void *p = mmap((void *)addr, size, PROT_NONE, 2677 MAP_ANONYMOUS | MAP_PRIVATE | 2678 MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0); 2679 int ret; 2680 2681 if (p == MAP_FAILED) { 2682 return errno == EEXIST ? 0 : -1; 2683 } 2684 ret = p == (void *)addr; 2685 if (!keep || !ret) { 2686 munmap(p, size); 2687 } 2688 return ret; 2689 } 2690 2691 /** 2692 * pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t size, uintptr_t brk) 2693 * @addr: host address 2694 * @addr_last: host last address 2695 * @brk: host brk 2696 * 2697 * Like pgb_try_mmap, but additionally reserve some memory following brk. 
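 * Keeping a gap above the host brk avoids colliding with later
 * host-side heap growth.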
2698 */ 2699 static int pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t addr_last, 2700 uintptr_t brk, bool keep) 2701 { 2702 uintptr_t brk_last = brk + 16 * MiB - 1; 2703 2704 /* Do not map anything close to the host brk. */ 2705 if (addr <= brk_last && brk <= addr_last) { 2706 return 0; 2707 } 2708 return pgb_try_mmap(addr, addr_last, keep); 2709 } 2710 2711 /** 2712 * pgb_try_mmap_set: 2713 * @ga: set of guest addrs 2714 * @base: guest_base 2715 * @brk: host brk 2716 * 2717 * Return true if all @ga can be mapped by the host at @base. 2718 * On success, retain the mapping at index 0 for reserved_va. 2719 */ 2720 2721 typedef struct PGBAddrs { 2722 uintptr_t bounds[3][2]; /* start/last pairs */ 2723 int nbounds; 2724 } PGBAddrs; 2725 2726 static bool pgb_try_mmap_set(const PGBAddrs *ga, uintptr_t base, uintptr_t brk) 2727 { 2728 for (int i = ga->nbounds - 1; i >= 0; --i) { 2729 if (pgb_try_mmap_skip_brk(ga->bounds[i][0] + base, 2730 ga->bounds[i][1] + base, 2731 brk, i == 0 && reserved_va) <= 0) { 2732 return false; 2733 } 2734 } 2735 return true; 2736 } 2737 2738 /** 2739 * pgb_addr_set: 2740 * @ga: output set of guest addrs 2741 * @guest_loaddr: guest image low address 2742 * @guest_loaddr: guest image high address 2743 * @identity: create for identity mapping 2744 * 2745 * Fill in @ga with the image, COMMPAGE and NULL page. 2746 */ 2747 static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr, 2748 abi_ulong guest_hiaddr, bool try_identity) 2749 { 2750 int n; 2751 2752 /* 2753 * With a low commpage, or a guest mapped very low, 2754 * we may not be able to use the identity map. 2755 */ 2756 if (try_identity) { 2757 if (LO_COMMPAGE != -1 && LO_COMMPAGE < mmap_min_addr) { 2758 return false; 2759 } 2760 if (guest_loaddr != 0 && guest_loaddr < mmap_min_addr) { 2761 return false; 2762 } 2763 } 2764 2765 memset(ga, 0, sizeof(*ga)); 2766 n = 0; 2767 2768 if (reserved_va) { 2769 ga->bounds[n][0] = try_identity ? mmap_min_addr : 0; 2770 ga->bounds[n][1] = reserved_va; 2771 n++; 2772 /* LO_COMMPAGE and NULL handled by reserving from 0. */ 2773 } else { 2774 /* Add any LO_COMMPAGE or NULL page. */ 2775 if (LO_COMMPAGE != -1) { 2776 ga->bounds[n][0] = 0; 2777 ga->bounds[n][1] = LO_COMMPAGE + TARGET_PAGE_SIZE - 1; 2778 n++; 2779 } else if (!try_identity) { 2780 ga->bounds[n][0] = 0; 2781 ga->bounds[n][1] = TARGET_PAGE_SIZE - 1; 2782 n++; 2783 } 2784 2785 /* Add the guest image for ET_EXEC. */ 2786 if (guest_loaddr) { 2787 ga->bounds[n][0] = guest_loaddr; 2788 ga->bounds[n][1] = guest_hiaddr; 2789 n++; 2790 } 2791 } 2792 2793 /* 2794 * Temporarily disable 2795 * "comparison is always false due to limited range of data type" 2796 * due to comparison between unsigned and (possible) 0. 2797 */ 2798 #pragma GCC diagnostic push 2799 #pragma GCC diagnostic ignored "-Wtype-limits" 2800 2801 /* Add any HI_COMMPAGE not covered by reserved_va. 
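 * (For example, the Arm kernel-helper page sits near the top of the
 * 32-bit address space.)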
*/ 2802 if (reserved_va < HI_COMMPAGE) { 2803 ga->bounds[n][0] = HI_COMMPAGE & qemu_real_host_page_mask(); 2804 ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1; 2805 n++; 2806 } 2807 2808 #pragma GCC diagnostic pop 2809 2810 ga->nbounds = n; 2811 return true; 2812 } 2813 2814 static void pgb_fail_in_use(const char *image_name) 2815 { 2816 error_report("%s: requires virtual address space that is in use " 2817 "(omit the -B option or choose a different value)", 2818 image_name); 2819 exit(EXIT_FAILURE); 2820 } 2821 2822 static void pgb_fixed(const char *image_name, uintptr_t guest_loaddr, 2823 uintptr_t guest_hiaddr, uintptr_t align) 2824 { 2825 PGBAddrs ga; 2826 uintptr_t brk = (uintptr_t)sbrk(0); 2827 2828 if (!QEMU_IS_ALIGNED(guest_base, align)) { 2829 fprintf(stderr, "Requested guest base %p does not satisfy " 2830 "host minimum alignment (0x%" PRIxPTR ")\n", 2831 (void *)guest_base, align); 2832 exit(EXIT_FAILURE); 2833 } 2834 2835 if (!pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, !guest_base) 2836 || !pgb_try_mmap_set(&ga, guest_base, brk)) { 2837 pgb_fail_in_use(image_name); 2838 } 2839 } 2840 2841 /** 2842 * pgb_find_fallback: 2843 * 2844 * This is a fallback method for finding holes in the host address space 2845 * if we don't have the benefit of being able to access /proc/self/map. 2846 * It can potentially take a very long time as we can only dumbly iterate 2847 * up the host address space seeing if the allocation would work. 2848 */ 2849 static uintptr_t pgb_find_fallback(const PGBAddrs *ga, uintptr_t align, 2850 uintptr_t brk) 2851 { 2852 /* TODO: come up with a better estimate of how much to skip. */ 2853 uintptr_t skip = sizeof(uintptr_t) == 4 ? MiB : GiB; 2854 2855 for (uintptr_t base = skip; ; base += skip) { 2856 base = ROUND_UP(base, align); 2857 if (pgb_try_mmap_set(ga, base, brk)) { 2858 return base; 2859 } 2860 if (base >= -skip) { 2861 return -1; 2862 } 2863 } 2864 } 2865 2866 static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base, 2867 IntervalTreeRoot *root) 2868 { 2869 for (int i = ga->nbounds - 1; i >= 0; --i) { 2870 uintptr_t s = base + ga->bounds[i][0]; 2871 uintptr_t l = base + ga->bounds[i][1]; 2872 IntervalTreeNode *n; 2873 2874 if (l < s) { 2875 /* Wraparound. Skip to advance S to mmap_min_addr. */ 2876 return mmap_min_addr - s; 2877 } 2878 2879 n = interval_tree_iter_first(root, s, l); 2880 if (n != NULL) { 2881 /* Conflict. Skip to advance S to LAST + 1. */ 2882 return n->last - s + 1; 2883 } 2884 } 2885 return 0; /* success */ 2886 } 2887 2888 static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root, 2889 uintptr_t align, uintptr_t brk) 2890 { 2891 uintptr_t last = mmap_min_addr; 2892 uintptr_t base, skip; 2893 2894 while (true) { 2895 base = ROUND_UP(last, align); 2896 if (base < last) { 2897 return -1; 2898 } 2899 2900 skip = pgb_try_itree(ga, base, root); 2901 if (skip == 0) { 2902 break; 2903 } 2904 2905 last = base + skip; 2906 if (last < base) { 2907 return -1; 2908 } 2909 } 2910 2911 /* 2912 * We've chosen 'base' based on holes in the interval tree, 2913 * but we don't yet know if it is a valid host address. 2914 * Because it is the first matching hole, if the host addresses 2915 * are invalid we know there are no further matches. 2916 */ 2917 return pgb_try_mmap_set(ga, base, brk) ? 
base : -1; 2918 } 2919 2920 static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr, 2921 uintptr_t guest_hiaddr, uintptr_t align) 2922 { 2923 IntervalTreeRoot *root; 2924 uintptr_t brk, ret; 2925 PGBAddrs ga; 2926 2927 /* Try the identity map first. */ 2928 if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) { 2929 brk = (uintptr_t)sbrk(0); 2930 if (pgb_try_mmap_set(&ga, 0, brk)) { 2931 guest_base = 0; 2932 return; 2933 } 2934 } 2935 2936 /* 2937 * Rebuild the address set for non-identity map. 2938 * This differs in the mapping of the guest NULL page. 2939 */ 2940 pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, false); 2941 2942 root = read_self_maps(); 2943 2944 /* Read brk after we've read the maps, which will malloc. */ 2945 brk = (uintptr_t)sbrk(0); 2946 2947 if (!root) { 2948 ret = pgb_find_fallback(&ga, align, brk); 2949 } else { 2950 /* 2951 * Reserve the area close to the host brk. 2952 * This will be freed with the rest of the tree. 2953 */ 2954 IntervalTreeNode *b = g_new0(IntervalTreeNode, 1); 2955 b->start = brk; 2956 b->last = brk + 16 * MiB - 1; 2957 interval_tree_insert(b, root); 2958 2959 ret = pgb_find_itree(&ga, root, align, brk); 2960 free_self_maps(root); 2961 } 2962 2963 if (ret == -1) { 2964 int w = TARGET_LONG_BITS / 4; 2965 2966 error_report("%s: Unable to find a guest_base to satisfy all " 2967 "guest address mapping requirements", image_name); 2968 2969 for (int i = 0; i < ga.nbounds; ++i) { 2970 error_printf(" %0*" PRIx64 "-%0*" PRIx64 "\n", 2971 w, (uint64_t)ga.bounds[i][0], 2972 w, (uint64_t)ga.bounds[i][1]); 2973 } 2974 exit(EXIT_FAILURE); 2975 } 2976 guest_base = ret; 2977 } 2978 2979 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, 2980 abi_ulong guest_hiaddr) 2981 { 2982 /* In order to use host shmat, we must be able to honor SHMLBA. */ 2983 uintptr_t align = MAX(SHMLBA, TARGET_PAGE_SIZE); 2984 2985 /* Sanity check the guest binary. */ 2986 if (reserved_va) { 2987 if (guest_hiaddr > reserved_va) { 2988 error_report("%s: requires more than reserved virtual " 2989 "address space (0x%" PRIx64 " > 0x%lx)", 2990 image_name, (uint64_t)guest_hiaddr, reserved_va); 2991 exit(EXIT_FAILURE); 2992 } 2993 } else { 2994 if (guest_hiaddr != (uintptr_t)guest_hiaddr) { 2995 error_report("%s: requires more virtual address space " 2996 "than the host can provide (0x%" PRIx64 ")", 2997 image_name, (uint64_t)guest_hiaddr + 1); 2998 exit(EXIT_FAILURE); 2999 } 3000 } 3001 3002 if (have_guest_base) { 3003 pgb_fixed(image_name, guest_loaddr, guest_hiaddr, align); 3004 } else { 3005 pgb_dynamic(image_name, guest_loaddr, guest_hiaddr, align); 3006 } 3007 3008 /* Reserve and initialize the commpage. */ 3009 if (!init_guest_commpage()) { 3010 /* We have already probed for the commpage being free. */ 3011 g_assert_not_reached(); 3012 } 3013 3014 assert(QEMU_IS_ALIGNED(guest_base, align)); 3015 qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space " 3016 "@ 0x%" PRIx64 "\n", (uint64_t)guest_base); 3017 } 3018 3019 enum { 3020 /* The string "GNU\0" as a magic number. */ 3021 GNU0_MAGIC = const_le32('G' | 'N' << 8 | 'U' << 16), 3022 NOTE_DATA_SZ = 1 * KiB, 3023 NOTE_NAME_SZ = 4, 3024 ELF_GNU_PROPERTY_ALIGN = ELF_CLASS == ELFCLASS32 ? 4 : 8, 3025 }; 3026 3027 /* 3028 * Process a single gnu_property entry. 3029 * Return false for error. 
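 * Each entry is laid out as
 *     uint32_t pr_type;
 *     uint32_t pr_datasz;
 *     uint8_t  pr_data[pr_datasz];   (padded to ELF_GNU_PROPERTY_ALIGN)
 * and entries must be unique and sorted by pr_type.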
3030 */ 3031 static bool parse_elf_property(const uint32_t *data, int *off, int datasz, 3032 struct image_info *info, bool have_prev_type, 3033 uint32_t *prev_type, Error **errp) 3034 { 3035 uint32_t pr_type, pr_datasz, step; 3036 3037 if (*off > datasz || !QEMU_IS_ALIGNED(*off, ELF_GNU_PROPERTY_ALIGN)) { 3038 goto error_data; 3039 } 3040 datasz -= *off; 3041 data += *off / sizeof(uint32_t); 3042 3043 if (datasz < 2 * sizeof(uint32_t)) { 3044 goto error_data; 3045 } 3046 pr_type = data[0]; 3047 pr_datasz = data[1]; 3048 data += 2; 3049 datasz -= 2 * sizeof(uint32_t); 3050 step = ROUND_UP(pr_datasz, ELF_GNU_PROPERTY_ALIGN); 3051 if (step > datasz) { 3052 goto error_data; 3053 } 3054 3055 /* Properties are supposed to be unique and sorted on pr_type. */ 3056 if (have_prev_type && pr_type <= *prev_type) { 3057 if (pr_type == *prev_type) { 3058 error_setg(errp, "Duplicate property in PT_GNU_PROPERTY"); 3059 } else { 3060 error_setg(errp, "Unsorted property in PT_GNU_PROPERTY"); 3061 } 3062 return false; 3063 } 3064 *prev_type = pr_type; 3065 3066 if (!arch_parse_elf_property(pr_type, pr_datasz, data, info, errp)) { 3067 return false; 3068 } 3069 3070 *off += 2 * sizeof(uint32_t) + step; 3071 return true; 3072 3073 error_data: 3074 error_setg(errp, "Ill-formed property in PT_GNU_PROPERTY"); 3075 return false; 3076 } 3077 3078 /* Process NT_GNU_PROPERTY_TYPE_0. */ 3079 static bool parse_elf_properties(const ImageSource *src, 3080 struct image_info *info, 3081 const struct elf_phdr *phdr, 3082 Error **errp) 3083 { 3084 union { 3085 struct elf_note nhdr; 3086 uint32_t data[NOTE_DATA_SZ / sizeof(uint32_t)]; 3087 } note; 3088 3089 int n, off, datasz; 3090 bool have_prev_type; 3091 uint32_t prev_type; 3092 3093 /* Unless the arch requires properties, ignore them. */ 3094 if (!ARCH_USE_GNU_PROPERTY) { 3095 return true; 3096 } 3097 3098 /* If the properties are crazy large, that's too bad. */ 3099 n = phdr->p_filesz; 3100 if (n > sizeof(note)) { 3101 error_setg(errp, "PT_GNU_PROPERTY too large"); 3102 return false; 3103 } 3104 if (n < sizeof(note.nhdr)) { 3105 error_setg(errp, "PT_GNU_PROPERTY too small"); 3106 return false; 3107 } 3108 3109 if (!imgsrc_read(¬e, phdr->p_offset, n, src, errp)) { 3110 return false; 3111 } 3112 3113 /* 3114 * The contents of a valid PT_GNU_PROPERTY is a sequence 3115 * of uint32_t -- swap them all now. 3116 */ 3117 #ifdef BSWAP_NEEDED 3118 for (int i = 0; i < n / 4; i++) { 3119 bswap32s(note.data + i); 3120 } 3121 #endif 3122 3123 /* 3124 * Note that nhdr is 3 words, and that the "name" described by namesz 3125 * immediately follows nhdr and is thus at the 4th word. Further, all 3126 * of the inputs to the kernel's round_up are multiples of 4. 3127 */ 3128 if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 || 3129 note.nhdr.n_namesz != NOTE_NAME_SZ || 3130 note.data[3] != GNU0_MAGIC) { 3131 error_setg(errp, "Invalid note in PT_GNU_PROPERTY"); 3132 return false; 3133 } 3134 off = sizeof(note.nhdr) + NOTE_NAME_SZ; 3135 3136 datasz = note.nhdr.n_descsz + off; 3137 if (datasz > n) { 3138 error_setg(errp, "Invalid note size in PT_GNU_PROPERTY"); 3139 return false; 3140 } 3141 3142 have_prev_type = false; 3143 prev_type = 0; 3144 while (1) { 3145 if (off == datasz) { 3146 return true; /* end, exit ok */ 3147 } 3148 if (!parse_elf_property(note.data, &off, datasz, info, 3149 have_prev_type, &prev_type, errp)) { 3150 return false; 3151 } 3152 have_prev_type = true; 3153 } 3154 } 3155 3156 /** 3157 * load_elf_image: Load an ELF image into the address space. 
3158 * @image_name: the filename of the image, to use in error messages. 3159 * @src: the ImageSource from which to read. 3160 * @info: info collected from the loaded image. 3161 * @ehdr: the ELF header, not yet bswapped. 3162 * @pinterp_name: record any PT_INTERP string found. 3163 * 3164 * On return: @info values will be filled in, as necessary or available. 3165 */ 3166 3167 static void load_elf_image(const char *image_name, const ImageSource *src, 3168 struct image_info *info, struct elfhdr *ehdr, 3169 char **pinterp_name) 3170 { 3171 g_autofree struct elf_phdr *phdr = NULL; 3172 abi_ulong load_addr, load_bias, loaddr, hiaddr, error; 3173 int i, prot_exec; 3174 Error *err = NULL; 3175 3176 /* 3177 * First of all, some simple consistency checks. 3178 * Note that we rely on the bswapped ehdr staying in bprm_buf, 3179 * for later use by load_elf_binary and create_elf_tables. 3180 */ 3181 if (!imgsrc_read(ehdr, 0, sizeof(*ehdr), src, &err)) { 3182 goto exit_errmsg; 3183 } 3184 if (!elf_check_ident(ehdr)) { 3185 error_setg(&err, "Invalid ELF image for this architecture"); 3186 goto exit_errmsg; 3187 } 3188 bswap_ehdr(ehdr); 3189 if (!elf_check_ehdr(ehdr)) { 3190 error_setg(&err, "Invalid ELF image for this architecture"); 3191 goto exit_errmsg; 3192 } 3193 3194 phdr = imgsrc_read_alloc(ehdr->e_phoff, 3195 ehdr->e_phnum * sizeof(struct elf_phdr), 3196 src, &err); 3197 if (phdr == NULL) { 3198 goto exit_errmsg; 3199 } 3200 bswap_phdr(phdr, ehdr->e_phnum); 3201 3202 info->nsegs = 0; 3203 info->pt_dynamic_addr = 0; 3204 3205 mmap_lock(); 3206 3207 /* 3208 * Find the maximum size of the image and allocate an appropriate 3209 * amount of memory to handle that. Locate the interpreter, if any. 3210 */ 3211 loaddr = -1, hiaddr = 0; 3212 info->alignment = 0; 3213 info->exec_stack = EXSTACK_DEFAULT; 3214 for (i = 0; i < ehdr->e_phnum; ++i) { 3215 struct elf_phdr *eppnt = phdr + i; 3216 if (eppnt->p_type == PT_LOAD) { 3217 abi_ulong a = eppnt->p_vaddr & TARGET_PAGE_MASK; 3218 if (a < loaddr) { 3219 loaddr = a; 3220 } 3221 a = eppnt->p_vaddr + eppnt->p_memsz - 1; 3222 if (a > hiaddr) { 3223 hiaddr = a; 3224 } 3225 ++info->nsegs; 3226 info->alignment |= eppnt->p_align; 3227 } else if (eppnt->p_type == PT_INTERP && pinterp_name) { 3228 g_autofree char *interp_name = NULL; 3229 3230 if (*pinterp_name) { 3231 error_setg(&err, "Multiple PT_INTERP entries"); 3232 goto exit_errmsg; 3233 } 3234 3235 interp_name = imgsrc_read_alloc(eppnt->p_offset, eppnt->p_filesz, 3236 src, &err); 3237 if (interp_name == NULL) { 3238 goto exit_errmsg; 3239 } 3240 if (interp_name[eppnt->p_filesz - 1] != 0) { 3241 error_setg(&err, "Invalid PT_INTERP entry"); 3242 goto exit_errmsg; 3243 } 3244 *pinterp_name = g_steal_pointer(&interp_name); 3245 } else if (eppnt->p_type == PT_GNU_PROPERTY) { 3246 if (!parse_elf_properties(src, info, eppnt, &err)) { 3247 goto exit_errmsg; 3248 } 3249 } else if (eppnt->p_type == PT_GNU_STACK) { 3250 info->exec_stack = eppnt->p_flags & PF_X; 3251 } 3252 } 3253 3254 load_addr = loaddr; 3255 3256 if (pinterp_name != NULL) { 3257 if (ehdr->e_type == ET_EXEC) { 3258 /* 3259 * Make sure that the low address does not conflict with 3260 * MMAP_MIN_ADDR or the QEMU application itself. 3261 */ 3262 probe_guest_base(image_name, loaddr, hiaddr); 3263 } else { 3264 abi_ulong align; 3265 3266 /* 3267 * The binary is dynamic, but we still need to 3268 * select guest_base. In this case we pass a size. 
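 * Only the size of the image matters here; the actual placement is
 * chosen by the target_mmap call below.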
3269 */ 3270 probe_guest_base(image_name, 0, hiaddr - loaddr); 3271 3272 /* 3273 * Avoid collision with the loader by providing a different 3274 * default load address. 3275 */ 3276 load_addr += elf_et_dyn_base; 3277 3278 /* 3279 * TODO: Better support for mmap alignment is desirable. 3280 * Since we do not have complete control over the guest 3281 * address space, we prefer the kernel to choose some address 3282 * rather than force the use of LOAD_ADDR via MAP_FIXED. 3283 * But without MAP_FIXED we cannot guarantee alignment, 3284 * only suggest it. 3285 */ 3286 align = pow2ceil(info->alignment); 3287 if (align) { 3288 load_addr &= -align; 3289 } 3290 } 3291 } 3292 3293 /* 3294 * Reserve address space for all of this. 3295 * 3296 * In the case of ET_EXEC, we supply MAP_FIXED_NOREPLACE so that we get 3297 * exactly the address range that is required. Without reserved_va, 3298 * the guest address space is not isolated. We have attempted to avoid 3299 * conflict with the host program itself via probe_guest_base, but using 3300 * MAP_FIXED_NOREPLACE instead of MAP_FIXED provides an extra check. 3301 * 3302 * Otherwise this is ET_DYN, and we are searching for a location 3303 * that can hold the memory space required. If the image is 3304 * pre-linked, LOAD_ADDR will be non-zero, and the kernel should 3305 * honor that address if it happens to be free. 3306 * 3307 * In both cases, we will overwrite pages in this range with mappings 3308 * from the executable. 3309 */ 3310 load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE, 3311 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | 3312 (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0), 3313 -1, 0); 3314 if (load_addr == -1) { 3315 goto exit_mmap; 3316 } 3317 load_bias = load_addr - loaddr; 3318 3319 if (elf_is_fdpic(ehdr)) { 3320 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs = 3321 g_malloc(sizeof(*loadsegs) * info->nsegs); 3322 3323 for (i = 0; i < ehdr->e_phnum; ++i) { 3324 switch (phdr[i].p_type) { 3325 case PT_DYNAMIC: 3326 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias; 3327 break; 3328 case PT_LOAD: 3329 loadsegs->addr = phdr[i].p_vaddr + load_bias; 3330 loadsegs->p_vaddr = phdr[i].p_vaddr; 3331 loadsegs->p_memsz = phdr[i].p_memsz; 3332 ++loadsegs; 3333 break; 3334 } 3335 } 3336 } 3337 3338 info->load_bias = load_bias; 3339 info->code_offset = load_bias; 3340 info->data_offset = load_bias; 3341 info->load_addr = load_addr; 3342 info->entry = ehdr->e_entry + load_bias; 3343 info->start_code = -1; 3344 info->end_code = 0; 3345 info->start_data = -1; 3346 info->end_data = 0; 3347 /* Usual start for brk is after all sections of the main executable. */ 3348 info->brk = TARGET_PAGE_ALIGN(hiaddr + load_bias); 3349 info->elf_flags = ehdr->e_flags; 3350 3351 prot_exec = PROT_EXEC; 3352 #ifdef TARGET_AARCH64 3353 /* 3354 * If the BTI feature is present, this indicates that the executable 3355 * pages of the startup binary should be mapped with PROT_BTI, so that 3356 * branch targets are enforced. 3357 * 3358 * The startup binary is either the interpreter or the static executable. 3359 * The interpreter is responsible for all pages of a dynamic executable. 3360 * 3361 * Elf notes are backward compatible to older cpus. 3362 * Do not enable BTI unless it is supported. 
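 * When enabled, TARGET_PROT_BTI is applied below to each PF_X segment
 * of this image.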
3363 */ 3364 if ((info->note_flags & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) 3365 && (pinterp_name == NULL || *pinterp_name == 0) 3366 && cpu_isar_feature(aa64_bti, ARM_CPU(thread_cpu))) { 3367 prot_exec |= TARGET_PROT_BTI; 3368 } 3369 #endif 3370 3371 for (i = 0; i < ehdr->e_phnum; i++) { 3372 struct elf_phdr *eppnt = phdr + i; 3373 if (eppnt->p_type == PT_LOAD) { 3374 abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em; 3375 int elf_prot = 0; 3376 3377 if (eppnt->p_flags & PF_R) { 3378 elf_prot |= PROT_READ; 3379 } 3380 if (eppnt->p_flags & PF_W) { 3381 elf_prot |= PROT_WRITE; 3382 } 3383 if (eppnt->p_flags & PF_X) { 3384 elf_prot |= prot_exec; 3385 } 3386 3387 vaddr = load_bias + eppnt->p_vaddr; 3388 vaddr_po = vaddr & ~TARGET_PAGE_MASK; 3389 vaddr_ps = vaddr & TARGET_PAGE_MASK; 3390 3391 vaddr_ef = vaddr + eppnt->p_filesz; 3392 vaddr_em = vaddr + eppnt->p_memsz; 3393 3394 /* 3395 * Some segments may be completely empty, with a non-zero p_memsz 3396 * but no backing file segment. 3397 */ 3398 if (eppnt->p_filesz != 0) { 3399 error = imgsrc_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po, 3400 elf_prot, MAP_PRIVATE | MAP_FIXED, 3401 src, eppnt->p_offset - vaddr_po); 3402 if (error == -1) { 3403 goto exit_mmap; 3404 } 3405 } 3406 3407 /* If the load segment requests extra zeros (e.g. bss), map it. */ 3408 if (vaddr_ef < vaddr_em && 3409 !zero_bss(vaddr_ef, vaddr_em, elf_prot, &err)) { 3410 goto exit_errmsg; 3411 } 3412 3413 /* Find the full program boundaries. */ 3414 if (elf_prot & PROT_EXEC) { 3415 if (vaddr < info->start_code) { 3416 info->start_code = vaddr; 3417 } 3418 if (vaddr_ef > info->end_code) { 3419 info->end_code = vaddr_ef; 3420 } 3421 } 3422 if (elf_prot & PROT_WRITE) { 3423 if (vaddr < info->start_data) { 3424 info->start_data = vaddr; 3425 } 3426 if (vaddr_ef > info->end_data) { 3427 info->end_data = vaddr_ef; 3428 } 3429 } 3430 #ifdef TARGET_MIPS 3431 } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) { 3432 Mips_elf_abiflags_v0 abiflags; 3433 3434 if (!imgsrc_read(&abiflags, eppnt->p_offset, sizeof(abiflags), 3435 src, &err)) { 3436 goto exit_errmsg; 3437 } 3438 bswap_mips_abiflags(&abiflags); 3439 info->fp_abi = abiflags.fp_abi; 3440 #endif 3441 } 3442 } 3443 3444 if (info->end_data == 0) { 3445 info->start_data = info->end_code; 3446 info->end_data = info->end_code; 3447 } 3448 3449 if (qemu_log_enabled()) { 3450 load_symbols(ehdr, src, load_bias); 3451 } 3452 3453 debuginfo_report_elf(image_name, src->fd, load_bias); 3454 3455 mmap_unlock(); 3456 3457 close(src->fd); 3458 return; 3459 3460 exit_mmap: 3461 error_setg_errno(&err, errno, "Error mapping file"); 3462 goto exit_errmsg; 3463 exit_errmsg: 3464 error_reportf_err(err, "%s: ", image_name); 3465 exit(-1); 3466 } 3467 3468 static void load_elf_interp(const char *filename, struct image_info *info, 3469 char bprm_buf[BPRM_BUF_SIZE]) 3470 { 3471 struct elfhdr ehdr; 3472 ImageSource src; 3473 int fd, retval; 3474 Error *err = NULL; 3475 3476 fd = open(path(filename), O_RDONLY); 3477 if (fd < 0) { 3478 error_setg_file_open(&err, errno, filename); 3479 error_report_err(err); 3480 exit(-1); 3481 } 3482 3483 retval = read(fd, bprm_buf, BPRM_BUF_SIZE); 3484 if (retval < 0) { 3485 error_setg_errno(&err, errno, "Error reading file header"); 3486 error_reportf_err(err, "%s: ", filename); 3487 exit(-1); 3488 } 3489 3490 src.fd = fd; 3491 src.cache = bprm_buf; 3492 src.cache_size = retval; 3493 3494 load_elf_image(filename, &src, info, &ehdr, NULL); 3495 } 3496 3497 #ifdef VDSO_HEADER 3498 #include VDSO_HEADER 3499 #define 
vdso_image_info() &vdso_image_info 3500 #else 3501 #define vdso_image_info() NULL 3502 #endif 3503 3504 static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso) 3505 { 3506 ImageSource src; 3507 struct elfhdr ehdr; 3508 abi_ulong load_bias, load_addr; 3509 3510 src.fd = -1; 3511 src.cache = vdso->image; 3512 src.cache_size = vdso->image_size; 3513 3514 load_elf_image("<internal-vdso>", &src, info, &ehdr, NULL); 3515 load_addr = info->load_addr; 3516 load_bias = info->load_bias; 3517 3518 /* 3519 * We need to relocate the VDSO image. The one built into the kernel 3520 * is built for a fixed address. The one built for QEMU is not, since 3521 * that requires close control of the guest address space. 3522 * We pre-processed the image to locate all of the addresses that need 3523 * to be updated. 3524 */ 3525 for (unsigned i = 0, n = vdso->reloc_count; i < n; i++) { 3526 abi_ulong *addr = g2h_untagged(load_addr + vdso->relocs[i]); 3527 *addr = tswapal(tswapal(*addr) + load_bias); 3528 } 3529 3530 /* Install signal trampolines, if present. */ 3531 if (vdso->sigreturn_ofs) { 3532 default_sigreturn = load_addr + vdso->sigreturn_ofs; 3533 } 3534 if (vdso->rt_sigreturn_ofs) { 3535 default_rt_sigreturn = load_addr + vdso->rt_sigreturn_ofs; 3536 } 3537 3538 /* Remove write from VDSO segment. */ 3539 target_mprotect(info->start_data, info->end_data - info->start_data, 3540 PROT_READ | PROT_EXEC); 3541 } 3542 3543 static int symfind(const void *s0, const void *s1) 3544 { 3545 struct elf_sym *sym = (struct elf_sym *)s1; 3546 __typeof(sym->st_value) addr = *(uint64_t *)s0; 3547 int result = 0; 3548 3549 if (addr < sym->st_value) { 3550 result = -1; 3551 } else if (addr >= sym->st_value + sym->st_size) { 3552 result = 1; 3553 } 3554 return result; 3555 } 3556 3557 static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr) 3558 { 3559 #if ELF_CLASS == ELFCLASS32 3560 struct elf_sym *syms = s->disas_symtab.elf32; 3561 #else 3562 struct elf_sym *syms = s->disas_symtab.elf64; 3563 #endif 3564 3565 // binary search 3566 struct elf_sym *sym; 3567 3568 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind); 3569 if (sym != NULL) { 3570 return s->disas_strtab + sym->st_name; 3571 } 3572 3573 return ""; 3574 } 3575 3576 /* FIXME: This should use elf_ops.h.inc */ 3577 static int symcmp(const void *s0, const void *s1) 3578 { 3579 struct elf_sym *sym0 = (struct elf_sym *)s0; 3580 struct elf_sym *sym1 = (struct elf_sym *)s1; 3581 return (sym0->st_value < sym1->st_value) 3582 ? -1 3583 : ((sym0->st_value > sym1->st_value) ? 1 : 0); 3584 } 3585 3586 /* Best attempt to load symbols from this ELF object. */ 3587 static void load_symbols(struct elfhdr *hdr, const ImageSource *src, 3588 abi_ulong load_bias) 3589 { 3590 int i, shnum, nsyms, sym_idx = 0, str_idx = 0; 3591 g_autofree struct elf_shdr *shdr = NULL; 3592 char *strings = NULL; 3593 struct elf_sym *syms = NULL; 3594 struct elf_sym *new_syms; 3595 uint64_t segsz; 3596 3597 shnum = hdr->e_shnum; 3598 shdr = imgsrc_read_alloc(hdr->e_shoff, shnum * sizeof(struct elf_shdr), 3599 src, NULL); 3600 if (shdr == NULL) { 3601 return; 3602 } 3603 3604 bswap_shdr(shdr, shnum); 3605 for (i = 0; i < shnum; ++i) { 3606 if (shdr[i].sh_type == SHT_SYMTAB) { 3607 sym_idx = i; 3608 str_idx = shdr[i].sh_link; 3609 goto found; 3610 } 3611 } 3612 3613 /* There will be no symbol table if the file was stripped. */ 3614 return; 3615 3616 found: 3617 /* Now know where the strtab and symtab are. Snarf them. 
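 * Both sections are read whole; the symbols are then filtered down to
 * defined STT_FUNC entries and sorted by address for lookup_symbolxx.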
*/ 3618 3619 segsz = shdr[str_idx].sh_size; 3620 strings = g_try_malloc(segsz); 3621 if (!strings) { 3622 goto give_up; 3623 } 3624 if (!imgsrc_read(strings, shdr[str_idx].sh_offset, segsz, src, NULL)) { 3625 goto give_up; 3626 } 3627 3628 segsz = shdr[sym_idx].sh_size; 3629 if (segsz / sizeof(struct elf_sym) > INT_MAX) { 3630 /* 3631 * Implausibly large symbol table: give up rather than ploughing 3632 * on with the number of symbols calculation overflowing. 3633 */ 3634 goto give_up; 3635 } 3636 nsyms = segsz / sizeof(struct elf_sym); 3637 syms = g_try_malloc(segsz); 3638 if (!syms) { 3639 goto give_up; 3640 } 3641 if (!imgsrc_read(syms, shdr[sym_idx].sh_offset, segsz, src, NULL)) { 3642 goto give_up; 3643 } 3644 3645 for (i = 0; i < nsyms; ) { 3646 bswap_sym(syms + i); 3647 /* Throw away entries which we do not need. */ 3648 if (syms[i].st_shndx == SHN_UNDEF 3649 || syms[i].st_shndx >= SHN_LORESERVE 3650 || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { 3651 if (i < --nsyms) { 3652 syms[i] = syms[nsyms]; 3653 } 3654 } else { 3655 #if defined(TARGET_ARM) || defined (TARGET_MIPS) 3656 /* The bottom address bit marks a Thumb or MIPS16 symbol. */ 3657 syms[i].st_value &= ~(target_ulong)1; 3658 #endif 3659 syms[i].st_value += load_bias; 3660 i++; 3661 } 3662 } 3663 3664 /* No "useful" symbol. */ 3665 if (nsyms == 0) { 3666 goto give_up; 3667 } 3668 3669 /* 3670 * Attempt to free the storage associated with the local symbols 3671 * that we threw away. Whether or not this has any effect on the 3672 * memory allocation depends on the malloc implementation and how 3673 * many symbols we managed to discard. 3674 */ 3675 new_syms = g_try_renew(struct elf_sym, syms, nsyms); 3676 if (new_syms == NULL) { 3677 goto give_up; 3678 } 3679 syms = new_syms; 3680 3681 qsort(syms, nsyms, sizeof(*syms), symcmp); 3682 3683 { 3684 struct syminfo *s = g_new(struct syminfo, 1); 3685 3686 s->disas_strtab = strings; 3687 s->disas_num_syms = nsyms; 3688 #if ELF_CLASS == ELFCLASS32 3689 s->disas_symtab.elf32 = syms; 3690 #else 3691 s->disas_symtab.elf64 = syms; 3692 #endif 3693 s->lookup_symbol = lookup_symbolxx; 3694 s->next = syminfos; 3695 syminfos = s; 3696 } 3697 return; 3698 3699 give_up: 3700 g_free(strings); 3701 g_free(syms); 3702 } 3703 3704 uint32_t get_elf_eflags(int fd) 3705 { 3706 struct elfhdr ehdr; 3707 off_t offset; 3708 int ret; 3709 3710 /* Read ELF header */ 3711 offset = lseek(fd, 0, SEEK_SET); 3712 if (offset == (off_t) -1) { 3713 return 0; 3714 } 3715 ret = read(fd, &ehdr, sizeof(ehdr)); 3716 if (ret < sizeof(ehdr)) { 3717 return 0; 3718 } 3719 offset = lseek(fd, offset, SEEK_SET); 3720 if (offset == (off_t) -1) { 3721 return 0; 3722 } 3723 3724 /* Check ELF signature */ 3725 if (!elf_check_ident(&ehdr)) { 3726 return 0; 3727 } 3728 3729 /* check header */ 3730 bswap_ehdr(&ehdr); 3731 if (!elf_check_ehdr(&ehdr)) { 3732 return 0; 3733 } 3734 3735 /* return architecture id */ 3736 return ehdr.e_flags; 3737 } 3738 3739 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) 3740 { 3741 /* 3742 * We need a copy of the elf header for passing to create_elf_tables. 3743 * We will have overwritten the original when we re-use bprm->buf 3744 * while loading the interpreter. Allocate the storage for this now 3745 * and let elf_load_image do any swapping that may be required. 
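 * create_elf_tables reads e_phoff and e_phnum from this copy when
 * emitting the AT_PHDR, AT_PHENT and AT_PHNUM auxv entries.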
3746 */ 3747 struct elfhdr ehdr; 3748 struct image_info interp_info, vdso_info; 3749 char *elf_interpreter = NULL; 3750 char *scratch; 3751 3752 memset(&interp_info, 0, sizeof(interp_info)); 3753 #ifdef TARGET_MIPS 3754 interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN; 3755 #endif 3756 3757 load_elf_image(bprm->filename, &bprm->src, info, &ehdr, &elf_interpreter); 3758 3759 /* Do this so that we can load the interpreter, if need be. We will 3760 change some of these later */ 3761 bprm->p = setup_arg_pages(bprm, info); 3762 3763 scratch = g_new0(char, TARGET_PAGE_SIZE); 3764 if (STACK_GROWS_DOWN) { 3765 bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 3766 bprm->p, info->stack_limit); 3767 info->file_string = bprm->p; 3768 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 3769 bprm->p, info->stack_limit); 3770 info->env_strings = bprm->p; 3771 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 3772 bprm->p, info->stack_limit); 3773 info->arg_strings = bprm->p; 3774 } else { 3775 info->arg_strings = bprm->p; 3776 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 3777 bprm->p, info->stack_limit); 3778 info->env_strings = bprm->p; 3779 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 3780 bprm->p, info->stack_limit); 3781 info->file_string = bprm->p; 3782 bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 3783 bprm->p, info->stack_limit); 3784 } 3785 3786 g_free(scratch); 3787 3788 if (!bprm->p) { 3789 fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG)); 3790 exit(-1); 3791 } 3792 3793 if (elf_interpreter) { 3794 load_elf_interp(elf_interpreter, &interp_info, bprm->buf); 3795 3796 /* 3797 * While unusual because of ELF_ET_DYN_BASE, if we are unlucky 3798 * with the mappings the interpreter can be loaded above but 3799 * near the main executable, which can leave very little room 3800 * for the heap. 3801 * If the current brk has less than 16MB, use the end of the 3802 * interpreter. 3803 */ 3804 if (interp_info.brk > info->brk && 3805 interp_info.load_bias - info->brk < 16 * MiB) { 3806 info->brk = interp_info.brk; 3807 } 3808 3809 /* If the program interpreter is one of these two, then assume 3810 an iBCS2 image. Otherwise assume a native linux image. */ 3811 3812 if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 3813 || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) { 3814 info->personality = PER_SVR4; 3815 3816 /* Why this, you ask??? Well SVr4 maps page 0 as read-only, 3817 and some applications "depend" upon this behavior. Since 3818 we do not have the power to recompile these, we emulate 3819 the SVr4 behavior. Sigh. */ 3820 target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC, 3821 MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS, 3822 -1, 0); 3823 } 3824 #ifdef TARGET_MIPS 3825 info->interp_fp_abi = interp_info.fp_abi; 3826 #endif 3827 } 3828 3829 /* 3830 * Load a vdso if available, which will amongst other things contain the 3831 * signal trampolines. Otherwise, allocate a separate page for them. 
3832      */
3833     const VdsoImageInfo *vdso = vdso_image_info();
3834     if (vdso) {
3835         load_elf_vdso(&vdso_info, vdso);
3836         info->vdso = vdso_info.load_bias;
3837     } else if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) {
3838         abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE,
3839                                           PROT_READ | PROT_WRITE,
3840                                           MAP_PRIVATE | MAP_ANON, -1, 0);
3841         if (tramp_page == -1) {
3842             return -errno;
3843         }
3844
3845         setup_sigtramp(tramp_page);
3846         target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC);
3847     }
3848
3849     bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &ehdr, info,
3850                                 elf_interpreter ? &interp_info : NULL,
3851                                 vdso ? &vdso_info : NULL);
3852     info->start_stack = bprm->p;
3853
3854     /* If we have an interpreter, set that as the program's entry point.
3855        Copy the load_bias as well, to help PPC64 interpret the entry
3856        point as a function descriptor.  Do this after creating elf tables
3857        so that we copy the original program entry point into the AUXV. */
3858     if (elf_interpreter) {
3859         info->load_bias = interp_info.load_bias;
3860         info->entry = interp_info.entry;
3861         g_free(elf_interpreter);
3862     }
3863
3864 #ifdef USE_ELF_CORE_DUMP
3865     bprm->core_dump = &elf_core_dump;
3866 #endif
3867
3868     return 0;
3869 }
3870
3871 #ifdef USE_ELF_CORE_DUMP
3872 #include "exec/translate-all.h"
3873
3874 /*
3875  * Definitions to generate Intel SVR4-like core files.
3876  * These mostly have the same names as the SVR4 types with "target_elf_"
3877  * tacked on the front to prevent clashes with linux definitions,
3878  * and the typedef forms have been avoided.  This is mostly like
3879  * the SVR4 structure, but more Linuxy, with things that Linux does
3880  * not support and which gdb doesn't really use excluded.
3881  *
3882  * Fields we don't dump (their contents are zero) in linux-user qemu
3883  * are marked with XXX.
3884  *
3885  * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
3886  *
3887  * Porting ELF coredump support to a target is a (quite) simple process. First
3888  * you define USE_ELF_CORE_DUMP in the target ELF code (where init_thread()
3889  * for the target resides):
3890  *
3891  *     #define USE_ELF_CORE_DUMP
3892  *
3893  * Next you define the type of the register set used for dumping.  The ELF
3894  * specification requires an array of elf_greg_t with ELF_NREG elements.
3895  *
3896  *     typedef <target_regtype> target_elf_greg_t;
3897  *     #define ELF_NREG <number of registers>
3898  *     typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
3899  *
3900  * The last step is to implement a target-specific function that copies the
3901  * registers from the given cpu into that register set.  The prototype is:
3902  *
3903  *     static void elf_core_copy_regs(target_elf_gregset_t *regs,
3904  *                                    const CPUArchState *env);
3905  *
3906  * Parameters:
3907  *     regs - copy register values into here (allocated and zeroed by caller)
3908  *     env - copy registers from here
3909  *
3910  * An example for the ARM target is provided in this file.
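 *
 * Purely as an illustration (this is not one of the real targets in this
 * file), a hypothetical target that keeps sixteen general registers in
 * env->regs[] and the program counter in env->pc might provide:
 *
 *     #define ELF_NREG 17
 *     typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 *     static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                    const CPUFooState *env)
 *     {
 *         for (int i = 0; i < 16; i++) {
 *             (*regs)[i] = tswapreg(env->regs[i]);
 *         }
 *         (*regs)[16] = tswapreg(env->pc);
 *     }
 *
 * The tswapreg() calls keep the dumped values in target byte order
 * regardless of the host; "CPUFooState" and the register layout above are
 * invented for the example only.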
3911 */ 3912 3913 struct target_elf_siginfo { 3914 abi_int si_signo; /* signal number */ 3915 abi_int si_code; /* extra code */ 3916 abi_int si_errno; /* errno */ 3917 }; 3918 3919 struct target_elf_prstatus { 3920 struct target_elf_siginfo pr_info; /* Info associated with signal */ 3921 abi_short pr_cursig; /* Current signal */ 3922 abi_ulong pr_sigpend; /* XXX */ 3923 abi_ulong pr_sighold; /* XXX */ 3924 target_pid_t pr_pid; 3925 target_pid_t pr_ppid; 3926 target_pid_t pr_pgrp; 3927 target_pid_t pr_sid; 3928 struct target_timeval pr_utime; /* XXX User time */ 3929 struct target_timeval pr_stime; /* XXX System time */ 3930 struct target_timeval pr_cutime; /* XXX Cumulative user time */ 3931 struct target_timeval pr_cstime; /* XXX Cumulative system time */ 3932 target_elf_gregset_t pr_reg; /* GP registers */ 3933 abi_int pr_fpvalid; /* XXX */ 3934 }; 3935 3936 #define ELF_PRARGSZ (80) /* Number of chars for args */ 3937 3938 struct target_elf_prpsinfo { 3939 char pr_state; /* numeric process state */ 3940 char pr_sname; /* char for pr_state */ 3941 char pr_zomb; /* zombie */ 3942 char pr_nice; /* nice val */ 3943 abi_ulong pr_flag; /* flags */ 3944 target_uid_t pr_uid; 3945 target_gid_t pr_gid; 3946 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; 3947 /* Lots missing */ 3948 char pr_fname[16] QEMU_NONSTRING; /* filename of executable */ 3949 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ 3950 }; 3951 3952 #ifdef BSWAP_NEEDED 3953 static void bswap_prstatus(struct target_elf_prstatus *prstatus) 3954 { 3955 prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo); 3956 prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code); 3957 prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno); 3958 prstatus->pr_cursig = tswap16(prstatus->pr_cursig); 3959 prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend); 3960 prstatus->pr_sighold = tswapal(prstatus->pr_sighold); 3961 prstatus->pr_pid = tswap32(prstatus->pr_pid); 3962 prstatus->pr_ppid = tswap32(prstatus->pr_ppid); 3963 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp); 3964 prstatus->pr_sid = tswap32(prstatus->pr_sid); 3965 /* cpu times are not filled, so we skip them */ 3966 /* regs should be in correct format already */ 3967 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid); 3968 } 3969 3970 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo) 3971 { 3972 psinfo->pr_flag = tswapal(psinfo->pr_flag); 3973 psinfo->pr_uid = tswap16(psinfo->pr_uid); 3974 psinfo->pr_gid = tswap16(psinfo->pr_gid); 3975 psinfo->pr_pid = tswap32(psinfo->pr_pid); 3976 psinfo->pr_ppid = tswap32(psinfo->pr_ppid); 3977 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp); 3978 psinfo->pr_sid = tswap32(psinfo->pr_sid); 3979 } 3980 3981 static void bswap_note(struct elf_note *en) 3982 { 3983 bswap32s(&en->n_namesz); 3984 bswap32s(&en->n_descsz); 3985 bswap32s(&en->n_type); 3986 } 3987 #else 3988 static inline void bswap_prstatus(struct target_elf_prstatus *p) { } 3989 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {} 3990 static inline void bswap_note(struct elf_note *en) { } 3991 #endif /* BSWAP_NEEDED */ 3992 3993 /* 3994 * Calculate file (dump) size of given memory region. 3995 */ 3996 static size_t vma_dump_size(target_ulong start, target_ulong end, 3997 unsigned long flags) 3998 { 3999 /* The area must be readable. 
*/ 4000 if (!(flags & PAGE_READ)) { 4001 return 0; 4002 } 4003 4004 /* 4005 * Usually we don't dump executable pages as they contain 4006 * non-writable code that debugger can read directly from 4007 * target library etc. If there is no elf header, we dump it. 4008 */ 4009 if (!(flags & PAGE_WRITE_ORG) && 4010 (flags & PAGE_EXEC) && 4011 memcmp(g2h_untagged(start), ELFMAG, SELFMAG) == 0) { 4012 return 0; 4013 } 4014 4015 return end - start; 4016 } 4017 4018 static size_t size_note(const char *name, size_t datasz) 4019 { 4020 size_t namesz = strlen(name) + 1; 4021 4022 namesz = ROUND_UP(namesz, 4); 4023 datasz = ROUND_UP(datasz, 4); 4024 4025 return sizeof(struct elf_note) + namesz + datasz; 4026 } 4027 4028 static void *fill_note(void **pptr, int type, const char *name, size_t datasz) 4029 { 4030 void *ptr = *pptr; 4031 struct elf_note *n = ptr; 4032 size_t namesz = strlen(name) + 1; 4033 4034 n->n_namesz = namesz; 4035 n->n_descsz = datasz; 4036 n->n_type = type; 4037 bswap_note(n); 4038 4039 ptr += sizeof(*n); 4040 memcpy(ptr, name, namesz); 4041 4042 namesz = ROUND_UP(namesz, 4); 4043 datasz = ROUND_UP(datasz, 4); 4044 4045 *pptr = ptr + namesz + datasz; 4046 return ptr + namesz; 4047 } 4048 4049 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine, 4050 uint32_t flags) 4051 { 4052 memcpy(elf->e_ident, ELFMAG, SELFMAG); 4053 4054 elf->e_ident[EI_CLASS] = ELF_CLASS; 4055 elf->e_ident[EI_DATA] = ELF_DATA; 4056 elf->e_ident[EI_VERSION] = EV_CURRENT; 4057 elf->e_ident[EI_OSABI] = ELF_OSABI; 4058 4059 elf->e_type = ET_CORE; 4060 elf->e_machine = machine; 4061 elf->e_version = EV_CURRENT; 4062 elf->e_phoff = sizeof(struct elfhdr); 4063 elf->e_flags = flags; 4064 elf->e_ehsize = sizeof(struct elfhdr); 4065 elf->e_phentsize = sizeof(struct elf_phdr); 4066 elf->e_phnum = segs; 4067 4068 bswap_ehdr(elf); 4069 } 4070 4071 static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset) 4072 { 4073 phdr->p_type = PT_NOTE; 4074 phdr->p_offset = offset; 4075 phdr->p_filesz = sz; 4076 4077 bswap_phdr(phdr, 1); 4078 } 4079 4080 static void fill_prstatus_note(void *data, const TaskState *ts, 4081 CPUState *cpu, int signr) 4082 { 4083 /* 4084 * Because note memory is only aligned to 4, and target_elf_prstatus 4085 * may well have higher alignment requirements, fill locally and 4086 * memcpy to the destination afterward. 4087 */ 4088 struct target_elf_prstatus prstatus = { 4089 .pr_info.si_signo = signr, 4090 .pr_cursig = signr, 4091 .pr_pid = ts->ts_tid, 4092 .pr_ppid = getppid(), 4093 .pr_pgrp = getpgrp(), 4094 .pr_sid = getsid(0), 4095 }; 4096 4097 elf_core_copy_regs(&prstatus.pr_reg, cpu_env(cpu)); 4098 bswap_prstatus(&prstatus); 4099 memcpy(data, &prstatus, sizeof(prstatus)); 4100 } 4101 4102 static void fill_prpsinfo_note(void *data, const TaskState *ts) 4103 { 4104 /* 4105 * Because note memory is only aligned to 4, and target_elf_prpsinfo 4106 * may well have higher alignment requirements, fill locally and 4107 * memcpy to the destination afterward. 
4108 */ 4109 struct target_elf_prpsinfo psinfo = { 4110 .pr_pid = getpid(), 4111 .pr_ppid = getppid(), 4112 .pr_pgrp = getpgrp(), 4113 .pr_sid = getsid(0), 4114 .pr_uid = getuid(), 4115 .pr_gid = getgid(), 4116 }; 4117 char *base_filename; 4118 size_t len; 4119 4120 len = ts->info->env_strings - ts->info->arg_strings; 4121 len = MIN(len, ELF_PRARGSZ); 4122 memcpy(&psinfo.pr_psargs, g2h_untagged(ts->info->arg_strings), len); 4123 for (size_t i = 0; i < len; i++) { 4124 if (psinfo.pr_psargs[i] == 0) { 4125 psinfo.pr_psargs[i] = ' '; 4126 } 4127 } 4128 4129 base_filename = g_path_get_basename(ts->bprm->filename); 4130 /* 4131 * Using strncpy here is fine: at max-length, 4132 * this field is not NUL-terminated. 4133 */ 4134 strncpy(psinfo.pr_fname, base_filename, sizeof(psinfo.pr_fname)); 4135 g_free(base_filename); 4136 4137 bswap_psinfo(&psinfo); 4138 memcpy(data, &psinfo, sizeof(psinfo)); 4139 } 4140 4141 static void fill_auxv_note(void *data, const TaskState *ts) 4142 { 4143 memcpy(data, g2h_untagged(ts->info->saved_auxv), ts->info->auxv_len); 4144 } 4145 4146 /* 4147 * Constructs name of coredump file. We have following convention 4148 * for the name: 4149 * qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core 4150 * 4151 * Returns the filename 4152 */ 4153 static char *core_dump_filename(const TaskState *ts) 4154 { 4155 g_autoptr(GDateTime) now = g_date_time_new_now_local(); 4156 g_autofree char *nowstr = g_date_time_format(now, "%Y%m%d-%H%M%S"); 4157 g_autofree char *base_filename = g_path_get_basename(ts->bprm->filename); 4158 4159 return g_strdup_printf("qemu_%s_%s_%d.core", 4160 base_filename, nowstr, (int)getpid()); 4161 } 4162 4163 static int dump_write(int fd, const void *ptr, size_t size) 4164 { 4165 const char *bufp = (const char *)ptr; 4166 ssize_t bytes_written, bytes_left; 4167 4168 bytes_written = 0; 4169 bytes_left = size; 4170 4171 /* 4172 * In normal conditions, single write(2) should do but 4173 * in case of socket etc. this mechanism is more portable. 
4174 */ 4175 do { 4176 bytes_written = write(fd, bufp, bytes_left); 4177 if (bytes_written < 0) { 4178 if (errno == EINTR) 4179 continue; 4180 return (-1); 4181 } else if (bytes_written == 0) { /* eof */ 4182 return (-1); 4183 } 4184 bufp += bytes_written; 4185 bytes_left -= bytes_written; 4186 } while (bytes_left > 0); 4187 4188 return (0); 4189 } 4190 4191 static int wmr_page_unprotect_regions(void *opaque, target_ulong start, 4192 target_ulong end, unsigned long flags) 4193 { 4194 if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) { 4195 size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size()); 4196 4197 while (1) { 4198 page_unprotect(start, 0); 4199 if (end - start <= step) { 4200 break; 4201 } 4202 start += step; 4203 } 4204 } 4205 return 0; 4206 } 4207 4208 typedef struct { 4209 unsigned count; 4210 size_t size; 4211 } CountAndSizeRegions; 4212 4213 static int wmr_count_and_size_regions(void *opaque, target_ulong start, 4214 target_ulong end, unsigned long flags) 4215 { 4216 CountAndSizeRegions *css = opaque; 4217 4218 css->count++; 4219 css->size += vma_dump_size(start, end, flags); 4220 return 0; 4221 } 4222 4223 typedef struct { 4224 struct elf_phdr *phdr; 4225 off_t offset; 4226 } FillRegionPhdr; 4227 4228 static int wmr_fill_region_phdr(void *opaque, target_ulong start, 4229 target_ulong end, unsigned long flags) 4230 { 4231 FillRegionPhdr *d = opaque; 4232 struct elf_phdr *phdr = d->phdr; 4233 4234 phdr->p_type = PT_LOAD; 4235 phdr->p_vaddr = start; 4236 phdr->p_paddr = 0; 4237 phdr->p_filesz = vma_dump_size(start, end, flags); 4238 phdr->p_offset = d->offset; 4239 d->offset += phdr->p_filesz; 4240 phdr->p_memsz = end - start; 4241 phdr->p_flags = (flags & PAGE_READ ? PF_R : 0) 4242 | (flags & PAGE_WRITE_ORG ? PF_W : 0) 4243 | (flags & PAGE_EXEC ? PF_X : 0); 4244 phdr->p_align = ELF_EXEC_PAGESIZE; 4245 4246 bswap_phdr(phdr, 1); 4247 d->phdr = phdr + 1; 4248 return 0; 4249 } 4250 4251 static int wmr_write_region(void *opaque, target_ulong start, 4252 target_ulong end, unsigned long flags) 4253 { 4254 int fd = *(int *)opaque; 4255 size_t size = vma_dump_size(start, end, flags); 4256 4257 if (!size) { 4258 return 0; 4259 } 4260 return dump_write(fd, g2h_untagged(start), size); 4261 } 4262 4263 /* 4264 * Write out ELF coredump. 4265 * 4266 * See documentation of ELF object file format in: 4267 * http://www.caldera.com/developers/devspecs/gabi41.pdf 4268 * 4269 * Coredump format in linux is following: 4270 * 4271 * 0 +----------------------+ \ 4272 * | ELF header | ET_CORE | 4273 * +----------------------+ | 4274 * | ELF program headers | |--- headers 4275 * | - NOTE section | | 4276 * | - PT_LOAD sections | | 4277 * +----------------------+ / 4278 * | NOTEs: | 4279 * | - NT_PRSTATUS | 4280 * | - NT_PRSINFO | 4281 * | - NT_AUXV | 4282 * +----------------------+ <-- aligned to target page 4283 * | Process memory dump | 4284 * : : 4285 * . . 4286 * : : 4287 * | | 4288 * +----------------------+ 4289 * 4290 * NT_PRSTATUS -> struct elf_prstatus (per thread) 4291 * NT_PRSINFO -> struct elf_prpsinfo 4292 * NT_AUXV is array of { type, value } pairs (see fill_auxv_note()). 4293 * 4294 * Format follows System V format as close as possible. Current 4295 * version limitations are as follows: 4296 * - no floating point registers are dumped 4297 * 4298 * Function returns 0 in case of success, negative errno otherwise. 4299 * 4300 * TODO: make this work also during runtime: it should be 4301 * possible to force coredump from running process and then 4302 * continue processing. 
4303  * handler (provided that the target process hasn't registered a handler
4304  * for that signal) that does the dump when the signal is received.
4305  */
4306 static int elf_core_dump(int signr, const CPUArchState *env)
4307 {
4308     const CPUState *cpu = env_cpu((CPUArchState *)env);
4309     const TaskState *ts = (const TaskState *)get_task_state((CPUState *)cpu);
4310     struct rlimit dumpsize;
4311     CountAndSizeRegions css;
4312     off_t offset, note_offset, data_offset;
4313     size_t note_size;
4314     int cpus, ret;
4315     int fd = -1;
4316     CPUState *cpu_iter;
4317
4318     if (prctl(PR_GET_DUMPABLE) == 0) {
4319         return 0;
4320     }
4321
4322     if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) {
4323         return 0;
4324     }
4325
4326     cpu_list_lock();
4327     mmap_lock();
4328
4329     /* By unprotecting, we merge vmas that might be split. */
4330     walk_memory_regions(NULL, wmr_page_unprotect_regions);
4331
4332     /*
4333      * Walk through the target process memory mappings and
4334      * set up a structure containing this information.
4335      */
4336     memset(&css, 0, sizeof(css));
4337     walk_memory_regions(&css, wmr_count_and_size_regions);
4338
4339     cpus = 0;
4340     CPU_FOREACH(cpu_iter) {
4341         cpus++;
4342     }
4343
4344     offset = sizeof(struct elfhdr);
4345     offset += (css.count + 1) * sizeof(struct elf_phdr);
4346     note_offset = offset;
4347
4348     offset += size_note("CORE", ts->info->auxv_len);
4349     offset += size_note("CORE", sizeof(struct target_elf_prpsinfo));
4350     offset += size_note("CORE", sizeof(struct target_elf_prstatus)) * cpus;
4351     note_size = offset - note_offset;
4352     data_offset = ROUND_UP(offset, ELF_EXEC_PAGESIZE);
4353
4354     /* Do not dump if the corefile size exceeds the limit. */
4355     if (dumpsize.rlim_cur != RLIM_INFINITY
4356         && dumpsize.rlim_cur < data_offset + css.size) {
4357         errno = 0;
4358         goto out;
4359     }
4360
4361     {
4362         g_autofree char *corefile = core_dump_filename(ts);
4363         fd = open(corefile, O_WRONLY | O_CREAT | O_TRUNC,
4364                   S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
4365     }
4366     if (fd < 0) {
4367         goto out;
4368     }
4369
4370     /*
4371      * There is a fair amount of alignment padding within the notes
4372      * as well as preceding the process memory.  Allocate a zeroed
4373      * block to hold it all.  Write all of the headers directly into
4374      * this buffer and then write it out as a block.
4375      */
4376     {
4377         g_autofree void *header = g_malloc0(data_offset);
4378         FillRegionPhdr frp;
4379         void *hptr, *dptr;
4380
4381         /* Create elf file header. */
4382         hptr = header;
4383         fill_elf_header(hptr, css.count + 1, ELF_MACHINE, 0);
4384         hptr += sizeof(struct elfhdr);
4385
4386         /* Create elf program headers. */
4387         fill_elf_note_phdr(hptr, note_size, note_offset);
4388         hptr += sizeof(struct elf_phdr);
4389
4390         frp.phdr = hptr;
4391         frp.offset = data_offset;
4392         walk_memory_regions(&frp, wmr_fill_region_phdr);
4393         hptr = frp.phdr;
4394
4395         /* Create the notes. */
4396         dptr = fill_note(&hptr, NT_AUXV, "CORE", ts->info->auxv_len);
4397         fill_auxv_note(dptr, ts);
4398
4399         dptr = fill_note(&hptr, NT_PRPSINFO, "CORE",
4400                          sizeof(struct target_elf_prpsinfo));
4401         fill_prpsinfo_note(dptr, ts);
4402
4403         CPU_FOREACH(cpu_iter) {
4404             dptr = fill_note(&hptr, NT_PRSTATUS, "CORE",
4405                              sizeof(struct target_elf_prstatus));
4406             fill_prstatus_note(dptr, ts, cpu_iter,
4407                                cpu_iter == cpu ? signr : 0);
4408         }
4409
4410         if (dump_write(fd, header, data_offset) < 0) {
4411             goto out;
4412         }
4413     }
4414
4415     /*
4416      * Finally write process memory into the corefile as well.
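     * Each region is written by wmr_write_region(), which recomputes
     * vma_dump_size(), so regions that were given a zero p_filesz in the
     * program headers are skipped here too and the data stays consistent
     * with the headers.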
4417 */ 4418 if (walk_memory_regions(&fd, wmr_write_region) < 0) { 4419 goto out; 4420 } 4421 errno = 0; 4422 4423 out: 4424 ret = -errno; 4425 mmap_unlock(); 4426 cpu_list_unlock(); 4427 if (fd >= 0) { 4428 close(fd); 4429 } 4430 return ret; 4431 } 4432 #endif /* USE_ELF_CORE_DUMP */ 4433 4434 void do_init_thread(struct target_pt_regs *regs, struct image_info *infop) 4435 { 4436 init_thread(regs, infop); 4437 } 4438