/* This is the Linux kernel elf-loading code, ported into user space */
#include "qemu/osdep.h"
#include <sys/param.h>

#include <sys/resource.h>
#include <sys/shm.h>

#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "loader.h"
#include "user-mmap.h"
#include "disas/disas.h"
#include "qemu/bitops.h"
#include "qemu/path.h"
#include "qemu/queue.h"
#include "qemu/guest-random.h"
#include "qemu/units.h"
#include "qemu/selfmap.h"
#include "qemu/lockable.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "target_signal.h"
#include "accel/tcg/debuginfo.h"

#ifdef TARGET_ARM
#include "target/arm/cpu-features.h"
#endif

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_HWCAP2
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

#define ELF_OSABI   ELFOSABI_SYSV

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE  = 0x0040000,  /* disable randomization of VA space */
    FDPIC_FUNCPTRS     = 0x0080000,  /* userspace function ptrs point to
                                        descriptors (signal handling) */
    MMAP_PAGE_ZERO     = 0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC  = 0x0400000,
    ADDR_LIMIT_32BIT   = 0x0800000,
    SHORT_INODE        = 0x1000000,
    WHOLE_SECONDS      = 0x2000000,
    STICKY_TIMEOUTS    = 0x4000000,
    ADDR_LIMIT_3GB     = 0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX       = 0x0000,
    PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4        = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3        = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3     = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5        = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386    = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4       = 0x0005 | STICKY_TIMEOUTS,
    PER_BSD         = 0x0006,
    PER_SUNOS       = 0x0006 | STICKY_TIMEOUTS,
    PER_XENIX       = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32     = 0x0008,
    PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32      = 0x0009 | STICKY_TIMEOUTS,  /* IRIX5 32-bit */
    PER_IRIXN32     = 0x000a | STICKY_TIMEOUTS,  /* IRIX6 new 32-bit */
    PER_IRIX64      = 0x000b | STICKY_TIMEOUTS,  /* IRIX6 64-bit */
    PER_RISCOS      = 0x000c,
    PER_SOLARIS     = 0x000d | STICKY_TIMEOUTS,
    PER_UW7         = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4        = 0x000f,  /* OSF/1 v4 */
    PER_HPUX        = 0x0010,
    PER_MASK        = 0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)   (pers & PER_MASK)
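
/*
 * Worked example: personality(PER_LINUX32_3GB) yields PER_LINUX32,
 * since ADDR_LIMIT_3GB lives outside the low PER_MASK byte and is
 * stripped by the mask, while the base type 0x0008 survives.
 */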

int info_is_fdpic(struct image_info *info)
{
    return info->personality == PER_LINUX_FDPIC;
}

/* this flag is ineffective under linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#if TARGET_BIG_ENDIAN
#define ELF_DATA    ELFDATA2MSB
#else
#define ELF_DATA    ELFDATA2LSB
#endif

#ifdef TARGET_ABI_MIPSN32
typedef abi_ullong  target_elf_greg_t;
#define tswapreg(ptr)   tswap64(ptr)
#else
typedef abi_ulong   target_elf_greg_t;
#define tswapreg(ptr)   tswapal(ptr)
#endif

#ifdef USE_UID16
typedef abi_ushort  target_uid_t;
typedef abi_ushort  target_gid_t;
#else
typedef abi_uint    target_uid_t;
typedef abi_uint    target_gid_t;
#endif
typedef abi_int     target_pid_t;

#ifdef TARGET_I386

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_CLASS      ELFCLASS64
#define ELF_ARCH       EM_X86_64

#define ELF_PLATFORM   "x86_64"

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}

#define ELF_NREG    27
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 29 as there should be room for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[15]);
    (*regs)[1] = tswapreg(env->regs[14]);
    (*regs)[2] = tswapreg(env->regs[13]);
    (*regs)[3] = tswapreg(env->regs[12]);
    (*regs)[4] = tswapreg(env->regs[R_EBP]);
    (*regs)[5] = tswapreg(env->regs[R_EBX]);
    (*regs)[6] = tswapreg(env->regs[11]);
    (*regs)[7] = tswapreg(env->regs[10]);
    (*regs)[8] = tswapreg(env->regs[9]);
    (*regs)[9] = tswapreg(env->regs[8]);
    (*regs)[10] = tswapreg(env->regs[R_EAX]);
    (*regs)[11] = tswapreg(env->regs[R_ECX]);
    (*regs)[12] = tswapreg(env->regs[R_EDX]);
    (*regs)[13] = tswapreg(env->regs[R_ESI]);
    (*regs)[14] = tswapreg(env->regs[R_EDI]);
    (*regs)[15] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[16] = tswapreg(env->eip);
    (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[18] = tswapreg(env->eflags);
    (*regs)[19] = tswapreg(env->regs[R_ESP]);
    (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff);
    (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff);
}

#if ULONG_MAX > UINT32_MAX
#define INIT_GUEST_COMMPAGE
static bool init_guest_commpage(void)
{
    /*
     * The vsyscall page is at a high negative address aka kernel space,
     * which means that we cannot actually allocate it with target_mmap.
     * We still should be able to use page_set_flags, unless the user
     * has specified -R reserved_va, which would trigger an assert().
     */
    if (reserved_va != 0 &&
        TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
        error_report("Cannot allocate vsyscall page");
        exit(EXIT_FAILURE);
    }
    page_set_flags(TARGET_VSYSCALL_PAGE,
                   TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK,
                   PAGE_EXEC | PAGE_VALID);
    return true;
}
#endif
#else

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_ARCH        EM_386

#define ELF_PLATFORM    get_elf_platform()
#define EXSTACK_DEFAULT true

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6) {
        family = 6;
    }
    if (family >= 3) {
        elf_platform[1] = '0' + family;
    }
    return elf_platform;
}

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /*
     * SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
     * starts %edx contains a pointer to a function which might be
     * registered using `atexit'.  This provides a means for the
     * dynamic linker to call DT_FINI functions for shared libraries
     * that have been loaded before the code runs.
     *
     * A value of 0 tells us that there is no such handler.
     */
    regs->edx = 0;
}

#define ELF_NREG    17
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

/*
 * Note that ELF_NREG should be 19 as there should be room for
 * TRAPNO and ERR "registers" as well but linux doesn't dump
 * those.
 *
 * See linux kernel: arch/x86/include/asm/elf.h
 */
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
{
    (*regs)[0] = tswapreg(env->regs[R_EBX]);
    (*regs)[1] = tswapreg(env->regs[R_ECX]);
    (*regs)[2] = tswapreg(env->regs[R_EDX]);
    (*regs)[3] = tswapreg(env->regs[R_ESI]);
    (*regs)[4] = tswapreg(env->regs[R_EDI]);
    (*regs)[5] = tswapreg(env->regs[R_EBP]);
    (*regs)[6] = tswapreg(env->regs[R_EAX]);
    (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff);
    (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff);
    (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff);
    (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff);
    (*regs)[11] = tswapreg(env->regs[R_EAX]); /* XXX */
    (*regs)[12] = tswapreg(env->eip);
    (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff);
    (*regs)[14] = tswapreg(env->eflags);
    (*regs)[15] = tswapreg(env->regs[R_ESP]);
    (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff);
}
#endif

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_ARM

#ifndef TARGET_AARCH64
/* 32 bit ARM definitions */

#define ELF_ARCH        EM_ARM
#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->uregs[16] = ARM_CPU_MODE_USR;
    if (infop->entry & 1) {
        regs->uregs[16] |= CPSR_T;
    }
    regs->uregs[15] = infop->entry & 0xfffffffe;
    regs->uregs[13] = infop->start_stack;
    /* FIXME - what to do on failure of get_user()? */
    get_user_ual(regs->uregs[2], stack + 8); /* envp */
    get_user_ual(regs->uregs[1], stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed after! */
    regs->uregs[0] = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->uregs[10] = infop->start_data;

    /* Support ARM FDPIC. */
    if (info_is_fdpic(infop)) {
        /*
         * As described in the ABI document, r7 points to the loadmap info
         * prepared by the kernel.  If an interpreter is needed, r8 points
         * to the interpreter loadmap and r9 points to the interpreter
         * PT_DYNAMIC info.  If no interpreter is needed, r8 is zero, and
         * r9 points to the main program PT_DYNAMIC info.
         */
        regs->uregs[7] = infop->loadmap_addr;
        if (infop->interpreter_loadmap_addr) {
            /* Executable is dynamically loaded. */
            regs->uregs[8] = infop->interpreter_loadmap_addr;
            regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
        } else {
            regs->uregs[8] = 0;
            regs->uregs[9] = infop->pt_dynamic_addr;
        }
    }
}

#define ELF_NREG    18
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
{
    (*regs)[0] = tswapreg(env->regs[0]);
    (*regs)[1] = tswapreg(env->regs[1]);
    (*regs)[2] = tswapreg(env->regs[2]);
    (*regs)[3] = tswapreg(env->regs[3]);
    (*regs)[4] = tswapreg(env->regs[4]);
    (*regs)[5] = tswapreg(env->regs[5]);
    (*regs)[6] = tswapreg(env->regs[6]);
    (*regs)[7] = tswapreg(env->regs[7]);
    (*regs)[8] = tswapreg(env->regs[8]);
    (*regs)[9] = tswapreg(env->regs[9]);
    (*regs)[10] = tswapreg(env->regs[10]);
    (*regs)[11] = tswapreg(env->regs[11]);
    (*regs)[12] = tswapreg(env->regs[12]);
    (*regs)[13] = tswapreg(env->regs[13]);
    (*regs)[14] = tswapreg(env->regs[14]);
    (*regs)[15] = tswapreg(env->regs[15]);

    (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
    (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
    ARM_HWCAP_ARM_CRUNCH    = 1 << 10,
    ARM_HWCAP_ARM_THUMBEE   = 1 << 11,
    ARM_HWCAP_ARM_NEON      = 1 << 12,
    ARM_HWCAP_ARM_VFPv3     = 1 << 13,
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 14,
    ARM_HWCAP_ARM_TLS       = 1 << 15,
    ARM_HWCAP_ARM_VFPv4     = 1 << 16,
    ARM_HWCAP_ARM_IDIVA     = 1 << 17,
    ARM_HWCAP_ARM_IDIVT     = 1 << 18,
    ARM_HWCAP_ARM_VFPD32    = 1 << 19,
    ARM_HWCAP_ARM_LPAE      = 1 << 20,
    ARM_HWCAP_ARM_EVTSTRM   = 1 << 21,
    ARM_HWCAP_ARM_FPHP      = 1 << 22,
    ARM_HWCAP_ARM_ASIMDHP   = 1 << 23,
    ARM_HWCAP_ARM_ASIMDDP   = 1 << 24,
    ARM_HWCAP_ARM_ASIMDFHM  = 1 << 25,
    ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26,
    ARM_HWCAP_ARM_I8MM      = 1 << 27,
};

enum {
    ARM_HWCAP2_ARM_AES   = 1 << 0,
    ARM_HWCAP2_ARM_PMULL = 1 << 1,
    ARM_HWCAP2_ARM_SHA1  = 1 << 2,
    ARM_HWCAP2_ARM_SHA2  = 1 << 3,
    ARM_HWCAP2_ARM_CRC32 = 1 << 4,
    ARM_HWCAP2_ARM_SB    = 1 << 5,
    ARM_HWCAP2_ARM_SSBS  = 1 << 6,
};

/* The commpage only exists for 32 bit kernels */

#define HI_COMMPAGE (intptr_t)0xffff0f00u

static bool init_guest_commpage(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    abi_ptr commpage;
    void *want;
    void *addr;

    /*
     * M-profile allocates a maximum of 2GB of address space, so it can
     * never allocate the commpage.  Skip it.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        return true;
    }

    commpage = HI_COMMPAGE & -qemu_host_page_size;
    want = g2h_untagged(commpage);
    addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
                MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);

    if (addr == MAP_FAILED) {
        perror("Allocating guest commpage");
        exit(EXIT_FAILURE);
    }
    if (addr != want) {
        return false;
    }

    /* Set kernel helper versions; rest of page is 0.  */
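    /*
     * Version 5 advertises helpers up to __kuser_cmpxchg64; the version
     * word lives at 0xffff0ffc, per the kernel's
     * Documentation/arm/kernel_user_helpers.txt.
     */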
    __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));

    if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
        perror("Protecting guest commpage");
        exit(EXIT_FAILURE);
    }

    page_set_flags(commpage, commpage | ~qemu_host_page_mask,
                   PAGE_READ | PAGE_EXEC | PAGE_VALID);
    return true;
}

#define ELF_HWCAP get_elf_hwcap()
#define ELF_HWCAP2 get_elf_hwcap2()

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_ARM_SWP;
    hwcaps |= ARM_HWCAP_ARM_HALF;
    hwcaps |= ARM_HWCAP_ARM_THUMB;
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;

    /* probe for the extra features */
#define GET_FEATURE(feat, hwcap) \
    do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

    /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
    GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
    GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
    GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
    GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
    GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
    GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);

    if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
        cpu_isar_feature(aa32_fpdp_v3, cpu)) {
        hwcaps |= ARM_HWCAP_ARM_VFPv3;
        if (cpu_isar_feature(aa32_simd_r32, cpu)) {
            hwcaps |= ARM_HWCAP_ARM_VFPD32;
        } else {
            hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
        }
    }
    GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
    /*
     * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same
     * isar_feature function for both.  The kernel reports them as two hwcaps.
     */
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP);
    GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP);
    GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP);
    GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM);
    GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16);
    GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM);

    return hwcaps;
}

uint32_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
    GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
    GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
    GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
    GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
    GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB);
    GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS);
    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP_ARM_SWP      )] = "swp",
        [__builtin_ctz(ARM_HWCAP_ARM_HALF     )] = "half",
        [__builtin_ctz(ARM_HWCAP_ARM_THUMB    )] = "thumb",
        [__builtin_ctz(ARM_HWCAP_ARM_26BIT    )] = "26bit",
        [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult",
        [__builtin_ctz(ARM_HWCAP_ARM_FPA      )] = "fpa",
        [__builtin_ctz(ARM_HWCAP_ARM_VFP      )] = "vfp",
        [__builtin_ctz(ARM_HWCAP_ARM_EDSP     )] = "edsp",
        [__builtin_ctz(ARM_HWCAP_ARM_JAVA     )] = "java",
        [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT   )] = "iwmmxt",
        [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH   )] = "crunch",
        [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE  )] = "thumbee",
        [__builtin_ctz(ARM_HWCAP_ARM_NEON     )] = "neon",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPv3    )] = "vfpv3",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16",
        [__builtin_ctz(ARM_HWCAP_ARM_TLS      )] = "tls",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPv4    )] = "vfpv4",
        [__builtin_ctz(ARM_HWCAP_ARM_IDIVA    )] = "idiva",
        [__builtin_ctz(ARM_HWCAP_ARM_IDIVT    )] = "idivt",
        [__builtin_ctz(ARM_HWCAP_ARM_VFPD32   )] = "vfpd32",
        [__builtin_ctz(ARM_HWCAP_ARM_LPAE     )] = "lpae",
        [__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM  )] = "evtstrm",
        [__builtin_ctz(ARM_HWCAP_ARM_FPHP     )] = "fphp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP  )] = "asimdhp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP  )] = "asimddp",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm",
        [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16",
        [__builtin_ctz(ARM_HWCAP_ARM_I8MM     )] = "i8mm",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP2_ARM_AES  )] = "aes",
        [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull",
        [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1",
        [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2",
        [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32",
        [__builtin_ctz(ARM_HWCAP2_ARM_SB   )] = "sb",
        [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

#undef GET_FEATURE
#undef GET_FEATURE_ID

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    CPUARMState *env = cpu_env(thread_cpu);

#if TARGET_BIG_ENDIAN
# define END  "b"
#else
# define END  "l"
#endif

    if (arm_feature(env, ARM_FEATURE_V8)) {
        return "v8" END;
    } else if (arm_feature(env, ARM_FEATURE_V7)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            return "v7m" END;
        } else {
            return "v7" END;
        }
    } else if (arm_feature(env, ARM_FEATURE_V6)) {
        return "v6" END;
    } else if (arm_feature(env, ARM_FEATURE_V5)) {
        return "v5" END;
    } else {
        return "v4" END;
    }

#undef END
}

#else
/* 64 bit ARM definitions */

#define ELF_ARCH        EM_AARCH64
#define ELF_CLASS       ELFCLASS64
#if TARGET_BIG_ENDIAN
# define ELF_PLATFORM    "aarch64_be"
#else
# define ELF_PLATFORM    "aarch64"
#endif

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));

    regs->pc = infop->entry & ~0x3ULL;
    regs->sp = stack;
}

#define ELF_NREG    34
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUARMState *env)
{
    int i;

    for (i = 0; i < 32; i++) {
        (*regs)[i] = tswapreg(env->xregs[i]);
    }
    (*regs)[32] = tswapreg(env->pc);
    (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum {
    ARM_HWCAP_A64_FP            = 1 << 0,
    ARM_HWCAP_A64_ASIMD         = 1 << 1,
    ARM_HWCAP_A64_EVTSTRM       = 1 << 2,
    ARM_HWCAP_A64_AES           = 1 << 3,
    ARM_HWCAP_A64_PMULL         = 1 << 4,
    ARM_HWCAP_A64_SHA1          = 1 << 5,
    ARM_HWCAP_A64_SHA2          = 1 << 6,
    ARM_HWCAP_A64_CRC32         = 1 << 7,
    ARM_HWCAP_A64_ATOMICS       = 1 << 8,
    ARM_HWCAP_A64_FPHP          = 1 << 9,
    ARM_HWCAP_A64_ASIMDHP       = 1 << 10,
    ARM_HWCAP_A64_CPUID         = 1 << 11,
    ARM_HWCAP_A64_ASIMDRDM      = 1 << 12,
    ARM_HWCAP_A64_JSCVT         = 1 << 13,
    ARM_HWCAP_A64_FCMA          = 1 << 14,
    ARM_HWCAP_A64_LRCPC         = 1 << 15,
    ARM_HWCAP_A64_DCPOP         = 1 << 16,
    ARM_HWCAP_A64_SHA3          = 1 << 17,
    ARM_HWCAP_A64_SM3           = 1 << 18,
    ARM_HWCAP_A64_SM4           = 1 << 19,
    ARM_HWCAP_A64_ASIMDDP       = 1 << 20,
    ARM_HWCAP_A64_SHA512        = 1 << 21,
    ARM_HWCAP_A64_SVE           = 1 << 22,
    ARM_HWCAP_A64_ASIMDFHM      = 1 << 23,
    ARM_HWCAP_A64_DIT           = 1 << 24,
    ARM_HWCAP_A64_USCAT         = 1 << 25,
    ARM_HWCAP_A64_ILRCPC        = 1 << 26,
    ARM_HWCAP_A64_FLAGM         = 1 << 27,
    ARM_HWCAP_A64_SSBS          = 1 << 28,
    ARM_HWCAP_A64_SB            = 1 << 29,
    ARM_HWCAP_A64_PACA          = 1 << 30,
    ARM_HWCAP_A64_PACG          = 1UL << 31,

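    /*
     * AT_HWCAP2 bits.  The values mirror the kernel's
     * arch/arm64/include/uapi/asm/hwcap.h; from WFXT onwards they pass
     * bit 31, hence the ULL constants.
     */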
    ARM_HWCAP2_A64_DCPODP       = 1 << 0,
    ARM_HWCAP2_A64_SVE2         = 1 << 1,
    ARM_HWCAP2_A64_SVEAES       = 1 << 2,
    ARM_HWCAP2_A64_SVEPMULL     = 1 << 3,
    ARM_HWCAP2_A64_SVEBITPERM   = 1 << 4,
    ARM_HWCAP2_A64_SVESHA3      = 1 << 5,
    ARM_HWCAP2_A64_SVESM4       = 1 << 6,
    ARM_HWCAP2_A64_FLAGM2       = 1 << 7,
    ARM_HWCAP2_A64_FRINT        = 1 << 8,
    ARM_HWCAP2_A64_SVEI8MM      = 1 << 9,
    ARM_HWCAP2_A64_SVEF32MM     = 1 << 10,
    ARM_HWCAP2_A64_SVEF64MM     = 1 << 11,
    ARM_HWCAP2_A64_SVEBF16      = 1 << 12,
    ARM_HWCAP2_A64_I8MM         = 1 << 13,
    ARM_HWCAP2_A64_BF16         = 1 << 14,
    ARM_HWCAP2_A64_DGH          = 1 << 15,
    ARM_HWCAP2_A64_RNG          = 1 << 16,
    ARM_HWCAP2_A64_BTI          = 1 << 17,
    ARM_HWCAP2_A64_MTE          = 1 << 18,
    ARM_HWCAP2_A64_ECV          = 1 << 19,
    ARM_HWCAP2_A64_AFP          = 1 << 20,
    ARM_HWCAP2_A64_RPRES        = 1 << 21,
    ARM_HWCAP2_A64_MTE3         = 1 << 22,
    ARM_HWCAP2_A64_SME          = 1 << 23,
    ARM_HWCAP2_A64_SME_I16I64   = 1 << 24,
    ARM_HWCAP2_A64_SME_F64F64   = 1 << 25,
    ARM_HWCAP2_A64_SME_I8I32    = 1 << 26,
    ARM_HWCAP2_A64_SME_F16F32   = 1 << 27,
    ARM_HWCAP2_A64_SME_B16F32   = 1 << 28,
    ARM_HWCAP2_A64_SME_F32F32   = 1 << 29,
    ARM_HWCAP2_A64_SME_FA64     = 1 << 30,
    ARM_HWCAP2_A64_WFXT         = 1ULL << 31,
    ARM_HWCAP2_A64_EBF16        = 1ULL << 32,
    ARM_HWCAP2_A64_SVE_EBF16    = 1ULL << 33,
    ARM_HWCAP2_A64_CSSC         = 1ULL << 34,
    ARM_HWCAP2_A64_RPRFM        = 1ULL << 35,
    ARM_HWCAP2_A64_SVE2P1       = 1ULL << 36,
    ARM_HWCAP2_A64_SME2         = 1ULL << 37,
    ARM_HWCAP2_A64_SME2P1       = 1ULL << 38,
    ARM_HWCAP2_A64_SME_I16I32   = 1ULL << 39,
    ARM_HWCAP2_A64_SME_BI32I32  = 1ULL << 40,
    ARM_HWCAP2_A64_SME_B16B16   = 1ULL << 41,
    ARM_HWCAP2_A64_SME_F16F16   = 1ULL << 42,
    ARM_HWCAP2_A64_MOPS         = 1ULL << 43,
    ARM_HWCAP2_A64_HBC          = 1ULL << 44,
};

#define ELF_HWCAP   get_elf_hwcap()
#define ELF_HWCAP2  get_elf_hwcap2()

#define GET_FEATURE_ID(feat, hwcap) \
    do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)

uint32_t get_elf_hwcap(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= ARM_HWCAP_A64_FP;
    hwcaps |= ARM_HWCAP_A64_ASIMD;
    hwcaps |= ARM_HWCAP_A64_CPUID;

    /* probe for the extra features */

    GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
    GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
    GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
    GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
    GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
    GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
    GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
    GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
    GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
    GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
    GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
    GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
    GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
    GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
    GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
    GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
    GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
    GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
    GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
    GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
    GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
    GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
    GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);

    return hwcaps;
}

uint64_t get_elf_hwcap2(void)
{
    ARMCPU *cpu = ARM_CPU(thread_cpu);
    uint64_t hwcaps = 0;

    GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
    GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2);
    GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES);
    GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL);
    GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM);
    GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3);
    GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4);
    GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
    GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
    GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM);
    GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM);
    GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM);
    GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16);
    GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM);
    GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16);
    GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG);
    GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI);
    GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE);
    GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME |
                              ARM_HWCAP2_A64_SME_F32F32 |
                              ARM_HWCAP2_A64_SME_B16F32 |
                              ARM_HWCAP2_A64_SME_F16F32 |
                              ARM_HWCAP2_A64_SME_I8I32));
    GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64);
    GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64);
    GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64);
    GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC);
    GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS);

    return hwcaps;
}

const char *elf_hwcap_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP_A64_FP      )] = "fp",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMD   )] = "asimd",
        [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm",
        [__builtin_ctz(ARM_HWCAP_A64_AES     )] = "aes",
        [__builtin_ctz(ARM_HWCAP_A64_PMULL   )] = "pmull",
        [__builtin_ctz(ARM_HWCAP_A64_SHA1    )] = "sha1",
        [__builtin_ctz(ARM_HWCAP_A64_SHA2    )] = "sha2",
        [__builtin_ctz(ARM_HWCAP_A64_CRC32   )] = "crc32",
        [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics",
        [__builtin_ctz(ARM_HWCAP_A64_FPHP    )] = "fphp",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp",
        [__builtin_ctz(ARM_HWCAP_A64_CPUID   )] = "cpuid",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm",
        [__builtin_ctz(ARM_HWCAP_A64_JSCVT   )] = "jscvt",
        [__builtin_ctz(ARM_HWCAP_A64_FCMA    )] = "fcma",
        [__builtin_ctz(ARM_HWCAP_A64_LRCPC   )] = "lrcpc",
        [__builtin_ctz(ARM_HWCAP_A64_DCPOP   )] = "dcpop",
        [__builtin_ctz(ARM_HWCAP_A64_SHA3    )] = "sha3",
        [__builtin_ctz(ARM_HWCAP_A64_SM3     )] = "sm3",
        [__builtin_ctz(ARM_HWCAP_A64_SM4     )] = "sm4",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp",
        [__builtin_ctz(ARM_HWCAP_A64_SHA512  )] = "sha512",
        [__builtin_ctz(ARM_HWCAP_A64_SVE     )] = "sve",
        [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm",
        [__builtin_ctz(ARM_HWCAP_A64_DIT     )] = "dit",
        [__builtin_ctz(ARM_HWCAP_A64_USCAT   )] = "uscat",
        [__builtin_ctz(ARM_HWCAP_A64_ILRCPC  )] = "ilrcpc",
        [__builtin_ctz(ARM_HWCAP_A64_FLAGM   )] = "flagm",
        [__builtin_ctz(ARM_HWCAP_A64_SSBS    )] = "ssbs",
        [__builtin_ctz(ARM_HWCAP_A64_SB      )] = "sb",
        [__builtin_ctz(ARM_HWCAP_A64_PACA    )] = "paca",
        [__builtin_ctz(ARM_HWCAP_A64_PACG    )] = "pacg",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}

const char *elf_hwcap2_str(uint32_t bit)
{
    static const char *hwcap_str[] = {
        [__builtin_ctz(ARM_HWCAP2_A64_DCPODP     )] = "dcpodp",
        [__builtin_ctz(ARM_HWCAP2_A64_SVE2       )] = "sve2",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEAES     )] = "sveaes",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL   )] = "svepmull",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM )] = "svebitperm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3    )] = "svesha3",
        [__builtin_ctz(ARM_HWCAP2_A64_SVESM4     )] = "svesm4",
        [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2     )] = "flagm2",
        [__builtin_ctz(ARM_HWCAP2_A64_FRINT      )] = "frint",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM    )] = "svei8mm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM   )] = "svef32mm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM   )] = "svef64mm",
        [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16    )] = "svebf16",
        [__builtin_ctz(ARM_HWCAP2_A64_I8MM       )] = "i8mm",
        [__builtin_ctz(ARM_HWCAP2_A64_BF16       )] = "bf16",
        [__builtin_ctz(ARM_HWCAP2_A64_DGH        )] = "dgh",
        [__builtin_ctz(ARM_HWCAP2_A64_RNG        )] = "rng",
        [__builtin_ctz(ARM_HWCAP2_A64_BTI        )] = "bti",
        [__builtin_ctz(ARM_HWCAP2_A64_MTE        )] = "mte",
        [__builtin_ctz(ARM_HWCAP2_A64_ECV        )] = "ecv",
        [__builtin_ctz(ARM_HWCAP2_A64_AFP        )] = "afp",
        [__builtin_ctz(ARM_HWCAP2_A64_RPRES      )] = "rpres",
        [__builtin_ctz(ARM_HWCAP2_A64_MTE3       )] = "mte3",
        [__builtin_ctz(ARM_HWCAP2_A64_SME        )] = "sme",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "smei16i64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "smef64f64",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32  )] = "smei8i32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "smef16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32",
        [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64   )] = "smefa64",
        [__builtin_ctz(ARM_HWCAP2_A64_WFXT       )] = "wfxt",
        [__builtin_ctzll(ARM_HWCAP2_A64_EBF16      )] = "ebf16",
        [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16  )] = "sveebf16",
        [__builtin_ctzll(ARM_HWCAP2_A64_CSSC       )] = "cssc",
        [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM      )] = "rprfm",
        [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1     )] = "sve2p1",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME2       )] = "sme2",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1     )] = "sme2p1",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16",
        [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16",
        [__builtin_ctzll(ARM_HWCAP2_A64_MOPS       )] = "mops",
        [__builtin_ctzll(ARM_HWCAP2_A64_HBC        )] = "hbc",
    };

    return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL;
}
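
/*
 * The two tables above feed linux-user's /proc/cpuinfo emulation,
 * which translates each set AT_HWCAP/AT_HWCAP2 bit back into the name
 * printed on the "Features:" line, much as the kernel does.
 */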

#undef GET_FEATURE_ID

#endif /* not TARGET_AARCH64 */
#endif /* TARGET_ARM */

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_SPARCV9
#else
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
                    | HWCAP_SPARC_MULDIV)
#define ELF_CLASS   ELFCLASS32
#define ELF_ARCH    EM_SPARC
#endif /* TARGET_SPARC64 */

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Note that target_cpu_copy_regs does not read psr/tstate. */
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong)
                        - TARGET_STACK_BIAS);
}
#endif /* TARGET_SPARC */

#ifdef TARGET_PPC

#define ELF_MACHINE    PPC_ELF_MACHINE

#if defined(TARGET_PPC64)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define ELF_CLASS       ELFCLASS32
#define EXSTACK_DEFAULT true

#endif

#define ELF_ARCH        EM_PPC

/*
 * Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
 * See arch/powerpc/include/asm/cputable.h.
 */
enum {
    QEMU_PPC_FEATURE_32 = 0x80000000,
    QEMU_PPC_FEATURE_64 = 0x40000000,
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
    QEMU_PPC_FEATURE_CELL = 0x00010000,
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
    QEMU_PPC_FEATURE_SMT = 0x00004000,
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,

    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,

    /* Feature definitions in AT_HWCAP2.  */
    QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
    QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
    QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
    QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
    QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
    QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
    QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
    QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
    QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
    QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
    QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
    QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
    QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
    QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */
    QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */
};

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

    /*
     * We don't have to be terribly complete here; the high points are
     * Altivec/FP/SPE support.  Anything else is just a bonus.
     */
#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flags, feature)                                    \
    do {                                                                \
        if ((cpu->env.insns_flags2 & flags) == flags) {                 \
            features |= feature;                                        \
        }                                                               \
    } while (0)
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
    GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
    GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
    GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
                  PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
                 QEMU_PPC_FEATURE_ARCH_2_06);
#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}

#define ELF_HWCAP2 get_elf_hwcap2()

static uint32_t get_elf_hwcap2(void)
{
    PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
    uint32_t features = 0;

#define GET_FEATURE(flag, feature)                                      \
    do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
#define GET_FEATURE2(flag, feature)                                      \
    do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)

    GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
    GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
    GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
                  PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
                  QEMU_PPC_FEATURE2_VEC_CRYPTO);
    GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
                 QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128);
    GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 |
                 QEMU_PPC_FEATURE2_MMA);

#undef GET_FEATURE
#undef GET_FEATURE2

    return features;
}
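
/*
 * Note the two different GET_FEATURE2 definitions above: the AT_HWCAP
 * probe requires *all* of the given insns_flags2 bits (so e.g.
 * QEMU_PPC_FEATURE_ARCH_2_06 is only advertised when every listed
 * ISA 2.06 facility is implemented), while the AT_HWCAP2 probe
 * matches on any bit.
 */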

/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                              \
    do {                                                         \
        PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);               \
        /*                                                       \
         * Handle glibc compatibility: these magic entries must \
         * be at the lowest addresses in the final auxv.         \
         */                                                      \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                 \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                 \
        NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size);  \
        NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size);  \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                          \
    } while (0)

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64)
    if (get_ppc64_abi(infop) < 2) {
        uint64_t val;
        get_user_u64(val, infop->entry + 8);
        _regs->gpr[2] = val + infop->load_bias;
        get_user_u64(val, infop->entry);
        infop->entry = val + infop->load_bias;
    } else {
        _regs->gpr[12] = infop->entry;  /* r12 set to global entry address */
    }
#endif
    _regs->nip = infop->entry;
}
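
/*
 * Background for the ELFv1 branch above: under that ABI e_entry points
 * at a function descriptor whose first doubleword is the real entry PC
 * and whose second is the TOC pointer that belongs in r2.  ELFv2
 * instead enters at e_entry directly, with r12 holding the entry
 * address so the global-entry prologue can derive the TOC itself.
 */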

/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
#define ELF_NREG 48
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
{
    int i;
    target_ulong ccr = 0;

    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[i] = tswapreg(env->gpr[i]);
    }

    (*regs)[32] = tswapreg(env->nip);
    (*regs)[33] = tswapreg(env->msr);
    (*regs)[35] = tswapreg(env->ctr);
    (*regs)[36] = tswapreg(env->lr);
    (*regs)[37] = tswapreg(cpu_read_xer(env));

    ccr = ppc_get_cr(env);
    (*regs)[38] = tswapreg(ccr);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_LOONGARCH64

#define ELF_CLASS   ELFCLASS64
#define ELF_ARCH    EM_LOONGARCH
#define EXSTACK_DEFAULT true

#define elf_check_arch(x) ((x) == EM_LOONGARCH)

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    /* Set crmd PG, DA = 1, 0 */
    regs->csr.crmd = 2 << 3;
    regs->csr.era = infop->entry;
    regs->regs[3] = infop->start_stack;
}

/* See linux kernel: arch/loongarch/include/asm/elf.h */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

enum {
    TARGET_EF_R0 = 0,
    TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33,
    TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34,
};

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPULoongArchState *env)
{
    int i;

    (*regs)[TARGET_EF_R0] = 0;

    for (i = 1; i < ARRAY_SIZE(env->gpr); i++) {
        (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]);
    }

    (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc);
    (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#define ELF_HWCAP get_elf_hwcap()

/* See arch/loongarch/include/uapi/asm/hwcap.h */
enum {
    HWCAP_LOONGARCH_CPUCFG   = (1 << 0),
    HWCAP_LOONGARCH_LAM      = (1 << 1),
    HWCAP_LOONGARCH_UAL      = (1 << 2),
    HWCAP_LOONGARCH_FPU      = (1 << 3),
    HWCAP_LOONGARCH_LSX      = (1 << 4),
    HWCAP_LOONGARCH_LASX     = (1 << 5),
    HWCAP_LOONGARCH_CRC32    = (1 << 6),
    HWCAP_LOONGARCH_COMPLEX  = (1 << 7),
    HWCAP_LOONGARCH_CRYPTO   = (1 << 8),
    HWCAP_LOONGARCH_LVZ      = (1 << 9),
    HWCAP_LOONGARCH_LBT_X86  = (1 << 10),
    HWCAP_LOONGARCH_LBT_ARM  = (1 << 11),
    HWCAP_LOONGARCH_LBT_MIPS = (1 << 12),
};

static uint32_t get_elf_hwcap(void)
{
    LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu);
    uint32_t hwcaps = 0;

    hwcaps |= HWCAP_LOONGARCH_CRC32;

    if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) {
        hwcaps |= HWCAP_LOONGARCH_UAL;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) {
        hwcaps |= HWCAP_LOONGARCH_FPU;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) {
        hwcaps |= HWCAP_LOONGARCH_LAM;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) {
        hwcaps |= HWCAP_LOONGARCH_LSX;
    }

    if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) {
        hwcaps |= HWCAP_LOONGARCH_LASX;
    }

    return hwcaps;
}

#define ELF_PLATFORM "loongarch"

#endif /* TARGET_LOONGARCH64 */

#ifdef TARGET_MIPS

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#define ELF_ARCH    EM_MIPS
#define EXSTACK_DEFAULT true

#ifdef TARGET_ABI_MIPSN32
#define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
#else
#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
#endif

#define ELF_BASE_PLATFORM get_elf_base_platform()

#define MATCH_PLATFORM_INSN(_flags, _base_platform)      \
    do { if ((cpu->env.insn_flags & (_flags)) == _flags) \
    { return _base_platform; } } while (0)

static const char *get_elf_base_platform(void)
{
    MIPSCPU *cpu = MIPS_CPU(thread_cpu);

    /* 64 bit ISAs go first */
    MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6");
    MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5");
    MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2");
    MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64");
    MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5");
    MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4");
    MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3");

    /* 32 bit ISAs */
    MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6");
    MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5");
    MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2");
    MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32");
    MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2");

    /* Fallback */
    return "mips";
}
#undef MATCH_PLATFORM_INSN

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

/* See linux kernel: arch/mips/include/asm/elf.h.  */
#define ELF_NREG 45
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

/*
 * See linux kernel: arch/mips/include/asm/reg.h.
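 * For o32 the first six slots of the dump are padding in the kernel's
 * layout, hence TARGET_EF_R0 = 6 below; the 64-bit layout starts at 0.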
*/ 1319 enum { 1320 #ifdef TARGET_MIPS64 1321 TARGET_EF_R0 = 0, 1322 #else 1323 TARGET_EF_R0 = 6, 1324 #endif 1325 TARGET_EF_R26 = TARGET_EF_R0 + 26, 1326 TARGET_EF_R27 = TARGET_EF_R0 + 27, 1327 TARGET_EF_LO = TARGET_EF_R0 + 32, 1328 TARGET_EF_HI = TARGET_EF_R0 + 33, 1329 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34, 1330 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35, 1331 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36, 1332 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37 1333 }; 1334 1335 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ 1336 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env) 1337 { 1338 int i; 1339 1340 for (i = 0; i < TARGET_EF_R0; i++) { 1341 (*regs)[i] = 0; 1342 } 1343 (*regs)[TARGET_EF_R0] = 0; 1344 1345 for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) { 1346 (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]); 1347 } 1348 1349 (*regs)[TARGET_EF_R26] = 0; 1350 (*regs)[TARGET_EF_R27] = 0; 1351 (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]); 1352 (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]); 1353 (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC); 1354 (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr); 1355 (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status); 1356 (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause); 1357 } 1358 1359 #define USE_ELF_CORE_DUMP 1360 #define ELF_EXEC_PAGESIZE 4096 1361 1362 /* See arch/mips/include/uapi/asm/hwcap.h. */ 1363 enum { 1364 HWCAP_MIPS_R6 = (1 << 0), 1365 HWCAP_MIPS_MSA = (1 << 1), 1366 HWCAP_MIPS_CRC32 = (1 << 2), 1367 HWCAP_MIPS_MIPS16 = (1 << 3), 1368 HWCAP_MIPS_MDMX = (1 << 4), 1369 HWCAP_MIPS_MIPS3D = (1 << 5), 1370 HWCAP_MIPS_SMARTMIPS = (1 << 6), 1371 HWCAP_MIPS_DSP = (1 << 7), 1372 HWCAP_MIPS_DSP2 = (1 << 8), 1373 HWCAP_MIPS_DSP3 = (1 << 9), 1374 HWCAP_MIPS_MIPS16E2 = (1 << 10), 1375 HWCAP_LOONGSON_MMI = (1 << 11), 1376 HWCAP_LOONGSON_EXT = (1 << 12), 1377 HWCAP_LOONGSON_EXT2 = (1 << 13), 1378 HWCAP_LOONGSON_CPUCFG = (1 << 14), 1379 }; 1380 1381 #define ELF_HWCAP get_elf_hwcap() 1382 1383 #define GET_FEATURE_INSN(_flag, _hwcap) \ 1384 do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0) 1385 1386 #define GET_FEATURE_REG_SET(_reg, _mask, _hwcap) \ 1387 do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0) 1388 1389 #define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap) \ 1390 do { \ 1391 if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) { \ 1392 hwcaps |= _hwcap; \ 1393 } \ 1394 } while (0) 1395 1396 static uint32_t get_elf_hwcap(void) 1397 { 1398 MIPSCPU *cpu = MIPS_CPU(thread_cpu); 1399 uint32_t hwcaps = 0; 1400 1401 GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH, 1402 2, HWCAP_MIPS_R6); 1403 GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA); 1404 GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI); 1405 GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT); 1406 1407 return hwcaps; 1408 } 1409 1410 #undef GET_FEATURE_REG_EQU 1411 #undef GET_FEATURE_REG_SET 1412 #undef GET_FEATURE_INSN 1413 1414 #endif /* TARGET_MIPS */ 1415 1416 #ifdef TARGET_MICROBLAZE 1417 1418 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD) 1419 1420 #define ELF_CLASS ELFCLASS32 1421 #define ELF_ARCH EM_MICROBLAZE 1422 1423 static inline void init_thread(struct target_pt_regs *regs, 1424 struct image_info *infop) 1425 { 1426 regs->pc = infop->entry; 1427 regs->r1 = infop->start_stack; 1428 1429 } 1430 1431 #define ELF_EXEC_PAGESIZE 4096 1432 1433 #define 
USE_ELF_CORE_DUMP 1434 #define ELF_NREG 38 1435 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1436 1437 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ 1438 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env) 1439 { 1440 int i, pos = 0; 1441 1442 for (i = 0; i < 32; i++) { 1443 (*regs)[pos++] = tswapreg(env->regs[i]); 1444 } 1445 1446 (*regs)[pos++] = tswapreg(env->pc); 1447 (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env)); 1448 (*regs)[pos++] = 0; 1449 (*regs)[pos++] = tswapreg(env->ear); 1450 (*regs)[pos++] = 0; 1451 (*regs)[pos++] = tswapreg(env->esr); 1452 } 1453 1454 #endif /* TARGET_MICROBLAZE */ 1455 1456 #ifdef TARGET_NIOS2 1457 1458 #define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2) 1459 1460 #define ELF_CLASS ELFCLASS32 1461 #define ELF_ARCH EM_ALTERA_NIOS2 1462 1463 static void init_thread(struct target_pt_regs *regs, struct image_info *infop) 1464 { 1465 regs->ea = infop->entry; 1466 regs->sp = infop->start_stack; 1467 } 1468 1469 #define LO_COMMPAGE TARGET_PAGE_SIZE 1470 1471 static bool init_guest_commpage(void) 1472 { 1473 static const uint8_t kuser_page[4 + 2 * 64] = { 1474 /* __kuser_helper_version */ 1475 [0x00] = 0x02, 0x00, 0x00, 0x00, 1476 1477 /* __kuser_cmpxchg */ 1478 [0x04] = 0x3a, 0x6c, 0x3b, 0x00, /* trap 16 */ 1479 0x3a, 0x28, 0x00, 0xf8, /* ret */ 1480 1481 /* __kuser_sigtramp */ 1482 [0x44] = 0xc4, 0x22, 0x80, 0x00, /* movi r2, __NR_rt_sigreturn */ 1483 0x3a, 0x68, 0x3b, 0x00, /* trap 0 */ 1484 }; 1485 1486 void *want = g2h_untagged(LO_COMMPAGE & -qemu_host_page_size); 1487 void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE, 1488 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); 1489 1490 if (addr == MAP_FAILED) { 1491 perror("Allocating guest commpage"); 1492 exit(EXIT_FAILURE); 1493 } 1494 if (addr != want) { 1495 return false; 1496 } 1497 1498 memcpy(addr, kuser_page, sizeof(kuser_page)); 1499 1500 if (mprotect(addr, qemu_host_page_size, PROT_READ)) { 1501 perror("Protecting guest commpage"); 1502 exit(EXIT_FAILURE); 1503 } 1504 1505 page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, 1506 PAGE_READ | PAGE_EXEC | PAGE_VALID); 1507 return true; 1508 } 1509 1510 #define ELF_EXEC_PAGESIZE 4096 1511 1512 #define USE_ELF_CORE_DUMP 1513 #define ELF_NREG 49 1514 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1515 1516 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ 1517 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1518 const CPUNios2State *env) 1519 { 1520 int i; 1521 1522 (*regs)[0] = -1; 1523 for (i = 1; i < 8; i++) /* r0-r7 */ 1524 (*regs)[i] = tswapreg(env->regs[i + 7]); 1525 1526 for (i = 8; i < 16; i++) /* r8-r15 */ 1527 (*regs)[i] = tswapreg(env->regs[i - 8]); 1528 1529 for (i = 16; i < 24; i++) /* r16-r23 */ 1530 (*regs)[i] = tswapreg(env->regs[i + 7]); 1531 (*regs)[24] = -1; /* R_ET */ 1532 (*regs)[25] = -1; /* R_BT */ 1533 (*regs)[26] = tswapreg(env->regs[R_GP]); 1534 (*regs)[27] = tswapreg(env->regs[R_SP]); 1535 (*regs)[28] = tswapreg(env->regs[R_FP]); 1536 (*regs)[29] = tswapreg(env->regs[R_EA]); 1537 (*regs)[30] = -1; /* R_SSTATUS */ 1538 (*regs)[31] = tswapreg(env->regs[R_RA]); 1539 1540 (*regs)[32] = tswapreg(env->pc); 1541 1542 (*regs)[33] = -1; /* R_STATUS */ 1543 (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]); 1544 1545 for (i = 35; i < 49; i++) /* ... 
*/ 1546 (*regs)[i] = -1; 1547 } 1548 1549 #endif /* TARGET_NIOS2 */ 1550 1551 #ifdef TARGET_OPENRISC 1552 1553 #define ELF_ARCH EM_OPENRISC 1554 #define ELF_CLASS ELFCLASS32 1555 #define ELF_DATA ELFDATA2MSB 1556 1557 static inline void init_thread(struct target_pt_regs *regs, 1558 struct image_info *infop) 1559 { 1560 regs->pc = infop->entry; 1561 regs->gpr[1] = infop->start_stack; 1562 } 1563 1564 #define USE_ELF_CORE_DUMP 1565 #define ELF_EXEC_PAGESIZE 8192 1566 1567 /* See linux kernel arch/openrisc/include/asm/elf.h. */ 1568 #define ELF_NREG 34 /* gprs and pc, sr */ 1569 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1570 1571 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1572 const CPUOpenRISCState *env) 1573 { 1574 int i; 1575 1576 for (i = 0; i < 32; i++) { 1577 (*regs)[i] = tswapreg(cpu_get_gpr(env, i)); 1578 } 1579 (*regs)[32] = tswapreg(env->pc); 1580 (*regs)[33] = tswapreg(cpu_get_sr(env)); 1581 } 1582 #define ELF_HWCAP 0 1583 #define ELF_PLATFORM NULL 1584 1585 #endif /* TARGET_OPENRISC */ 1586 1587 #ifdef TARGET_SH4 1588 1589 #define ELF_CLASS ELFCLASS32 1590 #define ELF_ARCH EM_SH 1591 1592 static inline void init_thread(struct target_pt_regs *regs, 1593 struct image_info *infop) 1594 { 1595 /* Check other registers XXXXX */ 1596 regs->pc = infop->entry; 1597 regs->regs[15] = infop->start_stack; 1598 } 1599 1600 /* See linux kernel: arch/sh/include/asm/elf.h. */ 1601 #define ELF_NREG 23 1602 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1603 1604 /* See linux kernel: arch/sh/include/asm/ptrace.h. */ 1605 enum { 1606 TARGET_REG_PC = 16, 1607 TARGET_REG_PR = 17, 1608 TARGET_REG_SR = 18, 1609 TARGET_REG_GBR = 19, 1610 TARGET_REG_MACH = 20, 1611 TARGET_REG_MACL = 21, 1612 TARGET_REG_SYSCALL = 22 1613 }; 1614 1615 static inline void elf_core_copy_regs(target_elf_gregset_t *regs, 1616 const CPUSH4State *env) 1617 { 1618 int i; 1619 1620 for (i = 0; i < 16; i++) { 1621 (*regs)[i] = tswapreg(env->gregs[i]); 1622 } 1623 1624 (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 1625 (*regs)[TARGET_REG_PR] = tswapreg(env->pr); 1626 (*regs)[TARGET_REG_SR] = tswapreg(env->sr); 1627 (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr); 1628 (*regs)[TARGET_REG_MACH] = tswapreg(env->mach); 1629 (*regs)[TARGET_REG_MACL] = tswapreg(env->macl); 1630 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */ 1631 } 1632 1633 #define USE_ELF_CORE_DUMP 1634 #define ELF_EXEC_PAGESIZE 4096 1635 1636 enum { 1637 SH_CPU_HAS_FPU = 0x0001, /* Hardware FPU support */ 1638 SH_CPU_HAS_P2_FLUSH_BUG = 0x0002, /* Need to flush the cache in P2 area */ 1639 SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */ 1640 SH_CPU_HAS_DSP = 0x0008, /* SH-DSP: DSP support */ 1641 SH_CPU_HAS_PERF_COUNTER = 0x0010, /* Hardware performance counters */ 1642 SH_CPU_HAS_PTEA = 0x0020, /* PTEA register */ 1643 SH_CPU_HAS_LLSC = 0x0040, /* movli.l/movco.l */ 1644 SH_CPU_HAS_L2_CACHE = 0x0080, /* Secondary cache / URAM */ 1645 SH_CPU_HAS_OP32 = 0x0100, /* 32-bit instruction support */ 1646 SH_CPU_HAS_PTEAEX = 0x0200, /* PTE ASID Extension support */ 1647 }; 1648 1649 #define ELF_HWCAP get_elf_hwcap() 1650 1651 static uint32_t get_elf_hwcap(void) 1652 { 1653 SuperHCPU *cpu = SUPERH_CPU(thread_cpu); 1654 uint32_t hwcap = 0; 1655 1656 hwcap |= SH_CPU_HAS_FPU; 1657 1658 if (cpu->env.features & SH_FEATURE_SH4A) { 1659 hwcap |= SH_CPU_HAS_LLSC; 1660 } 1661 1662 return hwcap; 1663 } 1664 1665 #endif 1666 1667 #ifdef TARGET_CRIS 1668 1669 #define ELF_CLASS ELFCLASS32 1670 #define ELF_ARCH EM_CRIS 1671 1672 
static inline void init_thread(struct target_pt_regs *regs, 1673 struct image_info *infop) 1674 { 1675 regs->erp = infop->entry; 1676 } 1677 1678 #define ELF_EXEC_PAGESIZE 8192 1679 1680 #endif 1681 1682 #ifdef TARGET_M68K 1683 1684 #define ELF_CLASS ELFCLASS32 1685 #define ELF_ARCH EM_68K 1686 1687 /* ??? Does this need to do anything? 1688 #define ELF_PLAT_INIT(_r) */ 1689 1690 static inline void init_thread(struct target_pt_regs *regs, 1691 struct image_info *infop) 1692 { 1693 regs->usp = infop->start_stack; 1694 regs->sr = 0; 1695 regs->pc = infop->entry; 1696 } 1697 1698 /* See linux kernel: arch/m68k/include/asm/elf.h. */ 1699 #define ELF_NREG 20 1700 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1701 1702 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env) 1703 { 1704 (*regs)[0] = tswapreg(env->dregs[1]); 1705 (*regs)[1] = tswapreg(env->dregs[2]); 1706 (*regs)[2] = tswapreg(env->dregs[3]); 1707 (*regs)[3] = tswapreg(env->dregs[4]); 1708 (*regs)[4] = tswapreg(env->dregs[5]); 1709 (*regs)[5] = tswapreg(env->dregs[6]); 1710 (*regs)[6] = tswapreg(env->dregs[7]); 1711 (*regs)[7] = tswapreg(env->aregs[0]); 1712 (*regs)[8] = tswapreg(env->aregs[1]); 1713 (*regs)[9] = tswapreg(env->aregs[2]); 1714 (*regs)[10] = tswapreg(env->aregs[3]); 1715 (*regs)[11] = tswapreg(env->aregs[4]); 1716 (*regs)[12] = tswapreg(env->aregs[5]); 1717 (*regs)[13] = tswapreg(env->aregs[6]); 1718 (*regs)[14] = tswapreg(env->dregs[0]); 1719 (*regs)[15] = tswapreg(env->aregs[7]); 1720 (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */ 1721 (*regs)[17] = tswapreg(env->sr); 1722 (*regs)[18] = tswapreg(env->pc); 1723 (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */ 1724 } 1725 1726 #define USE_ELF_CORE_DUMP 1727 #define ELF_EXEC_PAGESIZE 8192 1728 1729 #endif 1730 1731 #ifdef TARGET_ALPHA 1732 1733 #define ELF_CLASS ELFCLASS64 1734 #define ELF_ARCH EM_ALPHA 1735 1736 static inline void init_thread(struct target_pt_regs *regs, 1737 struct image_info *infop) 1738 { 1739 regs->pc = infop->entry; 1740 regs->ps = 8; 1741 regs->usp = infop->start_stack; 1742 } 1743 1744 #define ELF_EXEC_PAGESIZE 8192 1745 1746 #endif /* TARGET_ALPHA */ 1747 1748 #ifdef TARGET_S390X 1749 1750 #define ELF_CLASS ELFCLASS64 1751 #define ELF_DATA ELFDATA2MSB 1752 #define ELF_ARCH EM_S390 1753 1754 #include "elf.h" 1755 1756 #define ELF_HWCAP get_elf_hwcap() 1757 1758 #define GET_FEATURE(_feat, _hwcap) \ 1759 do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0) 1760 1761 uint32_t get_elf_hwcap(void) 1762 { 1763 /* 1764 * Let's assume we always have esan3 and zarch. 1765 * 31-bit processes can use 64-bit registers (high gprs). 
1766 */ 1767 uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS; 1768 1769 GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE); 1770 GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA); 1771 GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP); 1772 GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM); 1773 if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) && 1774 s390_has_feat(S390_FEAT_ETF3_ENH)) { 1775 hwcap |= HWCAP_S390_ETF3EH; 1776 } 1777 GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS); 1778 GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT); 1779 GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2); 1780 1781 return hwcap; 1782 } 1783 1784 const char *elf_hwcap_str(uint32_t bit) 1785 { 1786 static const char *hwcap_str[] = { 1787 [HWCAP_S390_NR_ESAN3] = "esan3", 1788 [HWCAP_S390_NR_ZARCH] = "zarch", 1789 [HWCAP_S390_NR_STFLE] = "stfle", 1790 [HWCAP_S390_NR_MSA] = "msa", 1791 [HWCAP_S390_NR_LDISP] = "ldisp", 1792 [HWCAP_S390_NR_EIMM] = "eimm", 1793 [HWCAP_S390_NR_DFP] = "dfp", 1794 [HWCAP_S390_NR_HPAGE] = "edat", 1795 [HWCAP_S390_NR_ETF3EH] = "etf3eh", 1796 [HWCAP_S390_NR_HIGH_GPRS] = "highgprs", 1797 [HWCAP_S390_NR_TE] = "te", 1798 [HWCAP_S390_NR_VXRS] = "vx", 1799 [HWCAP_S390_NR_VXRS_BCD] = "vxd", 1800 [HWCAP_S390_NR_VXRS_EXT] = "vxe", 1801 [HWCAP_S390_NR_GS] = "gs", 1802 [HWCAP_S390_NR_VXRS_EXT2] = "vxe2", 1803 [HWCAP_S390_NR_VXRS_PDE] = "vxp", 1804 [HWCAP_S390_NR_SORT] = "sort", 1805 [HWCAP_S390_NR_DFLT] = "dflt", 1806 [HWCAP_S390_NR_NNPA] = "nnpa", 1807 [HWCAP_S390_NR_PCI_MIO] = "pcimio", 1808 [HWCAP_S390_NR_SIE] = "sie", 1809 }; 1810 1811 return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; 1812 } 1813 1814 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) 1815 { 1816 regs->psw.addr = infop->entry; 1817 regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ 1818 PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \ 1819 PSW_MASK_32; 1820 regs->gprs[15] = infop->start_stack; 1821 } 1822 1823 /* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs). 
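 * The layout is: psw mask and address, the 16 gprs, the 16 access
 * registers (packed as 32-bit values), and orig_gpr2; compare the
 * TARGET_REG_* enum and elf_core_copy_regs() below.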
*/ 1824 #define ELF_NREG 27 1825 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1826 1827 enum { 1828 TARGET_REG_PSWM = 0, 1829 TARGET_REG_PSWA = 1, 1830 TARGET_REG_GPRS = 2, 1831 TARGET_REG_ARS = 18, 1832 TARGET_REG_ORIG_R2 = 26, 1833 }; 1834 1835 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1836 const CPUS390XState *env) 1837 { 1838 int i; 1839 uint32_t *aregs; 1840 1841 (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask); 1842 (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr); 1843 for (i = 0; i < 16; i++) { 1844 (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]); 1845 } 1846 aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]); 1847 for (i = 0; i < 16; i++) { 1848 aregs[i] = tswap32(env->aregs[i]); 1849 } 1850 (*regs)[TARGET_REG_ORIG_R2] = 0; 1851 } 1852 1853 #define USE_ELF_CORE_DUMP 1854 #define ELF_EXEC_PAGESIZE 4096 1855 1856 #endif /* TARGET_S390X */ 1857 1858 #ifdef TARGET_RISCV 1859 1860 #define ELF_ARCH EM_RISCV 1861 1862 #ifdef TARGET_RISCV32 1863 #define ELF_CLASS ELFCLASS32 1864 #else 1865 #define ELF_CLASS ELFCLASS64 1866 #endif 1867 1868 #define ELF_HWCAP get_elf_hwcap() 1869 1870 static uint32_t get_elf_hwcap(void) 1871 { 1872 #define MISA_BIT(EXT) (1 << (EXT - 'A')) 1873 RISCVCPU *cpu = RISCV_CPU(thread_cpu); 1874 uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A') 1875 | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C') 1876 | MISA_BIT('V'); 1877 1878 return cpu->env.misa_ext & mask; 1879 #undef MISA_BIT 1880 } 1881 1882 static inline void init_thread(struct target_pt_regs *regs, 1883 struct image_info *infop) 1884 { 1885 regs->sepc = infop->entry; 1886 regs->sp = infop->start_stack; 1887 } 1888 1889 #define ELF_EXEC_PAGESIZE 4096 1890 1891 #endif /* TARGET_RISCV */ 1892 1893 #ifdef TARGET_HPPA 1894 1895 #define ELF_CLASS ELFCLASS32 1896 #define ELF_ARCH EM_PARISC 1897 #define ELF_PLATFORM "PARISC" 1898 #define STACK_GROWS_DOWN 0 1899 #define STACK_ALIGNMENT 64 1900 1901 static inline void init_thread(struct target_pt_regs *regs, 1902 struct image_info *infop) 1903 { 1904 regs->iaoq[0] = infop->entry; 1905 regs->iaoq[1] = infop->entry + 4; 1906 regs->gr[23] = 0; 1907 regs->gr[24] = infop->argv; 1908 regs->gr[25] = infop->argc; 1909 /* The top-of-stack contains a linkage buffer. */ 1910 regs->gr[30] = infop->start_stack + 64; 1911 regs->gr[31] = infop->entry; 1912 } 1913 1914 #define LO_COMMPAGE 0 1915 1916 static bool init_guest_commpage(void) 1917 { 1918 void *want = g2h_untagged(LO_COMMPAGE); 1919 void *addr = mmap(want, qemu_host_page_size, PROT_NONE, 1920 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0); 1921 1922 if (addr == MAP_FAILED) { 1923 perror("Allocating guest commpage"); 1924 exit(EXIT_FAILURE); 1925 } 1926 if (addr != want) { 1927 return false; 1928 } 1929 1930 /* 1931 * On Linux, page zero is normally marked execute only + gateway. 1932 * Normal read or write is supposed to fail (thus PROT_NONE above), 1933 * but specific offsets have kernel code mapped to raise permissions 1934 * and implement syscalls. Here, simply mark the page executable. 1935 * Special case the entry points during translation (see do_page_zero). 
1936 */ 1937 page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, 1938 PAGE_EXEC | PAGE_VALID); 1939 return true; 1940 } 1941 1942 #endif /* TARGET_HPPA */ 1943 1944 #ifdef TARGET_XTENSA 1945 1946 #define ELF_CLASS ELFCLASS32 1947 #define ELF_ARCH EM_XTENSA 1948 1949 static inline void init_thread(struct target_pt_regs *regs, 1950 struct image_info *infop) 1951 { 1952 regs->windowbase = 0; 1953 regs->windowstart = 1; 1954 regs->areg[1] = infop->start_stack; 1955 regs->pc = infop->entry; 1956 if (info_is_fdpic(infop)) { 1957 regs->areg[4] = infop->loadmap_addr; 1958 regs->areg[5] = infop->interpreter_loadmap_addr; 1959 if (infop->interpreter_loadmap_addr) { 1960 regs->areg[6] = infop->interpreter_pt_dynamic_addr; 1961 } else { 1962 regs->areg[6] = infop->pt_dynamic_addr; 1963 } 1964 } 1965 } 1966 1967 /* See linux kernel: arch/xtensa/include/asm/elf.h. */ 1968 #define ELF_NREG 128 1969 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; 1970 1971 enum { 1972 TARGET_REG_PC, 1973 TARGET_REG_PS, 1974 TARGET_REG_LBEG, 1975 TARGET_REG_LEND, 1976 TARGET_REG_LCOUNT, 1977 TARGET_REG_SAR, 1978 TARGET_REG_WINDOWSTART, 1979 TARGET_REG_WINDOWBASE, 1980 TARGET_REG_THREADPTR, 1981 TARGET_REG_AR0 = 64, 1982 }; 1983 1984 static void elf_core_copy_regs(target_elf_gregset_t *regs, 1985 const CPUXtensaState *env) 1986 { 1987 unsigned i; 1988 1989 (*regs)[TARGET_REG_PC] = tswapreg(env->pc); 1990 (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM); 1991 (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]); 1992 (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]); 1993 (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]); 1994 (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]); 1995 (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]); 1996 (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]); 1997 (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]); 1998 xtensa_sync_phys_from_window((CPUXtensaState *)env); 1999 for (i = 0; i < env->config->nareg; ++i) { 2000 (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]); 2001 } 2002 } 2003 2004 #define USE_ELF_CORE_DUMP 2005 #define ELF_EXEC_PAGESIZE 4096 2006 2007 #endif /* TARGET_XTENSA */ 2008 2009 #ifdef TARGET_HEXAGON 2010 2011 #define ELF_CLASS ELFCLASS32 2012 #define ELF_ARCH EM_HEXAGON 2013 2014 static inline void init_thread(struct target_pt_regs *regs, 2015 struct image_info *infop) 2016 { 2017 regs->sepc = infop->entry; 2018 regs->sp = infop->start_stack; 2019 } 2020 2021 #endif /* TARGET_HEXAGON */ 2022 2023 #ifndef ELF_BASE_PLATFORM 2024 #define ELF_BASE_PLATFORM (NULL) 2025 #endif 2026 2027 #ifndef ELF_PLATFORM 2028 #define ELF_PLATFORM (NULL) 2029 #endif 2030 2031 #ifndef ELF_MACHINE 2032 #define ELF_MACHINE ELF_ARCH 2033 #endif 2034 2035 #ifndef elf_check_arch 2036 #define elf_check_arch(x) ((x) == ELF_ARCH) 2037 #endif 2038 2039 #ifndef elf_check_abi 2040 #define elf_check_abi(x) (1) 2041 #endif 2042 2043 #ifndef ELF_HWCAP 2044 #define ELF_HWCAP 0 2045 #endif 2046 2047 #ifndef STACK_GROWS_DOWN 2048 #define STACK_GROWS_DOWN 1 2049 #endif 2050 2051 #ifndef STACK_ALIGNMENT 2052 #define STACK_ALIGNMENT 16 2053 #endif 2054 2055 #ifdef TARGET_ABI32 2056 #undef ELF_CLASS 2057 #define ELF_CLASS ELFCLASS32 2058 #undef bswaptls 2059 #define bswaptls(ptr) bswap32s(ptr) 2060 #endif 2061 2062 #ifndef EXSTACK_DEFAULT 2063 #define EXSTACK_DEFAULT false 2064 #endif 2065 2066 #include "elf.h" 2067 2068 /* We must delay the following stanzas until after "elf.h". 
*/ 2069 #if defined(TARGET_AARCH64) 2070 2071 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2072 const uint32_t *data, 2073 struct image_info *info, 2074 Error **errp) 2075 { 2076 if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { 2077 if (pr_datasz != sizeof(uint32_t)) { 2078 error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND"); 2079 return false; 2080 } 2081 /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */ 2082 info->note_flags = *data; 2083 } 2084 return true; 2085 } 2086 #define ARCH_USE_GNU_PROPERTY 1 2087 2088 #else 2089 2090 static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, 2091 const uint32_t *data, 2092 struct image_info *info, 2093 Error **errp) 2094 { 2095 g_assert_not_reached(); 2096 } 2097 #define ARCH_USE_GNU_PROPERTY 0 2098 2099 #endif 2100 2101 struct exec 2102 { 2103 unsigned int a_info; /* Use macros N_MAGIC, etc for access */ 2104 unsigned int a_text; /* length of text, in bytes */ 2105 unsigned int a_data; /* length of data, in bytes */ 2106 unsigned int a_bss; /* length of uninitialized data area, in bytes */ 2107 unsigned int a_syms; /* length of symbol table data in file, in bytes */ 2108 unsigned int a_entry; /* start address */ 2109 unsigned int a_trsize; /* length of relocation info for text, in bytes */ 2110 unsigned int a_drsize; /* length of relocation info for data, in bytes */ 2111 }; 2112 2113 2114 #define N_MAGIC(exec) ((exec).a_info & 0xffff) 2115 #define OMAGIC 0407 2116 #define NMAGIC 0410 2117 #define ZMAGIC 0413 2118 #define QMAGIC 0314 2119 2120 #define DLINFO_ITEMS 16 2121 2122 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n) 2123 { 2124 memcpy(to, from, n); 2125 } 2126 2127 #ifdef BSWAP_NEEDED 2128 static void bswap_ehdr(struct elfhdr *ehdr) 2129 { 2130 bswap16s(&ehdr->e_type); /* Object file type */ 2131 bswap16s(&ehdr->e_machine); /* Architecture */ 2132 bswap32s(&ehdr->e_version); /* Object file version */ 2133 bswaptls(&ehdr->e_entry); /* Entry point virtual address */ 2134 bswaptls(&ehdr->e_phoff); /* Program header table file offset */ 2135 bswaptls(&ehdr->e_shoff); /* Section header table file offset */ 2136 bswap32s(&ehdr->e_flags); /* Processor-specific flags */ 2137 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */ 2138 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */ 2139 bswap16s(&ehdr->e_phnum); /* Program header table entry count */ 2140 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */ 2141 bswap16s(&ehdr->e_shnum); /* Section header table entry count */ 2142 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */ 2143 } 2144 2145 static void bswap_phdr(struct elf_phdr *phdr, int phnum) 2146 { 2147 int i; 2148 for (i = 0; i < phnum; ++i, ++phdr) { 2149 bswap32s(&phdr->p_type); /* Segment type */ 2150 bswap32s(&phdr->p_flags); /* Segment flags */ 2151 bswaptls(&phdr->p_offset); /* Segment file offset */ 2152 bswaptls(&phdr->p_vaddr); /* Segment virtual address */ 2153 bswaptls(&phdr->p_paddr); /* Segment physical address */ 2154 bswaptls(&phdr->p_filesz); /* Segment size in file */ 2155 bswaptls(&phdr->p_memsz); /* Segment size in memory */ 2156 bswaptls(&phdr->p_align); /* Segment alignment */ 2157 } 2158 } 2159 2160 static void bswap_shdr(struct elf_shdr *shdr, int shnum) 2161 { 2162 int i; 2163 for (i = 0; i < shnum; ++i, ++shdr) { 2164 bswap32s(&shdr->sh_name); 2165 bswap32s(&shdr->sh_type); 2166 bswaptls(&shdr->sh_flags); 2167 bswaptls(&shdr->sh_addr); 2168 
        bswaptls(&shdr->sh_offset);
        bswaptls(&shdr->sh_size);
        bswap32s(&shdr->sh_link);
        bswap32s(&shdr->sh_info);
        bswaptls(&shdr->sh_addralign);
        bswaptls(&shdr->sh_entsize);
    }
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}

#ifdef TARGET_MIPS
static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
{
    bswap16s(&abiflags->version);
    bswap32s(&abiflags->ases);
    bswap32s(&abiflags->isa_ext);
    bswap32s(&abiflags->flags1);
    bswap32s(&abiflags->flags2);
}
#endif
#else
static inline void bswap_ehdr(struct elfhdr *ehdr) { }
static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
static inline void bswap_sym(struct elf_sym *sym) { }
#ifdef TARGET_MIPS
static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
#endif
#endif

#ifdef USE_ELF_CORE_DUMP
static int elf_core_dump(int, const CPUArchState *);
#endif /* USE_ELF_CORE_DUMP */
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);

/* Verify the portions of EHDR within E_IDENT for the target.
   This can be performed before bswapping the entire header.  */
static bool elf_check_ident(struct elfhdr *ehdr)
{
    return (ehdr->e_ident[EI_MAG0] == ELFMAG0
            && ehdr->e_ident[EI_MAG1] == ELFMAG1
            && ehdr->e_ident[EI_MAG2] == ELFMAG2
            && ehdr->e_ident[EI_MAG3] == ELFMAG3
            && ehdr->e_ident[EI_CLASS] == ELF_CLASS
            && ehdr->e_ident[EI_DATA] == ELF_DATA
            && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
}

/* Verify the portions of EHDR outside of E_IDENT for the target.
   This has to wait until after bswapping the header.  */
static bool elf_check_ehdr(struct elfhdr *ehdr)
{
    return (elf_check_arch(ehdr->e_machine)
            && elf_check_abi(ehdr->e_flags)
            && ehdr->e_ehsize == sizeof(struct elfhdr)
            && ehdr->e_phentsize == sizeof(struct elf_phdr)
            && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
}

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
                                  abi_ulong p, abi_ulong stack_limit)
{
    char *tmp;
    int len, i;
    abi_ulong top = p;

    if (!p) {
        return 0;       /* bullet-proofing */
    }

    if (STACK_GROWS_DOWN) {
        int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
        for (i = argc - 1; i >= 0; --i) {
            tmp = argv[i];
            if (!tmp) {
                fprintf(stderr, "VFS: argc is wrong");
                exit(-1);
            }
            len = strlen(tmp) + 1;
            tmp += len;

            if (len > (p - stack_limit)) {
                return 0;
            }
            while (len) {
                int bytes_to_copy = (len > offset) ?
offset : len; 2267 tmp -= bytes_to_copy; 2268 p -= bytes_to_copy; 2269 offset -= bytes_to_copy; 2270 len -= bytes_to_copy; 2271 2272 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy); 2273 2274 if (offset == 0) { 2275 memcpy_to_target(p, scratch, top - p); 2276 top = p; 2277 offset = TARGET_PAGE_SIZE; 2278 } 2279 } 2280 } 2281 if (p != top) { 2282 memcpy_to_target(p, scratch + offset, top - p); 2283 } 2284 } else { 2285 int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE); 2286 for (i = 0; i < argc; ++i) { 2287 tmp = argv[i]; 2288 if (!tmp) { 2289 fprintf(stderr, "VFS: argc is wrong"); 2290 exit(-1); 2291 } 2292 len = strlen(tmp) + 1; 2293 if (len > (stack_limit - p)) { 2294 return 0; 2295 } 2296 while (len) { 2297 int bytes_to_copy = (len > remaining) ? remaining : len; 2298 2299 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy); 2300 2301 tmp += bytes_to_copy; 2302 remaining -= bytes_to_copy; 2303 p += bytes_to_copy; 2304 len -= bytes_to_copy; 2305 2306 if (remaining == 0) { 2307 memcpy_to_target(top, scratch, p - top); 2308 top = p; 2309 remaining = TARGET_PAGE_SIZE; 2310 } 2311 } 2312 } 2313 if (p != top) { 2314 memcpy_to_target(top, scratch, p - top); 2315 } 2316 } 2317 2318 return p; 2319 } 2320 2321 /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of 2322 * argument/environment space. Newer kernels (>2.6.33) allow more, 2323 * dependent on stack size, but guarantee at least 32 pages for 2324 * backwards compatibility. 2325 */ 2326 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE) 2327 2328 static abi_ulong setup_arg_pages(struct linux_binprm *bprm, 2329 struct image_info *info) 2330 { 2331 abi_ulong size, error, guard; 2332 int prot; 2333 2334 size = guest_stack_size; 2335 if (size < STACK_LOWER_LIMIT) { 2336 size = STACK_LOWER_LIMIT; 2337 } 2338 2339 if (STACK_GROWS_DOWN) { 2340 guard = TARGET_PAGE_SIZE; 2341 if (guard < qemu_real_host_page_size()) { 2342 guard = qemu_real_host_page_size(); 2343 } 2344 } else { 2345 /* no guard page for hppa target where stack grows upwards. */ 2346 guard = 0; 2347 } 2348 2349 prot = PROT_READ | PROT_WRITE; 2350 if (info->exec_stack) { 2351 prot |= PROT_EXEC; 2352 } 2353 error = target_mmap(0, size + guard, prot, 2354 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); 2355 if (error == -1) { 2356 perror("mmap stack"); 2357 exit(-1); 2358 } 2359 2360 /* We reserve one extra page at the top of the stack as guard. */ 2361 if (STACK_GROWS_DOWN) { 2362 target_mprotect(error, guard, PROT_NONE); 2363 info->stack_limit = error + guard; 2364 return info->stack_limit + size - sizeof(void *); 2365 } else { 2366 info->stack_limit = error + size; 2367 return error; 2368 } 2369 } 2370 2371 /** 2372 * zero_bss: 2373 * 2374 * Map and zero the bss. We need to explicitly zero any fractional pages 2375 * after the data section (i.e. bss). Return false on mapping failure. 2376 */ 2377 static bool zero_bss(abi_ulong start_bss, abi_ulong end_bss, 2378 int prot, Error **errp) 2379 { 2380 abi_ulong align_bss; 2381 2382 /* We only expect writable bss; the code segment shouldn't need this. */ 2383 if (!(prot & PROT_WRITE)) { 2384 error_setg(errp, "PT_LOAD with non-writable bss"); 2385 return false; 2386 } 2387 2388 align_bss = TARGET_PAGE_ALIGN(start_bss); 2389 end_bss = TARGET_PAGE_ALIGN(end_bss); 2390 2391 if (start_bss < align_bss) { 2392 int flags = page_get_flags(start_bss); 2393 2394 if (!(flags & PAGE_BITS)) { 2395 /* 2396 * The whole address space of the executable was reserved 2397 * at the start, therefore all pages will be VALID. 
             * But assuming there are no PROT_NONE PT_LOAD segments,
             * a PROT_NONE page means no data, only bss, and we can
             * simply extend the new anon mapping back to the start
             * of the page of bss.
             */
            align_bss -= TARGET_PAGE_SIZE;
        } else {
            /*
             * The start of the bss shares a page with something.
             * The only thing that we expect is the data section,
             * which would already be marked writable.
             * Overlapping the RX code segment seems malformed.
             */
            if (!(flags & PAGE_WRITE)) {
                error_setg(errp, "PT_LOAD with bss overlapping "
                           "non-writable page");
                return false;
            }

            /* The page is already mapped and writable. */
            memset(g2h_untagged(start_bss), 0, align_bss - start_bss);
        }
    }

    if (align_bss < end_bss &&
        target_mmap(align_bss, end_bss - align_bss, prot,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        error_setg_errno(errp, errno, "Error mapping bss");
        return false;
    }
    return true;
}

#if defined(TARGET_ARM)
static int elf_is_fdpic(struct elfhdr *exec)
{
    return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
}
#elif defined(TARGET_XTENSA)
static int elf_is_fdpic(struct elfhdr *exec)
{
    return exec->e_ident[EI_OSABI] == ELFOSABI_XTENSA_FDPIC;
}
#else
/* Default implementation, always false.  */
static int elf_is_fdpic(struct elfhdr *exec)
{
    return 0;
}
#endif

static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
{
    uint16_t n;
    struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;

    /* elf32_fdpic_loadseg */
    n = info->nsegs;
    while (n--) {
        sp -= 12;
        put_user_u32(loadsegs[n].addr, sp+0);
        put_user_u32(loadsegs[n].p_vaddr, sp+4);
        put_user_u32(loadsegs[n].p_memsz, sp+8);
    }

    /* elf32_fdpic_loadmap */
    sp -= 4;
    put_user_u16(0, sp+0);              /* version */
    put_user_u16(info->nsegs, sp+2);    /* nsegs */

    info->personality = PER_LINUX_FDPIC;
    info->loadmap_addr = sp;

    return sp;
}

static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr *exec,
                                   struct image_info *info,
                                   struct image_info *interp_info)
{
    abi_ulong sp;
    abi_ulong u_argc, u_argv, u_envp, u_auxv;
    int size;
    int i;
    abi_ulong u_rand_bytes;
    uint8_t k_rand_bytes[16];
    abi_ulong u_platform, u_base_platform;
    const char *k_platform, *k_base_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;

    /* Needs to be before we load the env/argc/...
*/ 2492 if (elf_is_fdpic(exec)) { 2493 /* Need 4 byte alignment for these structs */ 2494 sp &= ~3; 2495 sp = loader_build_fdpic_loadmap(info, sp); 2496 info->other_info = interp_info; 2497 if (interp_info) { 2498 interp_info->other_info = info; 2499 sp = loader_build_fdpic_loadmap(interp_info, sp); 2500 info->interpreter_loadmap_addr = interp_info->loadmap_addr; 2501 info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr; 2502 } else { 2503 info->interpreter_loadmap_addr = 0; 2504 info->interpreter_pt_dynamic_addr = 0; 2505 } 2506 } 2507 2508 u_base_platform = 0; 2509 k_base_platform = ELF_BASE_PLATFORM; 2510 if (k_base_platform) { 2511 size_t len = strlen(k_base_platform) + 1; 2512 if (STACK_GROWS_DOWN) { 2513 sp -= (len + n - 1) & ~(n - 1); 2514 u_base_platform = sp; 2515 /* FIXME - check return value of memcpy_to_target() for failure */ 2516 memcpy_to_target(sp, k_base_platform, len); 2517 } else { 2518 memcpy_to_target(sp, k_base_platform, len); 2519 u_base_platform = sp; 2520 sp += len + 1; 2521 } 2522 } 2523 2524 u_platform = 0; 2525 k_platform = ELF_PLATFORM; 2526 if (k_platform) { 2527 size_t len = strlen(k_platform) + 1; 2528 if (STACK_GROWS_DOWN) { 2529 sp -= (len + n - 1) & ~(n - 1); 2530 u_platform = sp; 2531 /* FIXME - check return value of memcpy_to_target() for failure */ 2532 memcpy_to_target(sp, k_platform, len); 2533 } else { 2534 memcpy_to_target(sp, k_platform, len); 2535 u_platform = sp; 2536 sp += len + 1; 2537 } 2538 } 2539 2540 /* Provide 16 byte alignment for the PRNG, and basic alignment for 2541 * the argv and envp pointers. 2542 */ 2543 if (STACK_GROWS_DOWN) { 2544 sp = QEMU_ALIGN_DOWN(sp, 16); 2545 } else { 2546 sp = QEMU_ALIGN_UP(sp, 16); 2547 } 2548 2549 /* 2550 * Generate 16 random bytes for userspace PRNG seeding. 2551 */ 2552 qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes)); 2553 if (STACK_GROWS_DOWN) { 2554 sp -= 16; 2555 u_rand_bytes = sp; 2556 /* FIXME - check return value of memcpy_to_target() for failure */ 2557 memcpy_to_target(sp, k_rand_bytes, 16); 2558 } else { 2559 memcpy_to_target(sp, k_rand_bytes, 16); 2560 u_rand_bytes = sp; 2561 sp += 16; 2562 } 2563 2564 size = (DLINFO_ITEMS + 1) * 2; 2565 if (k_base_platform) 2566 size += 2; 2567 if (k_platform) 2568 size += 2; 2569 #ifdef DLINFO_ARCH_ITEMS 2570 size += DLINFO_ARCH_ITEMS * 2; 2571 #endif 2572 #ifdef ELF_HWCAP2 2573 size += 2; 2574 #endif 2575 info->auxv_len = size * n; 2576 2577 size += envc + argc + 2; 2578 size += 1; /* argc itself */ 2579 size *= n; 2580 2581 /* Allocate space and finalize stack alignment for entry now. */ 2582 if (STACK_GROWS_DOWN) { 2583 u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT); 2584 sp = u_argc; 2585 } else { 2586 u_argc = sp; 2587 sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT); 2588 } 2589 2590 u_argv = u_argc + n; 2591 u_envp = u_argv + (argc + 1) * n; 2592 u_auxv = u_envp + (envc + 1) * n; 2593 info->saved_auxv = u_auxv; 2594 info->argc = argc; 2595 info->envc = envc; 2596 info->argv = u_argv; 2597 info->envp = u_envp; 2598 2599 /* This is correct because Linux defines 2600 * elf_addr_t as Elf32_Off / Elf64_Off 2601 */ 2602 #define NEW_AUX_ENT(id, val) do { \ 2603 put_user_ual(id, u_auxv); u_auxv += n; \ 2604 put_user_ual(val, u_auxv); u_auxv += n; \ 2605 } while(0) 2606 2607 #ifdef ARCH_DLINFO 2608 /* 2609 * ARCH_DLINFO must come first so platform specific code can enforce 2610 * special alignment requirements on the AUXV if necessary (eg. PPC). 
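     * Entries emitted by ARCH_DLINFO must be counted in DLINFO_ARCH_ITEMS,
     * just as the generic entries below are counted by DLINFO_ITEMS.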
2611 */ 2612 ARCH_DLINFO; 2613 #endif 2614 /* There must be exactly DLINFO_ITEMS entries here, or the assert 2615 * on info->auxv_len will trigger. 2616 */ 2617 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff)); 2618 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr))); 2619 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum)); 2620 if ((info->alignment & ~qemu_host_page_mask) != 0) { 2621 /* Target doesn't support host page size alignment */ 2622 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE)); 2623 } else { 2624 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE, 2625 qemu_host_page_size))); 2626 } 2627 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0)); 2628 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0); 2629 NEW_AUX_ENT(AT_ENTRY, info->entry); 2630 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid()); 2631 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid()); 2632 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid()); 2633 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid()); 2634 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP); 2635 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK)); 2636 NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes); 2637 NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE)); 2638 NEW_AUX_ENT(AT_EXECFN, info->file_string); 2639 2640 #ifdef ELF_HWCAP2 2641 NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2); 2642 #endif 2643 2644 if (u_base_platform) { 2645 NEW_AUX_ENT(AT_BASE_PLATFORM, u_base_platform); 2646 } 2647 if (u_platform) { 2648 NEW_AUX_ENT(AT_PLATFORM, u_platform); 2649 } 2650 NEW_AUX_ENT (AT_NULL, 0); 2651 #undef NEW_AUX_ENT 2652 2653 /* Check that our initial calculation of the auxv length matches how much 2654 * we actually put into it. 2655 */ 2656 assert(info->auxv_len == u_auxv - info->saved_auxv); 2657 2658 put_user_ual(argc, u_argc); 2659 2660 p = info->arg_strings; 2661 for (i = 0; i < argc; ++i) { 2662 put_user_ual(p, u_argv); 2663 u_argv += n; 2664 p += target_strlen(p) + 1; 2665 } 2666 put_user_ual(0, u_argv); 2667 2668 p = info->env_strings; 2669 for (i = 0; i < envc; ++i) { 2670 put_user_ual(p, u_envp); 2671 u_envp += n; 2672 p += target_strlen(p) + 1; 2673 } 2674 put_user_ual(0, u_envp); 2675 2676 return sp; 2677 } 2678 2679 #if defined(HI_COMMPAGE) 2680 #define LO_COMMPAGE -1 2681 #elif defined(LO_COMMPAGE) 2682 #define HI_COMMPAGE 0 2683 #else 2684 #define HI_COMMPAGE 0 2685 #define LO_COMMPAGE -1 2686 #ifndef INIT_GUEST_COMMPAGE 2687 #define init_guest_commpage() true 2688 #endif 2689 #endif 2690 2691 /** 2692 * pgb_try_mmap: 2693 * @addr: host start address 2694 * @addr_last: host last address 2695 * @keep: do not unmap the probe region 2696 * 2697 * Return 1 if [@addr, @addr_last] is not mapped in the host, 2698 * return 0 if it is not available to map, and -1 on mmap error. 2699 * If @keep, the region is left mapped on success, otherwise unmapped. 2700 */ 2701 static int pgb_try_mmap(uintptr_t addr, uintptr_t addr_last, bool keep) 2702 { 2703 size_t size = addr_last - addr + 1; 2704 void *p = mmap((void *)addr, size, PROT_NONE, 2705 MAP_ANONYMOUS | MAP_PRIVATE | 2706 MAP_NORESERVE | MAP_FIXED_NOREPLACE, -1, 0); 2707 int ret; 2708 2709 if (p == MAP_FAILED) { 2710 return errno == EEXIST ? 
0 : -1;
    }
    ret = p == (void *)addr;
    if (!keep || !ret) {
        munmap(p, size);
    }
    return ret;
}

/**
 * pgb_try_mmap_skip_brk:
 * @addr: host start address
 * @addr_last: host last address
 * @brk: host brk
 * @keep: do not unmap the probe region
 *
 * Like pgb_try_mmap, but additionally reserve some memory following brk.
 */
static int pgb_try_mmap_skip_brk(uintptr_t addr, uintptr_t addr_last,
                                 uintptr_t brk, bool keep)
{
    uintptr_t brk_last = brk + 16 * MiB - 1;

    /* Do not map anything close to the host brk. */
    if (addr <= brk_last && brk <= addr_last) {
        return 0;
    }
    return pgb_try_mmap(addr, addr_last, keep);
}

/**
 * pgb_try_mmap_set:
 * @ga: set of guest addrs
 * @base: guest_base
 * @brk: host brk
 *
 * Return true if all @ga can be mapped by the host at @base.
 * On success, retain the mapping at index 0 for reserved_va.
 */

typedef struct PGBAddrs {
    uintptr_t bounds[3][2]; /* start/last pairs */
    int nbounds;
} PGBAddrs;

static bool pgb_try_mmap_set(const PGBAddrs *ga, uintptr_t base, uintptr_t brk)
{
    for (int i = ga->nbounds - 1; i >= 0; --i) {
        if (pgb_try_mmap_skip_brk(ga->bounds[i][0] + base,
                                  ga->bounds[i][1] + base,
                                  brk, i == 0 && reserved_va) <= 0) {
            return false;
        }
    }
    return true;
}

/**
 * pgb_addr_set:
 * @ga: output set of guest addrs
 * @guest_loaddr: guest image low address
 * @guest_hiaddr: guest image high address
 * @try_identity: create for identity mapping
 *
 * Fill in @ga with the image, COMMPAGE and NULL page.
 */
static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr,
                         abi_ulong guest_hiaddr, bool try_identity)
{
    int n;

    /*
     * With a low commpage, or a guest mapped very low,
     * we may not be able to use the identity map.
     */
    if (try_identity) {
        if (LO_COMMPAGE != -1 && LO_COMMPAGE < mmap_min_addr) {
            return false;
        }
        if (guest_loaddr != 0 && guest_loaddr < mmap_min_addr) {
            return false;
        }
    }

    memset(ga, 0, sizeof(*ga));
    n = 0;

    if (reserved_va) {
        ga->bounds[n][0] = try_identity ? mmap_min_addr : 0;
        ga->bounds[n][1] = reserved_va;
        n++;
        /* LO_COMMPAGE and NULL handled by reserving from 0. */
    } else {
        /* Add any LO_COMMPAGE or NULL page. */
        if (LO_COMMPAGE != -1) {
            ga->bounds[n][0] = 0;
            ga->bounds[n][1] = LO_COMMPAGE + TARGET_PAGE_SIZE - 1;
            n++;
        } else if (!try_identity) {
            ga->bounds[n][0] = 0;
            ga->bounds[n][1] = TARGET_PAGE_SIZE - 1;
            n++;
        }

        /* Add the guest image for ET_EXEC. */
        if (guest_loaddr) {
            ga->bounds[n][0] = guest_loaddr;
            ga->bounds[n][1] = guest_hiaddr;
            n++;
        }
    }

    /*
     * Temporarily disable
     * "comparison is always false due to limited range of data type"
     * due to comparison between unsigned and (possible) 0.
     */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wtype-limits"

    /* Add any HI_COMMPAGE not covered by reserved_va.
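     * When HI_COMMPAGE is defined away to 0, "reserved_va < HI_COMMPAGE"
     * is always false for these unsigned types; that is precisely the
     * comparison the -Wtype-limits suppression above exists for.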
*/ 2830 if (reserved_va < HI_COMMPAGE) { 2831 ga->bounds[n][0] = HI_COMMPAGE & qemu_host_page_mask; 2832 ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1; 2833 n++; 2834 } 2835 2836 #pragma GCC diagnostic pop 2837 2838 ga->nbounds = n; 2839 return true; 2840 } 2841 2842 static void pgb_fail_in_use(const char *image_name) 2843 { 2844 error_report("%s: requires virtual address space that is in use " 2845 "(omit the -B option or choose a different value)", 2846 image_name); 2847 exit(EXIT_FAILURE); 2848 } 2849 2850 static void pgb_fixed(const char *image_name, uintptr_t guest_loaddr, 2851 uintptr_t guest_hiaddr, uintptr_t align) 2852 { 2853 PGBAddrs ga; 2854 uintptr_t brk = (uintptr_t)sbrk(0); 2855 2856 if (!QEMU_IS_ALIGNED(guest_base, align)) { 2857 fprintf(stderr, "Requested guest base %p does not satisfy " 2858 "host minimum alignment (0x%" PRIxPTR ")\n", 2859 (void *)guest_base, align); 2860 exit(EXIT_FAILURE); 2861 } 2862 2863 if (!pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, !guest_base) 2864 || !pgb_try_mmap_set(&ga, guest_base, brk)) { 2865 pgb_fail_in_use(image_name); 2866 } 2867 } 2868 2869 /** 2870 * pgb_find_fallback: 2871 * 2872 * This is a fallback method for finding holes in the host address space 2873 * if we don't have the benefit of being able to access /proc/self/map. 2874 * It can potentially take a very long time as we can only dumbly iterate 2875 * up the host address space seeing if the allocation would work. 2876 */ 2877 static uintptr_t pgb_find_fallback(const PGBAddrs *ga, uintptr_t align, 2878 uintptr_t brk) 2879 { 2880 /* TODO: come up with a better estimate of how much to skip. */ 2881 uintptr_t skip = sizeof(uintptr_t) == 4 ? MiB : GiB; 2882 2883 for (uintptr_t base = skip; ; base += skip) { 2884 base = ROUND_UP(base, align); 2885 if (pgb_try_mmap_set(ga, base, brk)) { 2886 return base; 2887 } 2888 if (base >= -skip) { 2889 return -1; 2890 } 2891 } 2892 } 2893 2894 static uintptr_t pgb_try_itree(const PGBAddrs *ga, uintptr_t base, 2895 IntervalTreeRoot *root) 2896 { 2897 for (int i = ga->nbounds - 1; i >= 0; --i) { 2898 uintptr_t s = base + ga->bounds[i][0]; 2899 uintptr_t l = base + ga->bounds[i][1]; 2900 IntervalTreeNode *n; 2901 2902 if (l < s) { 2903 /* Wraparound. Skip to advance S to mmap_min_addr. */ 2904 return mmap_min_addr - s; 2905 } 2906 2907 n = interval_tree_iter_first(root, s, l); 2908 if (n != NULL) { 2909 /* Conflict. Skip to advance S to LAST + 1. */ 2910 return n->last - s + 1; 2911 } 2912 } 2913 return 0; /* success */ 2914 } 2915 2916 static uintptr_t pgb_find_itree(const PGBAddrs *ga, IntervalTreeRoot *root, 2917 uintptr_t align, uintptr_t brk) 2918 { 2919 uintptr_t last = mmap_min_addr; 2920 uintptr_t base, skip; 2921 2922 while (true) { 2923 base = ROUND_UP(last, align); 2924 if (base < last) { 2925 return -1; 2926 } 2927 2928 skip = pgb_try_itree(ga, base, root); 2929 if (skip == 0) { 2930 break; 2931 } 2932 2933 last = base + skip; 2934 if (last < base) { 2935 return -1; 2936 } 2937 } 2938 2939 /* 2940 * We've chosen 'base' based on holes in the interval tree, 2941 * but we don't yet know if it is a valid host address. 2942 * Because it is the first matching hole, if the host addresses 2943 * are invalid we know there are no further matches. 2944 */ 2945 return pgb_try_mmap_set(ga, base, brk) ? 
base : -1; 2946 } 2947 2948 static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr, 2949 uintptr_t guest_hiaddr, uintptr_t align) 2950 { 2951 IntervalTreeRoot *root; 2952 uintptr_t brk, ret; 2953 PGBAddrs ga; 2954 2955 assert(QEMU_IS_ALIGNED(guest_loaddr, align)); 2956 2957 /* Try the identity map first. */ 2958 if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) { 2959 brk = (uintptr_t)sbrk(0); 2960 if (pgb_try_mmap_set(&ga, 0, brk)) { 2961 guest_base = 0; 2962 return; 2963 } 2964 } 2965 2966 /* 2967 * Rebuild the address set for non-identity map. 2968 * This differs in the mapping of the guest NULL page. 2969 */ 2970 pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, false); 2971 2972 root = read_self_maps(); 2973 2974 /* Read brk after we've read the maps, which will malloc. */ 2975 brk = (uintptr_t)sbrk(0); 2976 2977 if (!root) { 2978 ret = pgb_find_fallback(&ga, align, brk); 2979 } else { 2980 /* 2981 * Reserve the area close to the host brk. 2982 * This will be freed with the rest of the tree. 2983 */ 2984 IntervalTreeNode *b = g_new0(IntervalTreeNode, 1); 2985 b->start = brk; 2986 b->last = brk + 16 * MiB - 1; 2987 interval_tree_insert(b, root); 2988 2989 ret = pgb_find_itree(&ga, root, align, brk); 2990 free_self_maps(root); 2991 } 2992 2993 if (ret == -1) { 2994 int w = TARGET_LONG_BITS / 4; 2995 2996 error_report("%s: Unable to find a guest_base to satisfy all " 2997 "guest address mapping requirements", image_name); 2998 2999 for (int i = 0; i < ga.nbounds; ++i) { 3000 error_printf(" %0*" PRIx64 "-%0*" PRIx64 "\n", 3001 w, (uint64_t)ga.bounds[i][0], 3002 w, (uint64_t)ga.bounds[i][1]); 3003 } 3004 exit(EXIT_FAILURE); 3005 } 3006 guest_base = ret; 3007 } 3008 3009 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr, 3010 abi_ulong guest_hiaddr) 3011 { 3012 /* In order to use host shmat, we must be able to honor SHMLBA. */ 3013 uintptr_t align = MAX(SHMLBA, qemu_host_page_size); 3014 3015 /* Sanity check the guest binary. */ 3016 if (reserved_va) { 3017 if (guest_hiaddr > reserved_va) { 3018 error_report("%s: requires more than reserved virtual " 3019 "address space (0x%" PRIx64 " > 0x%lx)", 3020 image_name, (uint64_t)guest_hiaddr, reserved_va); 3021 exit(EXIT_FAILURE); 3022 } 3023 } else { 3024 if (guest_hiaddr != (uintptr_t)guest_hiaddr) { 3025 error_report("%s: requires more virtual address space " 3026 "than the host can provide (0x%" PRIx64 ")", 3027 image_name, (uint64_t)guest_hiaddr + 1); 3028 exit(EXIT_FAILURE); 3029 } 3030 } 3031 3032 if (have_guest_base) { 3033 pgb_fixed(image_name, guest_loaddr, guest_hiaddr, align); 3034 } else { 3035 pgb_dynamic(image_name, guest_loaddr, guest_hiaddr, align); 3036 } 3037 3038 /* Reserve and initialize the commpage. */ 3039 if (!init_guest_commpage()) { 3040 /* We have already probed for the commpage being free. */ 3041 g_assert_not_reached(); 3042 } 3043 3044 assert(QEMU_IS_ALIGNED(guest_base, align)); 3045 qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space " 3046 "@ 0x%" PRIx64 "\n", (uint64_t)guest_base); 3047 } 3048 3049 enum { 3050 /* The string "GNU\0" as a magic number. */ 3051 GNU0_MAGIC = const_le32('G' | 'N' << 8 | 'U' << 16), 3052 NOTE_DATA_SZ = 1 * KiB, 3053 NOTE_NAME_SZ = 4, 3054 ELF_GNU_PROPERTY_ALIGN = ELF_CLASS == ELFCLASS32 ? 4 : 8, 3055 }; 3056 3057 /* 3058 * Process a single gnu_property entry. 3059 * Return false for error. 
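 *
 * Each entry in the property data, as consumed below, is laid out as:
 *
 *   uint32_t pr_type;
 *   uint32_t pr_datasz;
 *   uint8_t  pr_data[pr_datasz];   (padded to ELF_GNU_PROPERTY_ALIGN)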
3060 */ 3061 static bool parse_elf_property(const uint32_t *data, int *off, int datasz, 3062 struct image_info *info, bool have_prev_type, 3063 uint32_t *prev_type, Error **errp) 3064 { 3065 uint32_t pr_type, pr_datasz, step; 3066 3067 if (*off > datasz || !QEMU_IS_ALIGNED(*off, ELF_GNU_PROPERTY_ALIGN)) { 3068 goto error_data; 3069 } 3070 datasz -= *off; 3071 data += *off / sizeof(uint32_t); 3072 3073 if (datasz < 2 * sizeof(uint32_t)) { 3074 goto error_data; 3075 } 3076 pr_type = data[0]; 3077 pr_datasz = data[1]; 3078 data += 2; 3079 datasz -= 2 * sizeof(uint32_t); 3080 step = ROUND_UP(pr_datasz, ELF_GNU_PROPERTY_ALIGN); 3081 if (step > datasz) { 3082 goto error_data; 3083 } 3084 3085 /* Properties are supposed to be unique and sorted on pr_type. */ 3086 if (have_prev_type && pr_type <= *prev_type) { 3087 if (pr_type == *prev_type) { 3088 error_setg(errp, "Duplicate property in PT_GNU_PROPERTY"); 3089 } else { 3090 error_setg(errp, "Unsorted property in PT_GNU_PROPERTY"); 3091 } 3092 return false; 3093 } 3094 *prev_type = pr_type; 3095 3096 if (!arch_parse_elf_property(pr_type, pr_datasz, data, info, errp)) { 3097 return false; 3098 } 3099 3100 *off += 2 * sizeof(uint32_t) + step; 3101 return true; 3102 3103 error_data: 3104 error_setg(errp, "Ill-formed property in PT_GNU_PROPERTY"); 3105 return false; 3106 } 3107 3108 /* Process NT_GNU_PROPERTY_TYPE_0. */ 3109 static bool parse_elf_properties(int image_fd, 3110 struct image_info *info, 3111 const struct elf_phdr *phdr, 3112 char bprm_buf[BPRM_BUF_SIZE], 3113 Error **errp) 3114 { 3115 union { 3116 struct elf_note nhdr; 3117 uint32_t data[NOTE_DATA_SZ / sizeof(uint32_t)]; 3118 } note; 3119 3120 int n, off, datasz; 3121 bool have_prev_type; 3122 uint32_t prev_type; 3123 3124 /* Unless the arch requires properties, ignore them. */ 3125 if (!ARCH_USE_GNU_PROPERTY) { 3126 return true; 3127 } 3128 3129 /* If the properties are crazy large, that's too bad. */ 3130 n = phdr->p_filesz; 3131 if (n > sizeof(note)) { 3132 error_setg(errp, "PT_GNU_PROPERTY too large"); 3133 return false; 3134 } 3135 if (n < sizeof(note.nhdr)) { 3136 error_setg(errp, "PT_GNU_PROPERTY too small"); 3137 return false; 3138 } 3139 3140 if (phdr->p_offset + n <= BPRM_BUF_SIZE) { 3141 memcpy(¬e, bprm_buf + phdr->p_offset, n); 3142 } else { 3143 ssize_t len = pread(image_fd, ¬e, n, phdr->p_offset); 3144 if (len != n) { 3145 error_setg_errno(errp, errno, "Error reading file header"); 3146 return false; 3147 } 3148 } 3149 3150 /* 3151 * The contents of a valid PT_GNU_PROPERTY is a sequence 3152 * of uint32_t -- swap them all now. 3153 */ 3154 #ifdef BSWAP_NEEDED 3155 for (int i = 0; i < n / 4; i++) { 3156 bswap32s(note.data + i); 3157 } 3158 #endif 3159 3160 /* 3161 * Note that nhdr is 3 words, and that the "name" described by namesz 3162 * immediately follows nhdr and is thus at the 4th word. Further, all 3163 * of the inputs to the kernel's round_up are multiples of 4. 
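     *
     * In terms of note.data[] below: data[0] is n_namesz, data[1] is
     * n_descsz, data[2] is n_type, data[3] holds the "GNU\0" name, and
     * the property descriptor itself starts at data[4].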
3164 */ 3165 if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 || 3166 note.nhdr.n_namesz != NOTE_NAME_SZ || 3167 note.data[3] != GNU0_MAGIC) { 3168 error_setg(errp, "Invalid note in PT_GNU_PROPERTY"); 3169 return false; 3170 } 3171 off = sizeof(note.nhdr) + NOTE_NAME_SZ; 3172 3173 datasz = note.nhdr.n_descsz + off; 3174 if (datasz > n) { 3175 error_setg(errp, "Invalid note size in PT_GNU_PROPERTY"); 3176 return false; 3177 } 3178 3179 have_prev_type = false; 3180 prev_type = 0; 3181 while (1) { 3182 if (off == datasz) { 3183 return true; /* end, exit ok */ 3184 } 3185 if (!parse_elf_property(note.data, &off, datasz, info, 3186 have_prev_type, &prev_type, errp)) { 3187 return false; 3188 } 3189 have_prev_type = true; 3190 } 3191 } 3192 3193 /* Load an ELF image into the address space. 3194 3195 IMAGE_NAME is the filename of the image, to use in error messages. 3196 IMAGE_FD is the open file descriptor for the image. 3197 3198 BPRM_BUF is a copy of the beginning of the file; this of course 3199 contains the elf file header at offset 0. It is assumed that this 3200 buffer is sufficiently aligned to present no problems to the host 3201 in accessing data at aligned offsets within the buffer. 3202 3203 On return: INFO values will be filled in, as necessary or available. */ 3204 3205 static void load_elf_image(const char *image_name, int image_fd, 3206 struct image_info *info, char **pinterp_name, 3207 char bprm_buf[BPRM_BUF_SIZE]) 3208 { 3209 struct elfhdr *ehdr = (struct elfhdr *)bprm_buf; 3210 struct elf_phdr *phdr; 3211 abi_ulong load_addr, load_bias, loaddr, hiaddr, error; 3212 int i, retval, prot_exec; 3213 Error *err = NULL; 3214 3215 /* First of all, some simple consistency checks */ 3216 if (!elf_check_ident(ehdr)) { 3217 error_setg(&err, "Invalid ELF image for this architecture"); 3218 goto exit_errmsg; 3219 } 3220 bswap_ehdr(ehdr); 3221 if (!elf_check_ehdr(ehdr)) { 3222 error_setg(&err, "Invalid ELF image for this architecture"); 3223 goto exit_errmsg; 3224 } 3225 3226 i = ehdr->e_phnum * sizeof(struct elf_phdr); 3227 if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) { 3228 phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff); 3229 } else { 3230 phdr = (struct elf_phdr *) alloca(i); 3231 retval = pread(image_fd, phdr, i, ehdr->e_phoff); 3232 if (retval != i) { 3233 goto exit_read; 3234 } 3235 } 3236 bswap_phdr(phdr, ehdr->e_phnum); 3237 3238 info->nsegs = 0; 3239 info->pt_dynamic_addr = 0; 3240 3241 mmap_lock(); 3242 3243 /* 3244 * Find the maximum size of the image and allocate an appropriate 3245 * amount of memory to handle that. Locate the interpreter, if any. 
3246 */ 3247 loaddr = -1, hiaddr = 0; 3248 info->alignment = 0; 3249 info->exec_stack = EXSTACK_DEFAULT; 3250 for (i = 0; i < ehdr->e_phnum; ++i) { 3251 struct elf_phdr *eppnt = phdr + i; 3252 if (eppnt->p_type == PT_LOAD) { 3253 abi_ulong a = eppnt->p_vaddr - eppnt->p_offset; 3254 if (a < loaddr) { 3255 loaddr = a; 3256 } 3257 a = eppnt->p_vaddr + eppnt->p_memsz - 1; 3258 if (a > hiaddr) { 3259 hiaddr = a; 3260 } 3261 ++info->nsegs; 3262 info->alignment |= eppnt->p_align; 3263 } else if (eppnt->p_type == PT_INTERP && pinterp_name) { 3264 g_autofree char *interp_name = NULL; 3265 3266 if (*pinterp_name) { 3267 error_setg(&err, "Multiple PT_INTERP entries"); 3268 goto exit_errmsg; 3269 } 3270 3271 interp_name = g_malloc(eppnt->p_filesz); 3272 3273 if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) { 3274 memcpy(interp_name, bprm_buf + eppnt->p_offset, 3275 eppnt->p_filesz); 3276 } else { 3277 retval = pread(image_fd, interp_name, eppnt->p_filesz, 3278 eppnt->p_offset); 3279 if (retval != eppnt->p_filesz) { 3280 goto exit_read; 3281 } 3282 } 3283 if (interp_name[eppnt->p_filesz - 1] != 0) { 3284 error_setg(&err, "Invalid PT_INTERP entry"); 3285 goto exit_errmsg; 3286 } 3287 *pinterp_name = g_steal_pointer(&interp_name); 3288 } else if (eppnt->p_type == PT_GNU_PROPERTY) { 3289 if (!parse_elf_properties(image_fd, info, eppnt, bprm_buf, &err)) { 3290 goto exit_errmsg; 3291 } 3292 } else if (eppnt->p_type == PT_GNU_STACK) { 3293 info->exec_stack = eppnt->p_flags & PF_X; 3294 } 3295 } 3296 3297 load_addr = loaddr; 3298 3299 if (pinterp_name != NULL) { 3300 if (ehdr->e_type == ET_EXEC) { 3301 /* 3302 * Make sure that the low address does not conflict with 3303 * MMAP_MIN_ADDR or the QEMU application itself. 3304 */ 3305 probe_guest_base(image_name, loaddr, hiaddr); 3306 } else { 3307 abi_ulong align; 3308 3309 /* 3310 * The binary is dynamic, but we still need to 3311 * select guest_base. In this case we pass a size. 3312 */ 3313 probe_guest_base(image_name, 0, hiaddr - loaddr); 3314 3315 /* 3316 * Avoid collision with the loader by providing a different 3317 * default load address. 3318 */ 3319 load_addr += elf_et_dyn_base; 3320 3321 /* 3322 * TODO: Better support for mmap alignment is desirable. 3323 * Since we do not have complete control over the guest 3324 * address space, we prefer the kernel to choose some address 3325 * rather than force the use of LOAD_ADDR via MAP_FIXED. 3326 * But without MAP_FIXED we cannot guarantee alignment, 3327 * only suggest it. 3328 */ 3329 align = pow2ceil(info->alignment); 3330 if (align) { 3331 load_addr &= -align; 3332 } 3333 } 3334 } 3335 3336 /* 3337 * Reserve address space for all of this. 3338 * 3339 * In the case of ET_EXEC, we supply MAP_FIXED_NOREPLACE so that we get 3340 * exactly the address range that is required. Without reserved_va, 3341 * the guest address space is not isolated. We have attempted to avoid 3342 * conflict with the host program itself via probe_guest_base, but using 3343 * MAP_FIXED_NOREPLACE instead of MAP_FIXED provides an extra check. 3344 * 3345 * Otherwise this is ET_DYN, and we are searching for a location 3346 * that can hold the memory space required. If the image is 3347 * pre-linked, LOAD_ADDR will be non-zero, and the kernel should 3348 * honor that address if it happens to be free. 3349 * 3350 * In both cases, we will overwrite pages in this range with mappings 3351 * from the executable. 
3352 */ 3353 load_addr = target_mmap(load_addr, (size_t)hiaddr - loaddr + 1, PROT_NONE, 3354 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | 3355 (ehdr->e_type == ET_EXEC ? MAP_FIXED_NOREPLACE : 0), 3356 -1, 0); 3357 if (load_addr == -1) { 3358 goto exit_mmap; 3359 } 3360 load_bias = load_addr - loaddr; 3361 3362 if (elf_is_fdpic(ehdr)) { 3363 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs = 3364 g_malloc(sizeof(*loadsegs) * info->nsegs); 3365 3366 for (i = 0; i < ehdr->e_phnum; ++i) { 3367 switch (phdr[i].p_type) { 3368 case PT_DYNAMIC: 3369 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias; 3370 break; 3371 case PT_LOAD: 3372 loadsegs->addr = phdr[i].p_vaddr + load_bias; 3373 loadsegs->p_vaddr = phdr[i].p_vaddr; 3374 loadsegs->p_memsz = phdr[i].p_memsz; 3375 ++loadsegs; 3376 break; 3377 } 3378 } 3379 } 3380 3381 info->load_bias = load_bias; 3382 info->code_offset = load_bias; 3383 info->data_offset = load_bias; 3384 info->load_addr = load_addr; 3385 info->entry = ehdr->e_entry + load_bias; 3386 info->start_code = -1; 3387 info->end_code = 0; 3388 info->start_data = -1; 3389 info->end_data = 0; 3390 /* Usual start for brk is after all sections of the main executable. */ 3391 info->brk = TARGET_PAGE_ALIGN(hiaddr + load_bias); 3392 info->elf_flags = ehdr->e_flags; 3393 3394 prot_exec = PROT_EXEC; 3395 #ifdef TARGET_AARCH64 3396 /* 3397 * If the BTI feature is present, this indicates that the executable 3398 * pages of the startup binary should be mapped with PROT_BTI, so that 3399 * branch targets are enforced. 3400 * 3401 * The startup binary is either the interpreter or the static executable. 3402 * The interpreter is responsible for all pages of a dynamic executable. 3403 * 3404 * Elf notes are backward compatible to older cpus. 3405 * Do not enable BTI unless it is supported. 3406 */ 3407 if ((info->note_flags & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) 3408 && (pinterp_name == NULL || *pinterp_name == 0) 3409 && cpu_isar_feature(aa64_bti, ARM_CPU(thread_cpu))) { 3410 prot_exec |= TARGET_PROT_BTI; 3411 } 3412 #endif 3413 3414 for (i = 0; i < ehdr->e_phnum; i++) { 3415 struct elf_phdr *eppnt = phdr + i; 3416 if (eppnt->p_type == PT_LOAD) { 3417 abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em; 3418 int elf_prot = 0; 3419 3420 if (eppnt->p_flags & PF_R) { 3421 elf_prot |= PROT_READ; 3422 } 3423 if (eppnt->p_flags & PF_W) { 3424 elf_prot |= PROT_WRITE; 3425 } 3426 if (eppnt->p_flags & PF_X) { 3427 elf_prot |= prot_exec; 3428 } 3429 3430 vaddr = load_bias + eppnt->p_vaddr; 3431 vaddr_po = vaddr & ~TARGET_PAGE_MASK; 3432 vaddr_ps = vaddr & TARGET_PAGE_MASK; 3433 3434 vaddr_ef = vaddr + eppnt->p_filesz; 3435 vaddr_em = vaddr + eppnt->p_memsz; 3436 3437 /* 3438 * Some segments may be completely empty, with a non-zero p_memsz 3439 * but no backing file segment. 3440 */ 3441 if (eppnt->p_filesz != 0) { 3442 error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po, 3443 elf_prot, MAP_PRIVATE | MAP_FIXED, 3444 image_fd, eppnt->p_offset - vaddr_po); 3445 if (error == -1) { 3446 goto exit_mmap; 3447 } 3448 } 3449 3450 /* If the load segment requests extra zeros (e.g. bss), map it. */ 3451 if (vaddr_ef < vaddr_em && 3452 !zero_bss(vaddr_ef, vaddr_em, elf_prot, &err)) { 3453 goto exit_errmsg; 3454 } 3455 3456 /* Find the full program boundaries. 
*/ 3457 if (elf_prot & PROT_EXEC) { 3458 if (vaddr < info->start_code) { 3459 info->start_code = vaddr; 3460 } 3461 if (vaddr_ef > info->end_code) { 3462 info->end_code = vaddr_ef; 3463 } 3464 } 3465 if (elf_prot & PROT_WRITE) { 3466 if (vaddr < info->start_data) { 3467 info->start_data = vaddr; 3468 } 3469 if (vaddr_ef > info->end_data) { 3470 info->end_data = vaddr_ef; 3471 } 3472 } 3473 #ifdef TARGET_MIPS 3474 } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) { 3475 Mips_elf_abiflags_v0 abiflags; 3476 if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) { 3477 error_setg(&err, "Invalid PT_MIPS_ABIFLAGS entry"); 3478 goto exit_errmsg; 3479 } 3480 if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) { 3481 memcpy(&abiflags, bprm_buf + eppnt->p_offset, 3482 sizeof(Mips_elf_abiflags_v0)); 3483 } else { 3484 retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0), 3485 eppnt->p_offset); 3486 if (retval != sizeof(Mips_elf_abiflags_v0)) { 3487 goto exit_read; 3488 } 3489 } 3490 bswap_mips_abiflags(&abiflags); 3491 info->fp_abi = abiflags.fp_abi; 3492 #endif 3493 } 3494 } 3495 3496 if (info->end_data == 0) { 3497 info->start_data = info->end_code; 3498 info->end_data = info->end_code; 3499 } 3500 3501 if (qemu_log_enabled()) { 3502 load_symbols(ehdr, image_fd, load_bias); 3503 } 3504 3505 debuginfo_report_elf(image_name, image_fd, load_bias); 3506 3507 mmap_unlock(); 3508 3509 close(image_fd); 3510 return; 3511 3512 exit_read: 3513 if (retval >= 0) { 3514 error_setg(&err, "Incomplete read of file header"); 3515 } else { 3516 error_setg_errno(&err, errno, "Error reading file header"); 3517 } 3518 goto exit_errmsg; 3519 exit_mmap: 3520 error_setg_errno(&err, errno, "Error mapping file"); 3521 goto exit_errmsg; 3522 exit_errmsg: 3523 error_reportf_err(err, "%s: ", image_name); 3524 exit(-1); 3525 } 3526 3527 static void load_elf_interp(const char *filename, struct image_info *info, 3528 char bprm_buf[BPRM_BUF_SIZE]) 3529 { 3530 int fd, retval; 3531 Error *err = NULL; 3532 3533 fd = open(path(filename), O_RDONLY); 3534 if (fd < 0) { 3535 error_setg_file_open(&err, errno, filename); 3536 error_report_err(err); 3537 exit(-1); 3538 } 3539 3540 retval = read(fd, bprm_buf, BPRM_BUF_SIZE); 3541 if (retval < 0) { 3542 error_setg_errno(&err, errno, "Error reading file header"); 3543 error_reportf_err(err, "%s: ", filename); 3544 exit(-1); 3545 } 3546 3547 if (retval < BPRM_BUF_SIZE) { 3548 memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval); 3549 } 3550 3551 load_elf_image(filename, fd, info, NULL, bprm_buf); 3552 } 3553 3554 static int symfind(const void *s0, const void *s1) 3555 { 3556 struct elf_sym *sym = (struct elf_sym *)s1; 3557 __typeof(sym->st_value) addr = *(uint64_t *)s0; 3558 int result = 0; 3559 3560 if (addr < sym->st_value) { 3561 result = -1; 3562 } else if (addr >= sym->st_value + sym->st_size) { 3563 result = 1; 3564 } 3565 return result; 3566 } 3567 3568 static const char *lookup_symbolxx(struct syminfo *s, uint64_t orig_addr) 3569 { 3570 #if ELF_CLASS == ELFCLASS32 3571 struct elf_sym *syms = s->disas_symtab.elf32; 3572 #else 3573 struct elf_sym *syms = s->disas_symtab.elf64; 3574 #endif 3575 3576 // binary search 3577 struct elf_sym *sym; 3578 3579 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind); 3580 if (sym != NULL) { 3581 return s->disas_strtab + sym->st_name; 3582 } 3583 3584 return ""; 3585 } 3586 3587 /* FIXME: This should use elf_ops.h */ 3588 static int symcmp(const void *s0, const void *s1) 3589 { 3590 struct elf_sym *sym0 = (struct elf_sym 
*)s0; 3591 struct elf_sym *sym1 = (struct elf_sym *)s1; 3592 return (sym0->st_value < sym1->st_value) 3593 ? -1 3594 : ((sym0->st_value > sym1->st_value) ? 1 : 0); 3595 } 3596 3597 /* Best attempt to load symbols from this ELF object. */ 3598 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias) 3599 { 3600 int i, shnum, nsyms, sym_idx = 0, str_idx = 0; 3601 uint64_t segsz; 3602 struct elf_shdr *shdr; 3603 char *strings = NULL; 3604 struct syminfo *s = NULL; 3605 struct elf_sym *new_syms, *syms = NULL; 3606 3607 shnum = hdr->e_shnum; 3608 i = shnum * sizeof(struct elf_shdr); 3609 shdr = (struct elf_shdr *)alloca(i); 3610 if (pread(fd, shdr, i, hdr->e_shoff) != i) { 3611 return; 3612 } 3613 3614 bswap_shdr(shdr, shnum); 3615 for (i = 0; i < shnum; ++i) { 3616 if (shdr[i].sh_type == SHT_SYMTAB) { 3617 sym_idx = i; 3618 str_idx = shdr[i].sh_link; 3619 goto found; 3620 } 3621 } 3622 3623 /* There will be no symbol table if the file was stripped. */ 3624 return; 3625 3626 found: 3627 /* Now know where the strtab and symtab are. Snarf them. */ 3628 s = g_try_new(struct syminfo, 1); 3629 if (!s) { 3630 goto give_up; 3631 } 3632 3633 segsz = shdr[str_idx].sh_size; 3634 s->disas_strtab = strings = g_try_malloc(segsz); 3635 if (!strings || 3636 pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) { 3637 goto give_up; 3638 } 3639 3640 segsz = shdr[sym_idx].sh_size; 3641 syms = g_try_malloc(segsz); 3642 if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) { 3643 goto give_up; 3644 } 3645 3646 if (segsz / sizeof(struct elf_sym) > INT_MAX) { 3647 /* Implausibly large symbol table: give up rather than ploughing 3648 * on with the number of symbols calculation overflowing 3649 */ 3650 goto give_up; 3651 } 3652 nsyms = segsz / sizeof(struct elf_sym); 3653 for (i = 0; i < nsyms; ) { 3654 bswap_sym(syms + i); 3655 /* Throw away entries which we do not need. */ 3656 if (syms[i].st_shndx == SHN_UNDEF 3657 || syms[i].st_shndx >= SHN_LORESERVE 3658 || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) { 3659 if (i < --nsyms) { 3660 syms[i] = syms[nsyms]; 3661 } 3662 } else { 3663 #if defined(TARGET_ARM) || defined (TARGET_MIPS) 3664 /* The bottom address bit marks a Thumb or MIPS16 symbol. */ 3665 syms[i].st_value &= ~(target_ulong)1; 3666 #endif 3667 syms[i].st_value += load_bias; 3668 i++; 3669 } 3670 } 3671 3672 /* No "useful" symbol. */ 3673 if (nsyms == 0) { 3674 goto give_up; 3675 } 3676 3677 /* Attempt to free the storage associated with the local symbols 3678 that we threw away. Whether or not this has any effect on the 3679 memory allocation depends on the malloc implementation and how 3680 many symbols we managed to discard. 
*/ 3681 new_syms = g_try_renew(struct elf_sym, syms, nsyms); 3682 if (new_syms == NULL) { 3683 goto give_up; 3684 } 3685 syms = new_syms; 3686 3687 qsort(syms, nsyms, sizeof(*syms), symcmp); 3688 3689 s->disas_num_syms = nsyms; 3690 #if ELF_CLASS == ELFCLASS32 3691 s->disas_symtab.elf32 = syms; 3692 #else 3693 s->disas_symtab.elf64 = syms; 3694 #endif 3695 s->lookup_symbol = lookup_symbolxx; 3696 s->next = syminfos; 3697 syminfos = s; 3698 3699 return; 3700 3701 give_up: 3702 g_free(s); 3703 g_free(strings); 3704 g_free(syms); 3705 } 3706 3707 uint32_t get_elf_eflags(int fd) 3708 { 3709 struct elfhdr ehdr; 3710 off_t offset; 3711 int ret; 3712 3713 /* Read ELF header */ 3714 offset = lseek(fd, 0, SEEK_SET); 3715 if (offset == (off_t) -1) { 3716 return 0; 3717 } 3718 ret = read(fd, &ehdr, sizeof(ehdr)); 3719 if (ret < sizeof(ehdr)) { 3720 return 0; 3721 } 3722 offset = lseek(fd, offset, SEEK_SET); 3723 if (offset == (off_t) -1) { 3724 return 0; 3725 } 3726 3727 /* Check ELF signature */ 3728 if (!elf_check_ident(&ehdr)) { 3729 return 0; 3730 } 3731 3732 /* check header */ 3733 bswap_ehdr(&ehdr); 3734 if (!elf_check_ehdr(&ehdr)) { 3735 return 0; 3736 } 3737 3738 /* return architecture id */ 3739 return ehdr.e_flags; 3740 } 3741 3742 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) 3743 { 3744 struct image_info interp_info; 3745 struct elfhdr elf_ex; 3746 char *elf_interpreter = NULL; 3747 char *scratch; 3748 3749 memset(&interp_info, 0, sizeof(interp_info)); 3750 #ifdef TARGET_MIPS 3751 interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN; 3752 #endif 3753 3754 load_elf_image(bprm->filename, bprm->fd, info, 3755 &elf_interpreter, bprm->buf); 3756 3757 /* ??? We need a copy of the elf header for passing to create_elf_tables. 3758 If we do nothing, we'll have overwritten this when we re-use bprm->buf 3759 when we load the interpreter. */ 3760 elf_ex = *(struct elfhdr *)bprm->buf; 3761 3762 /* Do this so that we can load the interpreter, if need be. We will 3763 change some of these later */ 3764 bprm->p = setup_arg_pages(bprm, info); 3765 3766 scratch = g_new0(char, TARGET_PAGE_SIZE); 3767 if (STACK_GROWS_DOWN) { 3768 bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 3769 bprm->p, info->stack_limit); 3770 info->file_string = bprm->p; 3771 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 3772 bprm->p, info->stack_limit); 3773 info->env_strings = bprm->p; 3774 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 3775 bprm->p, info->stack_limit); 3776 info->arg_strings = bprm->p; 3777 } else { 3778 info->arg_strings = bprm->p; 3779 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch, 3780 bprm->p, info->stack_limit); 3781 info->env_strings = bprm->p; 3782 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch, 3783 bprm->p, info->stack_limit); 3784 info->file_string = bprm->p; 3785 bprm->p = copy_elf_strings(1, &bprm->filename, scratch, 3786 bprm->p, info->stack_limit); 3787 } 3788 3789 g_free(scratch); 3790 3791 if (!bprm->p) { 3792 fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG)); 3793 exit(-1); 3794 } 3795 3796 if (elf_interpreter) { 3797 load_elf_interp(elf_interpreter, &interp_info, bprm->buf); 3798 3799 /* 3800 * While unusual because of ELF_ET_DYN_BASE, if we are unlucky 3801 * with the mappings the interpreter can be loaded above but 3802 * near the main executable, which can leave very little room 3803 * for the heap. 3804 * If the current brk has less than 16MB, use the end of the 3805 * interpreter. 
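         * (That is, when less than 16MB would remain between the
         * executable's brk and the address where the interpreter
         * was loaded.)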
int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
{
    struct image_info interp_info;
    struct elfhdr elf_ex;
    char *elf_interpreter = NULL;
    char *scratch;

    memset(&interp_info, 0, sizeof(interp_info));
#ifdef TARGET_MIPS
    interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
#endif

    load_elf_image(bprm->filename, bprm->fd, info,
                   &elf_interpreter, bprm->buf);

    /* ??? We need a copy of the elf header for passing to create_elf_tables.
       If we do nothing, we'll have overwritten this when we re-use bprm->buf
       when we load the interpreter. */
    elf_ex = *(struct elfhdr *)bprm->buf;

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later. */
    bprm->p = setup_arg_pages(bprm, info);

    scratch = g_new0(char, TARGET_PAGE_SIZE);
    if (STACK_GROWS_DOWN) {
        bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
                                   bprm->p, info->stack_limit);
        info->file_string = bprm->p;
        bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
                                   bprm->p, info->stack_limit);
        info->env_strings = bprm->p;
        bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
                                   bprm->p, info->stack_limit);
        info->arg_strings = bprm->p;
    } else {
        info->arg_strings = bprm->p;
        bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
                                   bprm->p, info->stack_limit);
        info->env_strings = bprm->p;
        bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
                                   bprm->p, info->stack_limit);
        info->file_string = bprm->p;
        bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
                                   bprm->p, info->stack_limit);
    }

    g_free(scratch);

    if (!bprm->p) {
        fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
        exit(-1);
    }

    if (elf_interpreter) {
        load_elf_interp(elf_interpreter, &interp_info, bprm->buf);

        /*
         * While unusual because of ELF_ET_DYN_BASE, if we are unlucky
         * with the mappings, the interpreter can be loaded above but
         * near the main executable, which can leave very little room
         * for the heap.
         * If the gap between the current brk and the interpreter is
         * less than 16 MiB, use the end of the interpreter instead.
         */
        if (interp_info.brk > info->brk &&
            interp_info.load_bias - info->brk < 16 * MiB) {
            info->brk = interp_info.brk;
        }

        /* If the program interpreter is one of these two, then assume
           an iBCS2 image.  Otherwise assume a native linux image. */

        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
            || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
            info->personality = PER_SVR4;

            /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
               and some applications "depend" upon this behavior.  Since
               we do not have the power to recompile these, we emulate
               the SVr4 behavior.  Sigh. */
            target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        }
#ifdef TARGET_MIPS
        info->interp_fp_abi = interp_info.fp_abi;
#endif
    }

    /*
     * TODO: load a vdso, which would also contain the signal trampolines.
     * Otherwise, allocate a private page to hold them.
     */
    if (TARGET_ARCH_HAS_SIGTRAMP_PAGE) {
        abi_long tramp_page = target_mmap(0, TARGET_PAGE_SIZE,
                                          PROT_READ | PROT_WRITE,
                                          MAP_PRIVATE | MAP_ANON, -1, 0);
        if (tramp_page == -1) {
            return -errno;
        }

        setup_sigtramp(tramp_page);
        target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC);
    }

    bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
                                info, (elf_interpreter ? &interp_info : NULL));
    info->start_stack = bprm->p;

    /* If we have an interpreter, set that as the program's entry point.
       Copy the load_bias as well, to help PPC64 interpret the entry
       point as a function descriptor.  Do this after creating elf tables
       so that we copy the original program entry point into the AUXV. */
    if (elf_interpreter) {
        info->load_bias = interp_info.load_bias;
        info->entry = interp_info.entry;
        g_free(elf_interpreter);
    }

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#endif

    return 0;
}

#ifdef USE_ELF_CORE_DUMP
/*
 * Definitions to generate Intel SVR4-like core files.
 * These mostly have the same names as the SVR4 types with "target_elf_"
 * tacked on the front to prevent clashes with linux definitions,
 * and the typedef forms have been avoided.  This is mostly like
 * the SVR4 structure, but more Linuxy, with things that Linux does
 * not support and which gdb doesn't really use excluded.
 *
 * Fields we don't dump (their contents are zero) in linux-user qemu
 * are marked with XXX.
 *
 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
 *
 * Porting ELF coredump support to a new target is a (quite) simple
 * process.  First you define USE_ELF_CORE_DUMP in the target ELF code
 * (where init_thread() for the target resides):
 *
 * #define USE_ELF_CORE_DUMP
 *
 * Next you define the type of register set used for dumping.  The ELF
 * specification says that it needs to be an array of elf_greg_t with
 * ELF_NREG elements:
 *
 * typedef <target_regtype> target_elf_greg_t;
 * #define ELF_NREG <number of registers>
 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 * The last step is to implement the target-specific function that
 * copies registers from the given cpu into the register set just
 * defined.  The prototype is:
 *
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                const CPUArchState *env);
 *
 * Parameters:
 *     regs - copy register values into here (allocated and zeroed by caller)
 *     env - copy registers from here
 *
 * An example for the ARM target is provided in this file.
 */
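/*
 * For illustration only, a minimal sketch of the recipe above for a
 * hypothetical target with two general-purpose registers and a pc;
 * the CPUFooState type and the r0/r1/pc field names are invented, not
 * taken from any real port:
 *
 *     #define USE_ELF_CORE_DUMP
 *     #define ELF_NREG 3
 *     typedef abi_ulong target_elf_greg_t;
 *     typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 *     static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                    const CPUFooState *env)
 *     {
 *         (*regs)[0] = tswapreg(env->r0);
 *         (*regs)[1] = tswapreg(env->r1);
 *         (*regs)[2] = tswapreg(env->pc);
 *     }
 *
 * The register order must match the layout of the kernel's
 * elf_gregset_t for the target, i.e. what guest gdb expects.
 */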
/* An ELF note in memory */
struct memelfnote {
    const char *name;
    size_t     namesz;
    size_t     namesz_rounded;
    int        type;
    size_t     datasz;
    size_t     datasz_rounded;
    void       *data;
    size_t     notesz;
};

struct target_elf_siginfo {
    abi_int  si_signo; /* signal number */
    abi_int  si_code;  /* extra code */
    abi_int  si_errno; /* errno */
};

struct target_elf_prstatus {
    struct target_elf_siginfo pr_info;  /* Info associated with signal */
    abi_short          pr_cursig;       /* Current signal */
    abi_ulong          pr_sigpend;      /* XXX */
    abi_ulong          pr_sighold;      /* XXX */
    target_pid_t       pr_pid;
    target_pid_t       pr_ppid;
    target_pid_t       pr_pgrp;
    target_pid_t       pr_sid;
    struct target_timeval pr_utime;     /* XXX User time */
    struct target_timeval pr_stime;     /* XXX System time */
    struct target_timeval pr_cutime;    /* XXX Cumulative user time */
    struct target_timeval pr_cstime;    /* XXX Cumulative system time */
    target_elf_gregset_t pr_reg;        /* GP registers */
    abi_int            pr_fpvalid;      /* XXX */
};

#define ELF_PRARGSZ     (80) /* Number of chars for args */

struct target_elf_prpsinfo {
    char         pr_state;       /* numeric process state */
    char         pr_sname;       /* char for pr_state */
    char         pr_zomb;        /* zombie */
    char         pr_nice;        /* nice val */
    abi_ulong    pr_flag;        /* flags */
    target_uid_t pr_uid;
    target_gid_t pr_gid;
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
    /* Lots missing */
    char    pr_fname[16] QEMU_NONSTRING; /* filename of executable */
    char    pr_psargs[ELF_PRARGSZ];      /* initial part of arg list */
};

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status {
    QTAILQ_ENTRY(elf_thread_status)  ets_link;
    struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
#if 0
    elf_fpregset_t fpu;             /* NT_PRFPREG */
    struct task_struct *thread;
    elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
#endif
    struct memelfnote notes[1];
    int num_notes;
};

struct elf_note_info {
    struct memelfnote   *notes;
    struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
    struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */

    QTAILQ_HEAD(, elf_thread_status) thread_list;
#if 0
    /*
     * Current version of ELF coredump doesn't support
     * dumping fp regs etc.
     */
    elf_fpregset_t *fpu;
    elf_fpxregset_t *xfpu;
    int thread_status_size;
#endif
    int notes_size;
    int numnote;
};

struct vm_area_struct {
    target_ulong   vma_start;  /* start vaddr of memory region */
    target_ulong   vma_end;    /* end vaddr of memory region */
    abi_ulong      vma_flags;  /* protection etc. flags for the region */
    QTAILQ_ENTRY(vm_area_struct) vma_link;
};
struct mm_struct {
    QTAILQ_HEAD(, vm_area_struct) mm_mmap;
    int mm_count;           /* number of mappings */
};

static struct mm_struct *vma_init(void);
static void vma_delete(struct mm_struct *);
static int vma_add_mapping(struct mm_struct *, target_ulong,
                           target_ulong, abi_ulong);
static int vma_get_mapping_count(const struct mm_struct *);
static struct vm_area_struct *vma_first(const struct mm_struct *);
static struct vm_area_struct *vma_next(struct vm_area_struct *);
static abi_ulong vma_dump_size(const struct vm_area_struct *);
static int vma_walker(void *priv, target_ulong start, target_ulong end,
                      unsigned long flags);

static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
static void fill_note(struct memelfnote *, const char *, int,
                      unsigned int, void *);
static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
static void fill_auxv_note(struct memelfnote *, const TaskState *);
static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
static size_t note_size(const struct memelfnote *);
static void free_note_info(struct elf_note_info *);
static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
static void fill_thread_info(struct elf_note_info *, const CPUArchState *);

static int dump_write(int, const void *, size_t);
static int write_note(struct memelfnote *, int);
static int write_note_info(struct elf_note_info *, int);

#ifdef BSWAP_NEEDED
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
    prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
    prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
    prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
    prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
    prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
    prstatus->pr_pid = tswap32(prstatus->pr_pid);
    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
    prstatus->pr_sid = tswap32(prstatus->pr_sid);
    /* cpu times are not filled, so we skip them */
    /* regs should be in correct format already */
    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
}

static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
    psinfo->pr_flag = tswapal(psinfo->pr_flag);
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
}

static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
#else
static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
static inline void bswap_psinfo(struct target_elf_prpsinfo *p) { }
static inline void bswap_note(struct elf_note *en) { }
#endif /* BSWAP_NEEDED */

/*
 * Minimal support for linux memory regions.  These are needed to
 * find out exactly which memory belongs to the emulated process.
 * No locks are needed here, as long as the thread that received
 * the signal is stopped.
 */
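/*
 * The helpers below follow the pattern used by elf_core_dump() later
 * in this file; shown here only as an illustrative sketch:
 *
 *     struct mm_struct *mm = vma_init();
 *     if (mm != NULL) {
 *         walk_memory_regions(mm, vma_walker);   // populate the list
 *         int segs = vma_get_mapping_count(mm);  // one PT_LOAD each
 *         ... iterate with vma_first()/vma_next() ...
 *         vma_delete(mm);                        // free list and mm
 *     }
 */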
static struct mm_struct *vma_init(void)
{
    struct mm_struct *mm;

    if ((mm = g_malloc(sizeof (*mm))) == NULL)
        return (NULL);

    mm->mm_count = 0;
    QTAILQ_INIT(&mm->mm_mmap);

    return (mm);
}

static void vma_delete(struct mm_struct *mm)
{
    struct vm_area_struct *vma;

    while ((vma = vma_first(mm)) != NULL) {
        QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
        g_free(vma);
    }
    g_free(mm);
}

static int vma_add_mapping(struct mm_struct *mm, target_ulong start,
                           target_ulong end, abi_ulong flags)
{
    struct vm_area_struct *vma;

    if ((vma = g_malloc0(sizeof (*vma))) == NULL)
        return (-1);

    vma->vma_start = start;
    vma->vma_end = end;
    vma->vma_flags = flags;

    QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
    mm->mm_count++;

    return (0);
}

static struct vm_area_struct *vma_first(const struct mm_struct *mm)
{
    return (QTAILQ_FIRST(&mm->mm_mmap));
}

static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
{
    return (QTAILQ_NEXT(vma, vma_link));
}

static int vma_get_mapping_count(const struct mm_struct *mm)
{
    return (mm->mm_count);
}

/*
 * Calculate file (dump) size of given memory region.
 */
static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
{
    /* if we cannot even read the first page, skip it */
    if (!access_ok_untagged(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
        return (0);

    /*
     * Usually we don't dump executable pages, as they contain
     * non-writable code that a debugger can read directly from
     * the target library etc.  However, thread stacks are marked
     * executable as well, so we read in the first page of the
     * region and check whether it contains an ELF header.  If
     * there is no ELF header, we dump the region.
     */
    if (vma->vma_flags & PROT_EXEC) {
        char page[TARGET_PAGE_SIZE];

        if (copy_from_user(page, vma->vma_start, sizeof (page))) {
            return 0;
        }
        if ((page[EI_MAG0] == ELFMAG0) &&
            (page[EI_MAG1] == ELFMAG1) &&
            (page[EI_MAG2] == ELFMAG2) &&
            (page[EI_MAG3] == ELFMAG3)) {
            /*
             * Mappings are possibly from an ELF binary.  Don't dump
             * them.
             */
            return (0);
        }
    }

    return (vma->vma_end - vma->vma_start);
}

static int vma_walker(void *priv, target_ulong start, target_ulong end,
                      unsigned long flags)
{
    struct mm_struct *mm = (struct mm_struct *)priv;

    vma_add_mapping(mm, start, end, flags);
    return (0);
}

static void fill_note(struct memelfnote *note, const char *name, int type,
                      unsigned int sz, void *data)
{
    unsigned int namesz;

    namesz = strlen(name) + 1;
    note->name = name;
    note->namesz = namesz;
    note->namesz_rounded = roundup(namesz, sizeof (int32_t));
    note->type = type;
    note->datasz = sz;
    note->datasz_rounded = roundup(sz, sizeof (int32_t));

    note->data = data;

    /*
     * We calculate the rounded-up note size here, as specified
     * by the ELF document.
     */
    note->notesz = sizeof (struct elf_note) +
                   note->namesz_rounded + note->datasz_rounded;
}
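/*
 * Worked example (for illustration): for a "CORE" note, namesz is 5
 * including the trailing NUL, which rounds up to 8.  With a payload of
 * datasz bytes, the whole record therefore occupies
 * sizeof(struct elf_note) + 8 + roundup(datasz, 4) bytes in the file,
 * which is exactly what write_note() below emits.
 */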
static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
                            uint32_t flags)
{
    (void) memset(elf, 0, sizeof(*elf));

    (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;

    elf->e_type = ET_CORE;
    elf->e_machine = machine;
    elf->e_version = EV_CURRENT;
    elf->e_phoff = sizeof(struct elfhdr);
    elf->e_flags = flags;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;

    bswap_ehdr(elf);
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_vaddr = 0;
    phdr->p_paddr = 0;
    phdr->p_filesz = sz;
    phdr->p_memsz = 0;
    phdr->p_flags = 0;
    phdr->p_align = 0;

    bswap_phdr(phdr, 1);
}

static size_t note_size(const struct memelfnote *note)
{
    return (note->notesz);
}

static void fill_prstatus(struct target_elf_prstatus *prstatus,
                          const TaskState *ts, int signr)
{
    (void) memset(prstatus, 0, sizeof (*prstatus));
    prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
    prstatus->pr_pid = ts->ts_tid;
    prstatus->pr_ppid = getppid();
    prstatus->pr_pgrp = getpgrp();
    prstatus->pr_sid = getsid(0);

    bswap_prstatus(prstatus);
}

static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
{
    char *base_filename;
    unsigned int i, len;

    (void) memset(psinfo, 0, sizeof (*psinfo));

    len = ts->info->env_strings - ts->info->arg_strings;
    if (len >= ELF_PRARGSZ)
        len = ELF_PRARGSZ - 1;
    if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_strings, len)) {
        return -EFAULT;
    }
    for (i = 0; i < len; i++)
        if (psinfo->pr_psargs[i] == 0)
            psinfo->pr_psargs[i] = ' ';
    psinfo->pr_psargs[len] = 0;

    psinfo->pr_pid = getpid();
    psinfo->pr_ppid = getppid();
    psinfo->pr_pgrp = getpgrp();
    psinfo->pr_sid = getsid(0);
    psinfo->pr_uid = getuid();
    psinfo->pr_gid = getgid();

    base_filename = g_path_get_basename(ts->bprm->filename);
    /*
     * Using strncpy here is fine: at max-length,
     * this field is not NUL-terminated.
     */
    (void) strncpy(psinfo->pr_fname, base_filename,
                   sizeof(psinfo->pr_fname));

    g_free(base_filename);
    bswap_psinfo(psinfo);
    return (0);
}

static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
{
    elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
    elf_addr_t orig_auxv = auxv;
    void *ptr;
    int len = ts->info->auxv_len;

    /*
     * The auxiliary vector is stored in the target process's stack.
     * It contains {type, value} pairs that we need to dump into the
     * note.  This is not strictly necessary, but we do it here for
     * the sake of completeness.
     */

    /* read in the whole auxv vector and copy it to the memelfnote */
    ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
    if (ptr != NULL) {
        fill_note(note, "CORE", NT_AUXV, len, ptr);
        unlock_user(ptr, auxv, len);
    }
}

/*
 * Constructs the name of the coredump file.  We have the following
 * convention for the name:
 *
 *   qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
 *
 * Returns the filename.
 */
static char *core_dump_filename(const TaskState *ts)
{
    g_autoptr(GDateTime) now = g_date_time_new_now_local();
    g_autofree char *nowstr = g_date_time_format(now, "%Y%m%d-%H%M%S");
    g_autofree char *base_filename = g_path_get_basename(ts->bprm->filename);

    return g_strdup_printf("qemu_%s_%s_%d.core",
                           base_filename, nowstr, (int)getpid());
}
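/*
 * For example (values invented for illustration), dumping "/bin/ls"
 * as pid 1234 on 2024-01-31 at 09:15:02 would produce:
 *
 *   qemu_ls_20240131-091502_1234.core
 */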
static int dump_write(int fd, const void *ptr, size_t size)
{
    const char *bufp = (const char *)ptr;
    ssize_t bytes_written, bytes_left;
    struct rlimit dumpsize;
    off_t pos;

    bytes_written = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
        if (errno == ESPIPE) { /* not a seekable stream */
            bytes_left = size;
        } else {
            return pos;
        }
    } else {
        if (dumpsize.rlim_cur <= pos) {
            return -1;
        } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
            bytes_left = size;
        } else {
            size_t limit_left = dumpsize.rlim_cur - pos;
            bytes_left = limit_left >= size ? size : limit_left;
        }
    }

    /*
     * In normal conditions a single write(2) should do, but in
     * the case of a socket etc. this mechanism is more portable.
     */
    do {
        bytes_written = write(fd, bufp, bytes_left);
        if (bytes_written < 0) {
            if (errno == EINTR)
                continue;
            return (-1);
        } else if (bytes_written == 0) { /* eof */
            return (-1);
        }
        bufp += bytes_written;
        bytes_left -= bytes_written;
    } while (bytes_left > 0);

    return (0);
}

static int write_note(struct memelfnote *men, int fd)
{
    struct elf_note en;

    en.n_namesz = men->namesz;
    en.n_type = men->type;
    en.n_descsz = men->datasz;

    bswap_note(&en);

    if (dump_write(fd, &en, sizeof(en)) != 0)
        return (-1);
    if (dump_write(fd, men->name, men->namesz_rounded) != 0)
        return (-1);
    if (dump_write(fd, men->data, men->datasz_rounded) != 0)
        return (-1);

    return (0);
}
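/*
 * On disk, each note record written above is laid out as follows
 * (a sketch; both variable-length parts are padded to 4 bytes):
 *
 *   +---------------------+
 *   | struct elf_note     |  n_namesz, n_descsz, n_type
 *   +---------------------+
 *   | name, NUL, padding  |  namesz_rounded bytes
 *   +---------------------+
 *   | data, padding       |  datasz_rounded bytes
 *   +---------------------+
 */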
static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
{
    CPUState *cpu = env_cpu((CPUArchState *)env);
    TaskState *ts = (TaskState *)cpu->opaque;
    struct elf_thread_status *ets;

    ets = g_malloc0(sizeof (*ets));
    ets->num_notes = 1; /* only prstatus is dumped */
    fill_prstatus(&ets->prstatus, ts, 0);
    elf_core_copy_regs(&ets->prstatus.pr_reg, env);
    fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
              &ets->prstatus);

    QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);

    info->notes_size += note_size(&ets->notes[0]);
}

static void init_note_info(struct elf_note_info *info)
{
    /*
     * Initialize the elf_note_info structure so that it is at
     * least safe to call free_note_info() on it.  Must be
     * called before calling fill_note_info().
     */
    memset(info, 0, sizeof (*info));
    QTAILQ_INIT(&info->thread_list);
}

static int fill_note_info(struct elf_note_info *info,
                          long signr, const CPUArchState *env)
{
#define NUMNOTES 3
    CPUState *cpu = env_cpu((CPUArchState *)env);
    TaskState *ts = (TaskState *)cpu->opaque;
    int i;

    info->notes = g_new0(struct memelfnote, NUMNOTES);
    if (info->notes == NULL)
        return (-ENOMEM);
    info->prstatus = g_malloc0(sizeof (*info->prstatus));
    if (info->prstatus == NULL)
        return (-ENOMEM);
    info->psinfo = g_malloc0(sizeof (*info->psinfo));
    if (info->psinfo == NULL)
        return (-ENOMEM);

    /*
     * First fill in the status (and registers) of the current thread,
     * including process info & aux vector.
     */
    fill_prstatus(info->prstatus, ts, signr);
    elf_core_copy_regs(&info->prstatus->pr_reg, env);
    fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
              sizeof (*info->prstatus), info->prstatus);
    fill_psinfo(info->psinfo, ts);
    fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
              sizeof (*info->psinfo), info->psinfo);
    fill_auxv_note(&info->notes[2], ts);
    info->numnote = NUMNOTES;

    info->notes_size = 0;
    for (i = 0; i < info->numnote; i++)
        info->notes_size += note_size(&info->notes[i]);

    /* read and fill status of all threads */
    WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
        CPU_FOREACH(cpu) {
            if (cpu == thread_cpu) {
                continue;
            }
            fill_thread_info(info, cpu_env(cpu));
        }
    }

    return (0);
}

static void free_note_info(struct elf_note_info *info)
{
    struct elf_thread_status *ets;

    while (!QTAILQ_EMPTY(&info->thread_list)) {
        ets = QTAILQ_FIRST(&info->thread_list);
        QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
        g_free(ets);
    }

    g_free(info->prstatus);
    g_free(info->psinfo);
    g_free(info->notes);
}

static int write_note_info(struct elf_note_info *info, int fd)
{
    struct elf_thread_status *ets;
    int i, error = 0;

    /* write prstatus, psinfo and auxv for the current thread */
    for (i = 0; i < info->numnote; i++)
        if ((error = write_note(&info->notes[i], fd)) != 0)
            return (error);

    /* write prstatus for each thread */
    QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
        if ((error = write_note(&ets->notes[0], fd)) != 0)
            return (error);
    }

    return (0);
}

/*
 * Write out an ELF coredump.
 *
 * See documentation of the ELF object file format in:
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
 *
 * The coredump format in linux is as follows:
 *
 * 0 +----------------------+ \
 *   | ELF header           |  ET_CORE  |
 *   +----------------------+           |
 *   | ELF program headers  |           |--- headers
 *   | - NOTE section       |           |
 *   | - PT_LOAD sections   |           |
 *   +----------------------+ /
 *   | NOTEs:               |
 *   | - NT_PRSTATUS        |
 *   | - NT_PRPSINFO        |
 *   | - NT_AUXV            |
 *   +----------------------+ <-- aligned to target page
 *   | Process memory dump  |
 *   :                      :
 *   .                      .
 *   :                      :
 *   |                      |
 *   +----------------------+
 *
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
 *
 * The format follows the System V format as closely as possible.
 * Current version limitations are as follows:
 * - no floating point registers are dumped
 *
 * Returns 0 on success, negative errno otherwise.
 *
 * TODO: make this work also during runtime: it should be possible to
 * force a coredump from a running process and then continue processing.
 * For example, qemu could set up a SIGUSR2 handler (provided that the
 * target process hasn't registered a handler for that signal) that
 * does the dump when the signal is received.
 */
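/*
 * Sketch of the resulting file offsets, mirroring the arithmetic in
 * elf_core_dump() below (sizes are illustrative; real values depend on
 * the target's struct sizes and the number of mappings, segs):
 *
 *   0                                  ELF header
 *   sizeof(elfhdr)                     (segs + 1) program headers
 *   sizeof(elfhdr)
 *     + (segs + 1) * sizeof(elf_phdr)  notes (notes_size bytes)
 *   roundup(..., ELF_EXEC_PAGESIZE)    memory dump, one PT_LOAD each
 */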
static int elf_core_dump(int signr, const CPUArchState *env)
{
    const CPUState *cpu = env_cpu((CPUArchState *)env);
    const TaskState *ts = (const TaskState *)cpu->opaque;
    struct vm_area_struct *vma = NULL;
    g_autofree char *corefile = NULL;
    struct elf_note_info info;
    struct elfhdr elf;
    struct elf_phdr phdr;
    struct rlimit dumpsize;
    struct mm_struct *mm = NULL;
    off_t offset = 0, data_offset = 0;
    int segs = 0;
    int fd = -1;

    init_note_info(&info);

    errno = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if (dumpsize.rlim_cur == 0)
        return 0;

    corefile = core_dump_filename(ts);

    if ((fd = open(corefile, O_WRONLY | O_CREAT,
                   S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)) < 0)
        return (-errno);

    /*
     * Walk through the target process memory mappings and
     * set up the structure containing this information.  After
     * this point the vma_xxx functions can be used.
     */
    if ((mm = vma_init()) == NULL)
        goto out;

    walk_memory_regions(mm, vma_walker);
    segs = vma_get_mapping_count(mm);

    /*
     * Construct a valid coredump ELF header.  We also
     * add one more segment for the notes.
     */
    fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
    if (dump_write(fd, &elf, sizeof (elf)) != 0)
        goto out;

    /* fill in the in-memory version of the notes */
    if (fill_note_info(&info, signr, env) < 0)
        goto out;

    offset += sizeof (elf);                           /* elf header */
    offset += (segs + 1) * sizeof (struct elf_phdr);  /* program headers */

    /* write out the notes program header */
    fill_elf_note_phdr(&phdr, info.notes_size, offset);

    offset += info.notes_size;
    if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
        goto out;

    /*
     * The ELF specification wants data to start at a page boundary,
     * so we align it here.
     */
    data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);

    /*
     * Write program headers for the memory regions mapped in
     * the target process.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        (void) memset(&phdr, 0, sizeof (phdr));

        phdr.p_type = PT_LOAD;
        phdr.p_offset = offset;
        phdr.p_vaddr = vma->vma_start;
        phdr.p_paddr = 0;
        phdr.p_filesz = vma_dump_size(vma);
        offset += phdr.p_filesz;
        phdr.p_memsz = vma->vma_end - vma->vma_start;
        phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
        if (vma->vma_flags & PROT_WRITE)
            phdr.p_flags |= PF_W;
        if (vma->vma_flags & PROT_EXEC)
            phdr.p_flags |= PF_X;
        phdr.p_align = ELF_EXEC_PAGESIZE;

        bswap_phdr(&phdr, 1);
        if (dump_write(fd, &phdr, sizeof(phdr)) != 0) {
            goto out;
        }
    }

    /*
     * Next we write the notes, just after the program headers.  No
     * alignment is needed here.
     */
    if (write_note_info(&info, fd) < 0)
        goto out;

    /* align data to page boundary */
    if (lseek(fd, data_offset, SEEK_SET) != data_offset)
        goto out;

    /*
     * Finally we can dump the process memory into the corefile as well.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        abi_ulong addr;
        abi_ulong end;

        end = vma->vma_start + vma_dump_size(vma);

        for (addr = vma->vma_start; addr < end;
             addr += TARGET_PAGE_SIZE) {
            char page[TARGET_PAGE_SIZE];
            int error;

            /*
             * Read in a page from target process memory and
             * write it to the coredump file.
             */
            error = copy_from_user(page, addr, sizeof (page));
            if (error != 0) {
                (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
                               addr);
                errno = -error;
                goto out;
            }
            if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
                goto out;
        }
    }

 out:
    free_note_info(&info);
    if (mm != NULL)
        vma_delete(mm);
    (void) close(fd);

    if (errno != 0)
        return (-errno);
    return (0);
}
#endif /* USE_ELF_CORE_DUMP */

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}