/* This is the Linux kernel elf-loading code, ported into user space */

#include "qemu/osdep.h"

#include "qemu.h"
#include "disas/disas.h"
#include "qemu/path.h"

#ifdef _ARCH_PPC64
#undef ARCH_DLINFO
#undef ELF_PLATFORM
#undef ELF_HWCAP
#undef ELF_CLASS
#undef ELF_DATA
#undef ELF_ARCH
#endif

/* from personality.h */

/*
 * Flags for bug emulation.
 *
 * These occupy the top three bytes.
 */
enum {
    ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
                                         * descriptors (signal handling)
                                         */
    MMAP_PAGE_ZERO =    0x0100000,
    ADDR_COMPAT_LAYOUT = 0x0200000,
    READ_IMPLIES_EXEC = 0x0400000,
    ADDR_LIMIT_32BIT =  0x0800000,
    SHORT_INODE =       0x1000000,
    WHOLE_SECONDS =     0x2000000,
    STICKY_TIMEOUTS =   0x4000000,
    ADDR_LIMIT_3GB =    0x8000000,
};

/*
 * Personality types.
 *
 * These go in the low byte.  Avoid using the top bit, it will
 * conflict with error returns.
 */
enum {
    PER_LINUX =         0x0000,
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
    PER_BSD =           0x0006,
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =       0x0008,
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,   /* IRIX5 32-bit */
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,   /* IRIX6 new 32-bit */
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,   /* IRIX6 64-bit */
    PER_RISCOS =        0x000c,
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =          0x000f,                     /* OSF/1 v4 */
    PER_HPUX =          0x0010,
    PER_MASK =          0x00ff,
};

/*
 * Return the base personality without flags.
 */
#define personality(pers)       (pers & PER_MASK)
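/*
 * For illustration only (not used by the code below): a full personality
 * value combines a base type from the low byte with bug-emulation flags
 * from the top bytes, e.g. PER_SVR4 is 0x0001 | STICKY_TIMEOUTS |
 * MMAP_PAGE_ZERO, and personality(PER_SVR4) recovers just the base 0x0001.
 */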

/* this flag is ineffective under Linux too, should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    X86CPU *cpu = X86_CPU(thread_cpu);

    return cpu->env.features[FEAT_1_EDX];
}

#ifdef TARGET_X86_64
#define ELF_START_MMAP 0x2aaaaab000ULL
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2LSB
#define ELF_ARCH       EM_X86_64

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
    if (bsd_type == target_freebsd) {
        regs->rdi = infop->start_stack;
    }
}

#else

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells us that we have no such handler.  */
    regs->edx = 0;
}
#endif

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_ARM

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->ARM_r0 = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF              \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT     \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)

#endif

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#ifndef TARGET_ABI32
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
#else
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
#endif

#define ELF_CLASS   ELFCLASS64
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARCV9

#define STACK_BIAS              2047

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
#ifndef TARGET_ABI32
    regs->tstate = 0;
#endif
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;
    else {
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
        if (bsd_type == target_freebsd) {
            regs->u_regs[8] = infop->start_stack;
            regs->u_regs[11] = infop->start_stack;
        }
    }
#endif
}

#else
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS   ELFCLASS32
#define ELF_DATA    ELFDATA2MSB
#define ELF_ARCH    EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psr = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif

#ifdef TARGET_PPC

#define ELF_START_MMAP 0x80000000

#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)

#define elf_check_arch(x) ( (x) == EM_PPC64 )

#define ELF_CLASS       ELFCLASS64

#else

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS       ELFCLASS32

#endif

#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE          19
#define AT_ICACHEBSIZE          20
#define AT_UCACHEBSIZE          21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC            22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                                     \
    do {                                                                \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);                              \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);                              \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                                 \
        /*                                                              \
         * Now handle glibc compatibility.                              \
         */                                                             \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);                        \
    } while (0)

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    abi_ulong pos = infop->start_stack;
    abi_ulong tmp;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;
#endif

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    get_user_u64(entry, infop->entry);
    entry += infop->load_addr;
    get_user_u64(toc, infop->entry + 8);
    toc += infop->load_addr;
    _regs->gpr[2] = toc;
    infop->entry = entry;
#endif
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but this is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);
    pos += sizeof(abi_ulong);
    _regs->gpr[4] = pos;
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong)) {
        get_user_ual(tmp, pos);
    }
    _regs->gpr[5] = pos;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#ifdef TARGET_MIPS64
#define ELF_CLASS   ELFCLASS64
#else
#define ELF_CLASS   ELFCLASS32
#endif
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH    EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#endif /* TARGET_MIPS */

#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2LSB
#define ELF_ARCH  EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        4096

#endif

#ifdef TARGET_CRIS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_CRIS )

#define ELF_CLASS ELFCLASS32
#define ELF_DATA  ELFDATA2LSB
#define ELF_ARCH  EM_CRIS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        8192

#endif

#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_68K

/* ??? Does this need to do anything?
#define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->sr = 0;
    regs->pc = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       8192

#endif

#ifdef TARGET_ALPHA

#define ELF_START_MMAP (0x30000000000ULL)

#define elf_check_arch(x) ( (x) == ELF_ARCH )

#define ELF_CLASS      ELFCLASS64
#define ELF_DATA       ELFDATA2MSB
#define ELF_ARCH       EM_ALPHA

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data; /* ? */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE        8192

#endif /* TARGET_ALPHA */

#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#ifdef TARGET_ABI32
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef bswaptls
#define bswaptls(ptr) bswap32s(ptr)
#endif

#include "elf.h"

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};


#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#define DLINFO_ITEMS 12

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

static int load_aout_interp(void * exptr, int interp_fd);

#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem.  These are in a format ready
 * to be put directly into the top of new user memory.
 *
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
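/*
 * Note (sketch of the convention assumed above): 'p' counts down from the
 * top of a MAX_ARG_PAGES * TARGET_PAGE_SIZE region and bprm->page[] holds
 * the backing pages, so setup_arg_pages() below can copy page[i] straight
 * into the top of the new stack and reuse 'p' as an offset into it.
 */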

static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANON,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            g_free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}

static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}


/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = REAL_HOST_PAGE_ALIGN(elf_bss);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}


static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);
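
    /*
     * A rough sketch (for orientation only) of what ends up on the target
     * stack, from the final sp towards higher addresses, once
     * loader_build_argptr() has also run:
     *
     *   argc, argv[0..argc-1], NULL, envp[0..envc-1], NULL,
     *   the auxv entries pushed below (terminated by AT_NULL),
     *   then the platform string and the argument/environment strings
     *   copied earlier by copy_elf_strings().
     */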

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
            sp -= n; put_user_ual(val, sp);     \
            sp -= n; put_user_ual(id, sp);      \
          } while(0)

    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}


static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    int retval;
    abi_ulong last_bss, elf_bss;
    abi_ulong error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }


    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
        free(elf_phdata);
        return retval;
    }
#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            abi_ulong vaddr = 0;
            abi_ulong k;

            if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~((abi_ulong)0UL);
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
}

static int symfind(const void *s0, const void *s1)
{
    target_ulong addr = *(target_ulong *)s0;
    struct elf_sym *sym = (struct elf_sym *)s1;
    int result = 0;
    if (addr < sym->st_value) {
        result = -1;
    } else if (addr >= sym->st_value + sym->st_size) {
        result = 1;
    }
    return result;
}

static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
{
#if ELF_CLASS == ELFCLASS32
    struct elf_sym *syms = s->disas_symtab.elf32;
#else
    struct elf_sym *syms = s->disas_symtab.elf64;
#endif

    // binary search
    struct elf_sym *sym;

    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
    if (sym != NULL) {
        return s->disas_strtab + sym->st_name;
    }

    return "";
}

/* FIXME: This should use elf_ops.h  */
static int symcmp(const void *s0, const void *s1)
{
    struct elf_sym *sym0 = (struct elf_sym *)s0;
    struct elf_sym *sym1 = (struct elf_sym *)s1;
    return (sym0->st_value < sym1->st_value)
        ? -1
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
}

/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i, nsyms;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;
    struct elf_sym *syms, *new_syms;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
#ifdef BSWAP_NEEDED
        bswap_shdr(&sechdr);
#endif
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
#ifdef BSWAP_NEEDED
            bswap_shdr(&strtab);
#endif
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    syms = malloc(symtab.sh_size);
    if (!syms) {
        free(s);
        return;
    }
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_strtab) {
        free(s);
        free(syms);
        return;
    }

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, syms, symtab.sh_size) != symtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }

    nsyms = symtab.sh_size / sizeof(struct elf_sym);

    i = 0;
    while (i < nsyms) {
#ifdef BSWAP_NEEDED
        bswap_sym(syms + i);
#endif
        // Throw away entries which we do not need.
        if (syms[i].st_shndx == SHN_UNDEF ||
            syms[i].st_shndx >= SHN_LORESERVE ||
            ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
            nsyms--;
            if (i < nsyms) {
                syms[i] = syms[nsyms];
            }
            continue;
        }
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
        /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
        syms[i].st_value &= ~(target_ulong)1;
#endif
        i++;
    }

    /* Attempt to free the storage associated with the local symbols
       that we threw away.  Whether or not this has any effect on the
       memory allocation depends on the malloc implementation and how
       many symbols we managed to discard. */
    new_syms = realloc(syms, nsyms * sizeof(*syms));
    if (new_syms == NULL) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    syms = new_syms;

    qsort(syms, nsyms, sizeof(*syms), symcmp);

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size) {
        free(s);
        free(syms);
        free(strings);
        return;
    }
    s->disas_num_syms = nsyms;
#if ELF_CLASS == ELFCLASS32
    s->disas_symtab.elf32 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#else
    s->disas_symtab.elf64 = syms;
    s->lookup_symbol = (lookup_symbol_t)lookup_symbolxx;
#endif
    s->next = syminfos;
    syminfos = s;
}

int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
#ifdef LOW_ELF_STACK
    abi_ulong elf_stack = ~((abi_ulong)0UL);
#endif
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
    if (!bprm->p) {
        retval = -E2BIG;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval > 0) {
        retval = read(bprm->fd, (char *)elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        free(elf_phdata);
        return -errno;
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;


    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;
    interp_ex.a_info = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL)
            {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image.  Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

#if 0
            printf("Using ELF interpreter %s\n", path(elf_interpreter));
#endif
            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp((char *)&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    {
        char * passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
                bprm->argc++;
            }
        }
        if (!bprm->p) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }

    /* OK, This is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /*
     * If the user has not explicitly set guest_base, probe here whether
     * we should set it automatically.
     */
    if (!have_guest_base) {
        /*
         * Go through the ELF program header table and find out whether
         * any of the segments drop below our current mmap_min_addr and
         * in that case set guest_base to the corresponding address.
         */
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
                continue;
            if (HOST_PAGE_ALIGN(elf_ppnt->p_vaddr) < mmap_min_addr) {
                guest_base = HOST_PAGE_ALIGN(mmap_min_addr);
                break;
            }
        }
    }

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmapping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */
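
    /*
     * For illustration only (the values are hypothetical): with a 4 KiB
     * TARGET_ELF_EXEC_PAGESIZE, a segment with p_vaddr 0x08048154 and
     * p_offset 0x154 is mapped at TARGET_ELF_PAGESTART(0x08048154) =
     * 0x08048000, from file offset 0x154 - TARGET_ELF_PAGEOFFSET(0x08048154)
     * = 0, with the length padded by that same page offset; this is the
     * arithmetic used in the target_mmap() call in the loop below.
     */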

    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            return 0;
        }
    }

    free(elf_phdata);

    if (qemu_log_enabled())
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss
       and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

#if 0
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
#endif

    if (info->personality == PER_SVR4) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;

    return 0;
}

static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return(0);
}

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}