/* This is the Linux kernel elf-loading code, ported into user space */

#include <stdio.h>
#include <sys/types.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <string.h>

#include "qemu.h"
#include "disas.h"

/* this flag is ineffective under Linux too; it should be deleted */
#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE 0
#endif

/* should probably go in elf.h */
#ifndef ELIBBAD
#define ELIBBAD 80
#endif

#ifdef TARGET_I386

#define ELF_PLATFORM get_elf_platform()

static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    int family = (global_env->cpuid_version >> 8) & 0xff;
    if (family > 6)
        family = 6;
    if (family >= 3)
        elf_platform[1] = '0' + family;
    return elf_platform;
}

#define ELF_HWCAP get_elf_hwcap()

static uint32_t get_elf_hwcap(void)
{
    return global_env->cpuid_features;
}

#define ELF_START_MMAP 0x80000000

/*
 * This is used to ensure we don't load something for the wrong architecture.
 */
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )

/*
 * These are used to set parameters in the core dumps.
 */
#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_386

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a means for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 means we have no such handler.  */
    regs->edx = 0;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_ARM

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_ARM )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_ARM

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    target_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;
    regs->ARM_pc = infop->entry & 0xfffffffe;
    regs->ARM_sp = infop->start_stack;
    regs->ARM_r2 = tgetl(stack + 8); /* envp */
    regs->ARM_r1 = tgetl(stack + 4); /* argv */
    /* XXX: it seems that r0 is zeroed afterwards anyway! */
    regs->ARM_r0 = 0;
    /* For uClinux PIC binaries.  */
    regs->ARM_r10 = infop->start_data;
}
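
/* The Thumb convention used above: bit 0 of the ELF entry point selects
   Thumb state, so the loader sets CPSR_T and masks the bit off the PC.
   A small worked example (illustrative only): e_entry == 0x00008001 would
   start the guest at pc == 0x00008000 with the T bit set, while
   e_entry == 0x00008000 starts it in ARM state. */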

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,
    ARM_HWCAP_ARM_HALF      = 1 << 1,
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
    ARM_HWCAP_ARM_FPA       = 1 << 5,
    ARM_HWCAP_ARM_VFP       = 1 << 6,
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
};

#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP)

#endif

#ifdef TARGET_SPARC
#ifdef TARGET_SPARC64

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARCV9 )

#define ELF_CLASS       ELFCLASS64
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_SPARCV9

#define STACK_BIAS      2047

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->tstate = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
}

#else
#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SPARC )

#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_SPARC

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psr = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}

#endif
#endif

#ifdef TARGET_PPC

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_PPC )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_PPC

/*
 * We need to put in some extra aux table entries to tell glibc what
 * the cache block size is, so it can use the dcbz instruction safely.
 */
#define AT_DCACHEBSIZE          19
#define AT_ICACHEBSIZE          20
#define AT_UCACHEBSIZE          21
/* A special ignored type value for PPC, for glibc compatibility.  */
#define AT_IGNOREPPC            22
/*
 * The requirements here are:
 * - keep the final alignment of sp (sp & 0xf)
 * - make sure the 32-bit value at the first 16 byte aligned position of
 *   AUXV is greater than 16 for glibc compatibility.
 *   AT_IGNOREPPC is used for that.
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
 */
#define DLINFO_ARCH_ITEMS       5
#define ARCH_DLINFO                                     \
do {                                                    \
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
        /* Now handle glibc compatibility.  */          \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
} while (0)
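
/* A worked example of the rule above (illustrative only): AT_IGNOREPPC is
   22, so whichever of the two padding entries happens to land on a 16-byte
   boundary puts a value greater than 16 there, which satisfies the glibc
   compatibility check described above.  With 32-bit target_ulongs each
   NEW_AUX_ENT occupies 8 bytes, so two pads cover either alignment. */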

static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    target_ulong pos = infop->start_stack;
    target_ulong tmp;

    _regs->msr = 1 << MSR_PR; /* Set user mode */
    _regs->gpr[1] = infop->start_stack;
    _regs->nip = infop->entry;
    /* Note that this isn't exactly what the regular kernel does,
     * but it is what the ABI wants and it is needed to allow
     * execution of PPC BSD programs.
     */
    _regs->gpr[3] = tgetl(pos);
    pos += sizeof(target_ulong);
    _regs->gpr[4] = pos;
    for (tmp = 1; tmp != 0; pos += sizeof(target_ulong))
        tmp = ldl(pos);
    _regs->gpr[5] = pos;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_MIPS

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_MIPS )

#define ELF_CLASS       ELFCLASS32
#ifdef TARGET_WORDS_BIGENDIAN
#define ELF_DATA        ELFDATA2MSB
#else
#define ELF_DATA        ELFDATA2LSB
#endif
#define ELF_ARCH        EM_MIPS

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = CP0St_UM;
    regs->cp0_epc = infop->entry;
    regs->regs[29] = infop->start_stack;
}

#endif /* TARGET_MIPS */

#ifdef TARGET_SH4

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_SH )

#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2LSB
#define ELF_ARCH        EM_SH

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack - 16 * 4;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       4096

#endif

#ifdef TARGET_M68K

#define ELF_START_MMAP 0x80000000

#define elf_check_arch(x) ( (x) == EM_68K )

#define ELF_CLASS       ELFCLASS32
#define ELF_DATA        ELFDATA2MSB
#define ELF_ARCH        EM_68K

/* ??? Does this need to do anything?
#define ELF_PLAT_INIT(_r) */

static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;
    regs->sr = 0;
    regs->pc = infop->entry;
}

#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE       8192

#endif

#ifndef ELF_PLATFORM
#define ELF_PLATFORM (NULL)
#endif

#ifndef ELF_HWCAP
#define ELF_HWCAP 0
#endif

#include "elf.h"

struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};


#define N_MAGIC(exec) ((exec).a_info & 0xffff)
#define OMAGIC 0407
#define NMAGIC 0410
#define ZMAGIC 0413
#define QMAGIC 0314

/* max code+data+bss space allocated to elf interpreter */
#define INTERP_MAP_SIZE (32 * 1024 * 1024)

/* max code+data+bss+brk space allocated to ET_DYN executables */
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
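
/* These two sizes bound the PROT_NONE reservations made further down:
   load_elf_interp() grabs an INTERP_MAP_SIZE window for the dynamic linker
   and load_elf_binary() grabs an ET_DYN_MAP_SIZE window for ET_DYN
   executables, then maps the real segments over them with MAP_FIXED.
   The 32 MB / 128 MB figures are simply generous upper bounds chosen so
   that no guest address has to be hardcoded. */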

/* from personality.h */

/* Flags for bug emulation. These occupy the top three bytes. */
#define STICKY_TIMEOUTS         0x4000000
#define WHOLE_SECONDS           0x2000000

/* Personality types. These go in the low byte. Avoid using the top bit,
 * it will conflict with error returns.
 */
#define PER_MASK                (0x00ff)
#define PER_LINUX               (0x0000)
#define PER_SVR4                (0x0001 | STICKY_TIMEOUTS)
#define PER_SVR3                (0x0002 | STICKY_TIMEOUTS)
#define PER_SCOSVR3             (0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS)
#define PER_WYSEV386            (0x0004 | STICKY_TIMEOUTS)
#define PER_ISCR4               (0x0005 | STICKY_TIMEOUTS)
#define PER_BSD                 (0x0006)
#define PER_XENIX               (0x0007 | STICKY_TIMEOUTS)

/* Necessary parameters */
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))

#define INTERPRETER_NONE 0
#define INTERPRETER_AOUT 1
#define INTERPRETER_ELF 2

#define DLINFO_ITEMS 12

static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
{
    memcpy(to, from, n);
}

extern unsigned long x86_stack_size;

static int load_aout_interp(void * exptr, int interp_fd);

#ifdef BSWAP_NEEDED
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);        /* Object file type */
    bswap16s(&ehdr->e_machine);     /* Architecture */
    bswap32s(&ehdr->e_version);     /* Object file version */
    bswaptls(&ehdr->e_entry);       /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);       /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);       /* Section header table file offset */
    bswap32s(&ehdr->e_flags);       /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);      /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);   /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);       /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);   /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);       /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);    /* Section header string table index */
}

static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);        /* Segment type */
    bswaptls(&phdr->p_offset);      /* Segment file offset */
    bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
    bswaptls(&phdr->p_paddr);       /* Segment physical address */
    bswaptls(&phdr->p_filesz);      /* Segment size in file */
    bswaptls(&phdr->p_memsz);       /* Segment size in memory */
    bswap32s(&phdr->p_flags);       /* Segment flags */
    bswaptls(&phdr->p_align);       /* Segment alignment */
}

static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}

static void bswap_sym(Elf32_Sym *sym)
{
    bswap32s(&sym->st_name);
    bswap32s(&sym->st_value);
    bswap32s(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
#endif
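
/* These helpers are only compiled when BSWAP_NEEDED is defined, i.e. when
   host and target byte orders differ.  A quick illustration (not part of
   the original code): a big-endian e_phnum of 0x0002 read on a
   little-endian host appears as 0x0200 until bswap16s() restores 0x0002;
   bswaptls() does the same for target_ulong-sized fields. */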

/*
 * 'copy_elf_strings()' copies argument/environment strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 */
static unsigned long copy_elf_strings(int argc, char ** argv, void **page,
                                      unsigned long p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                    if (!pag)
                        return 0;
                }
            }
            if (len == 0 || offset == 0) {
                *(pag + offset) = *tmp;
            }
            else {
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}

unsigned long setup_arg_pages(target_ulong p, struct linux_binprm * bprm,
                              struct image_info * info)
{
    target_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0; i < MAX_ARG_PAGES; i++) {
        if (bprm->page[i]) {
            info->rss++;

            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
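
/* Sketch of the region created above, from low to high addresses
   (illustrative only):
 *
 *     error ............... base of the anonymous mapping (the guest stack
 *                           grows down towards this address)
 *     stack_base .......... start of the MAX_ARG_PAGES copied argument pages
 *     error + size ........ one host page protected PROT_NONE as a guard
 */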

static void set_brk(unsigned long start, unsigned long end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;
    if (target_mmap(start, end - start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}


/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(unsigned long elf_bss, unsigned long last_bss)
{
    unsigned long nbyte;

    if (elf_bss >= last_bss)
        return;

    /* XXX: this is really a hack: if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped.  A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known. */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        unsigned long end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            mmap((void *)end_addr1, end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            tput8(elf_bss, 0);
            elf_bss++;
        } while (--nbyte);
    }
}
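
/* A worked example of the zeroing above (numbers are illustrative only):
   with a 4096-byte page size and elf_bss == 0x0804a123, the fractional
   tail starts 0x123 bytes into the page, so nbyte becomes
   0x1000 - 0x123 = 0xedd and the loop writes zero bytes up to the
   0x0804b000 page boundary. */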

static unsigned long create_elf_tables(target_ulong p, int argc, int envc,
                                       struct elfhdr * exec,
                                       unsigned long load_addr,
                                       unsigned long load_bias,
                                       unsigned long interp_load_addr, int ibcs,
                                       struct image_info *info)
{
    target_ulong sp;
    int size;
    target_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(target_ulong);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (target_ulong)15;
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

#define NEW_AUX_ENT(id, val) do { \
            sp -= n; tputl(sp, val); \
            sp -= n; tputl(sp, id); \
        } while (0)
    NEW_AUX_ENT (AT_NULL, 0);

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (target_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (target_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (target_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (target_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (target_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (target_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (target_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (target_ulong) ELF_HWCAP);
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}
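
/* The resulting guest stack, from the final sp upwards, looks roughly like:
 *
 *     argc, argv[0..argc-1], NULL, envp[0..envc-1], NULL,
 *     auxv pairs (AT_PHDR ... AT_NULL), platform string and padding,
 *     then the argument/environment strings copied earlier.
 *
 * (Illustrative summary only: the pointer arrays themselves are written by
 * loader_build_argptr(), and the exact padding follows the size arithmetic
 * above.) */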

static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
                                     int interpreter_fd,
                                     unsigned long *interp_load_addr)
{
    struct elf_phdr *elf_phdata = NULL;
    struct elf_phdr *eppnt;
    unsigned long load_addr = 0;
    int load_addr_set = 0;
    int retval;
    unsigned long last_bss, elf_bss;
    unsigned long error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~0UL;
    }


    /* Now read in all of the header information */

    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~0UL;

    elf_phdata = (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~0UL;

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~0UL;
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if (retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        free(elf_phdata);
        exit(-1);
    }
#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    eppnt = elf_phdata;
    for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            unsigned long vaddr = 0;
            unsigned long k;

            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~0UL;
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }
    }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((unsigned long) interp_elf_ex->e_entry) + load_addr;
}
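
/* Keeping the guest's symbol and string tables around lets the built-in
   disassembler (see "disas.h" and the syminfos list) print symbolic names
   when the loader is run with logging enabled; nothing here is needed for
   correct execution, hence the "best attempt" policy below. */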

/* Best attempt to load symbols from this ELF object. */
static void load_symbols(struct elfhdr *hdr, int fd)
{
    unsigned int i;
    struct elf_shdr sechdr, symtab, strtab;
    char *strings;
    struct syminfo *s;

    lseek(fd, hdr->e_shoff, SEEK_SET);
    for (i = 0; i < hdr->e_shnum; i++) {
        if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr))
            return;
#ifdef BSWAP_NEEDED
        bswap_shdr(&sechdr);
#endif
        if (sechdr.sh_type == SHT_SYMTAB) {
            symtab = sechdr;
            lseek(fd, hdr->e_shoff
                  + sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
            if (read(fd, &strtab, sizeof(strtab))
                != sizeof(strtab))
                return;
#ifdef BSWAP_NEEDED
            bswap_shdr(&strtab);
#endif
            goto found;
        }
    }
    return; /* Shouldn't happen... */

 found:
    /* Now we know where the strtab and symtab are.  Snarf them. */
    s = malloc(sizeof(*s));
    s->disas_symtab = malloc(symtab.sh_size);
    s->disas_strtab = strings = malloc(strtab.sh_size);
    if (!s->disas_symtab || !s->disas_strtab)
        return;

    lseek(fd, symtab.sh_offset, SEEK_SET);
    if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
        return;

#ifdef BSWAP_NEEDED
    for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++)
        bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i);
#endif

    lseek(fd, strtab.sh_offset, SEEK_SET);
    if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
        return;
    s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym);
    s->next = syminfos;
    syminfos = s;
}
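
/* Overall flow of load_elf_binary(), as implemented below: validate the ELF
   header, copy the filename/environment/argument strings into temporary
   pages, read the program headers, pull in PT_INTERP (the dynamic linker)
   if present, map every PT_LOAD segment, then build the stack and auxiliary
   vector and record the resulting layout in *info. */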

int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    unsigned long load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    unsigned long mapped_addr;
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    unsigned long elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    unsigned long elf_entry, interp_load_addr = 0;
    int status;
    unsigned long start_code, end_code, end_data;
    unsigned long elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    status = 0;
    load_addr = 0;
    load_bias = 0;
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (!elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
    if (!bprm->p) {
        retval = -E2BIG;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    if (retval > 0) {
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        free(elf_phdata);
        exit(-1);
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;


    elf_stack = ~0UL;
    elf_interpreter = NULL;
    start_code = ~0UL;
    end_code = 0;
    end_data = 0;

    for (i = 0; i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            if (elf_interpreter != NULL)
            {
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free(elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if (retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if (retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image. Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }
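
            /* ibcs2_interpreter feeds the personality selection near the
               end of this function: iBCS2/SVr4 images get PER_SVR4, which
               later triggers the SVr4-style read-only mapping of page 0. */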

#if 0
            printf("Using ELF interpreter %s\n", elf_interpreter);
#endif
            if (retval >= 0) {
                retval = open(path(elf_interpreter), O_RDONLY);
                if (retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if (retval >= 0) {
                    retval = read(interpreter_fd, bprm->buf, 128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf);       /* aout exec-header */
                interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                free(elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                exit(-1);
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter) {
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp(&interp_elf_ex.e_ident[1], "ELF", 3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    {
        char * passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
                bprm->argc++;
            }
        }
        if (!bprm->p) {
            if (elf_interpreter) {
                free(elf_interpreter);
            }
            free(elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }

    /* OK, this is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (unsigned long)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (unsigned long) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;
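
    /* For ET_EXEC images load_bias stays 0 and every segment is mapped at
       its p_vaddr.  For ET_DYN images the loop below first reserves an
       ET_DYN_MAP_SIZE window and derives load_bias from it; e.g. if the
       reservation lands at 0x40000000 and the first PT_LOAD has p_vaddr 0,
       the whole image is simply shifted by 0x40000000.  (Worked example
       only; the actual address comes from target_mmap.) */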

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */

    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        unsigned long error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable. */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    // start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & INTERPRETER_AOUT) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & INTERPRETER_ELF) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~0UL) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
        }
    }

    free(elf_phdata);

    if (loglevel)
        load_symbols(&elf_ex, bprm->fd);

    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
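
    /* The fields recorded in *info below describe the final memory layout
       of the loaded image; the rest of the linux-user code reads them back
       later (for instance the initial program break comes from info->brk)
       rather than recomputing the layout. */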
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = end_code;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need
       for the bss and break sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

#if 0
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
#endif

    if (info->personality == PER_SVR4)
    {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;

    return 0;
}

static int load_aout_interp(void * exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return 0;
}

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}