#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols to iterate over
 * @idx: uint32_t idx
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}
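
/*
 * Section indices as returned in *idx are 1-based: elf_nextscn() starts
 * after the SHT_NULL entry at index 0, matching elf_addr_to_index() above.
 */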
Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
 * .dynsym or .symtab).
 * And always look at the original dso, not at debuginfo packages, that
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
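	 * Each relocation entry corresponds to one PLT slot, so the loops
	 * below synthesize one "<name>@plt" symbol of size
	 * shdr_plt.sh_entsize per entry.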
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}

/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
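 * For example, NOTE_ALIGN(5) == 8 and NOTE_ALIGN(8) == 8.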
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)

static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}
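
/*
 * The .gnu_debuglink section, when present, names the separate debug
 * info file associated with this binary.
 */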
int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd;
	const char *ext = strrchr(name, '.');
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	if (!ext || !is_supported_compression(ext + 1)) {
		ext = strrchr(dso->name, '.');
		if (!ext || !is_supported_compression(ext + 1))
			return -1;
	}

	fd = mkstemp(tmpbuf);
	if (fd < 0)
		return -1;

	if (!decompress_to_file(ext + 1, name, fd)) {
		close(fd);
		fd = -1;
	}

	unlink(tmpbuf);

	return fd;
}

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso))
		fd = decompress_kmodule(dso, name, type);
	else
		fd = open(name, O_RDONLY);

	if (fd < 0)
		return -1;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
		goto out_elf_end;

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
			goto out_elf_end;

		if (!dso__build_id_equal(dso, build_id))
			goto out_elf_end;
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

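	/*
	 * Look up the sections used for symbol resolution: .symtab,
	 * .dynsym and, on ppc64 ABIv1 binaries, the .opd function
	 * descriptor section.
	 */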
	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
					 NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
					 &ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
					 &ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER) {
		GElf_Shdr shdr;
		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
				ehdr.e_type == ET_REL ||
				dso__is_vdso(dso) ||
				elf_section_by_name(elf, &ehdr, &shdr,
						    ".gnu.prelink_undo",
						    NULL) != NULL);
	} else {
		ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
				     ehdr.e_type == ET_REL;
	}

	ss->name = strdup(name);
	if (!ss->name)
		goto out_elf_end;

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}

/**
 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found. Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
		 */
		if (dso->kernel)
			goto out_elf_end;

		syms_ss->symtab = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso. For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/* Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so will confuse the profile
		 * output: */
		if (ehdr.e_machine == EM_ARM) {
			if (!strcmp(elf_name, "$a") ||
			    !strcmp(elf_name, "$d") ||
			    !strcmp(elf_name, "$t"))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn(). That marks the loading as a failure, so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/* On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps. Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					map_groups__remove(kmap->kmaps, map);
					map_groups__insert(kmap->kmaps, map);
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				if (curr_map == NULL) {
					dso__delete(curr_dso);
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmap->kmaps, curr_map);
				/*
				 * The new DSO should go to the kernel DSOs.
				 */
				dsos__add(&map->groups->machine->kernel_dsos,
					  curr_dso);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols) ||
		    (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++ sources
		 * DWARF DW_compile_unit has this, but we don't always have access
		 * to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmap->kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
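		/* write back exactly what was read; a short write is an error */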
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	GElf_Ehdr *ehdr;

	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type = kehdr->e_type;
	ehdr->e_machine = kehdr->e_machine;
	ehdr->e_version = kehdr->e_version;
	ehdr->e_entry = 0;
	ehdr->e_shoff = 0;
	ehdr->e_flags = kehdr->e_flags;
	ehdr->e_phnum = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum = 0;
	ehdr->e_shstrndx = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr gphdr;
	GElf_Phdr *phdr;

	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
	if (!phdr)
		return -1;

	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R | PF_W | PF_X;
	phdr->p_offset = offset;
	phdr->p_vaddr = addr;
	phdr->p_paddr = 0;
	phdr->p_filesz = len;
	phdr->p_memsz = len;
	phdr->p_align = page_size;

	if (!gelf_update_phdr(kcore->elf, idx, phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}
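
/*
 * phdr_data describes one PT_LOAD segment of an extracted kcore;
 * kcore_copy_info collects the kernel and module address ranges (taken
 * from the kallsyms and modules files) used to build those segments.
 */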
struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}
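
/*
 * Called for each PT_LOAD segment of the kcore being read; record which
 * segment covers the kernel text range and which covers the modules range.
 */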
static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another. kallsyms and modules are copied entirely. Only code segments are
 * copied from kcore. It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules. The code segments are determined
 * from kallsyms and modules files. The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol. Start addresses are rounded down to the nearest page. End
 * addresses are rounded up to the nearest page. An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass that
 * symbol too. Because it contains only code sections, the resulting kcore is
 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map. That happens because
 * the data is copied adjacently whereas the original kcore has gaps. Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
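 *
 * The extracted kcore is laid out with the ELF header and program headers in
 * the first page, the copied kernel text at file offset page_size and, when
 * modules are present, the copied modules area immediately after it.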
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}

int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}