#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "symbol.h"
#include "machine.h"
#include "vdso.h"
#include <symbol/kallsyms.h>
#include "debug.h"

#ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
extern char *cplus_demangle(const char *, int);

static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i)
{
	return cplus_demangle(c, i);
}
#else
#ifdef NO_DEMANGLE
static inline char *bfd_demangle(void __maybe_unused *v,
				 const char __maybe_unused *c,
				 int __maybe_unused i)
{
	return NULL;
}
#else
#define PACKAGE 'perf'
#include <bfd.h>
#endif
#endif

#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols to iterate over
 * @idx: uint32_t index
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS;
}

static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sym__is_function(sym);
	case MAP__VARIABLE:
		return elf_sym__is_object(sym);
	default:
		return false;
	}
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

static inline const char *elf_sec__name(const GElf_Shdr *shdr,
					const Elf_Data *secstrs)
{
	return secstrs->d_buf + shdr->sh_name;
}

static inline int elf_sec__is_text(const GElf_Shdr *shdr,
				   const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
}

static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
				    const Elf_Data *secstrs)
{
	return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
}

static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs,
			  enum map_type type)
{
	switch (type) {
	case MAP__FUNCTION:
		return elf_sec__is_text(shdr, secstrs);
	case MAP__VARIABLE:
		return elf_sec__is_data(shdr, secstrs);
	default:
		return false;
	}
}

static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
{
	Elf_Scn *sec = NULL;
	GElf_Shdr shdr;
	size_t cnt = 1;

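	/*
	 * Walk every section and return the (1-based) index of the section
	 * whose [sh_addr, sh_addr + sh_size) range contains addr.
	 */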
	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		gelf_getshdr(sec, &shdr);

		if ((addr >= shdr.sh_addr) &&
		    (addr < (shdr.sh_addr + shdr.sh_size)))
			return cnt;

		++cnt;
	}

	return -1;
}

Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
			     GElf_Shdr *shp, const char *name, size_t *idx)
{
	Elf_Scn *sec = NULL;
	size_t cnt = 1;

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
		return NULL;

	while ((sec = elf_nextscn(elf, sec)) != NULL) {
		char *str;

		gelf_getshdr(sec, shp);
		str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
		if (str && !strcmp(name, str)) {
			if (idx)
				*idx = cnt;
			return sec;
		}
		++cnt;
	}

	return NULL;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, which aren't in the symtabs (be it
 * .dynsym or .symtab).
 * And always look at the original dso, not at debuginfo packages, which
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map,
				symbol_filter_t filter)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the indexes to the GOT
	 * and the symbols in the .dynsym they refer to.
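	 * Each PLT slot then gets a synthetic "<name>@plt" symbol placed at
	 * its offset within .plt.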
	 */
	reldata = elf_getdata(scn_plt_rel, NULL);
	if (reldata == NULL)
		goto out_elf_end;

	syms = elf_getdata(scn_dynsym, NULL);
	if (syms == NULL)
		goto out_elf_end;

	scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
	if (scn_symstrs == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(scn_symstrs, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	if (symstrs->d_size == 0)
		goto out_elf_end;

	nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
	plt_offset = shdr_plt.sh_offset;

	if (shdr_rel_plt.sh_type == SHT_RELA) {
		GElf_Rela pos_mem, *pos;

		elf_section__for_each_rela(reldata, pos, pos_mem, idx,
					   nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	} else if (shdr_rel_plt.sh_type == SHT_REL) {
		GElf_Rel pos_mem, *pos;
		elf_section__for_each_rel(reldata, pos, pos_mem, idx,
					  nr_rel_entries) {
			symidx = GELF_R_SYM(pos->r_info);
			plt_offset += shdr_plt.sh_entsize;
			gelf_getsym(syms, symidx, &sym);
			snprintf(sympltname, sizeof(sympltname),
				 "%s@plt", elf_sym__name(&sym, symstrs));

			f = symbol__new(plt_offset, shdr_plt.sh_entsize,
					STB_GLOBAL, sympltname);
			if (!f)
				goto out_elf_end;

			if (filter && filter(map, f))
				symbol__delete(f);
			else {
				symbols__insert(&dso->symbols[map->type], f);
				++nr;
			}
		}
	}

	err = 0;
out_elf_end:
	if (err == 0)
		return nr;
	pr_debug("%s: problems reading %s PLT info.\n",
		 __func__, dso->long_name);
	return 0;
}

/*
 * Align offset to 4 bytes as needed for note name and descriptor data.
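 * For example, NOTE_ALIGN(5) == 8 while NOTE_ALIGN(8) == 8.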
 */
#define NOTE_ALIGN(n) (((n) + 3) & -4U)

static int elf_read_build_id(Elf *elf, void *bf, size_t size)
{
	int err = -1;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;
	void *ptr;

	if (size < BUILD_ID_SIZE)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out;
	}

	/*
	 * Check following sections for notes:
	 *   '.note.gnu.build-id'
	 *   '.notes'
	 *   '.note' (VDSO specific)
	 */
	do {
		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note.gnu.build-id", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".notes", NULL);
		if (sec)
			break;

		sec = elf_section_by_name(elf, &ehdr, &shdr,
					  ".note", NULL);
		if (sec)
			break;

		return err;

	} while (0);

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out;

	ptr = data->d_buf;
	while (ptr < (data->d_buf + data->d_size)) {
		GElf_Nhdr *nhdr = ptr;
		size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
		       descsz = NOTE_ALIGN(nhdr->n_descsz);
		const char *name;

		ptr += sizeof(*nhdr);
		name = ptr;
		ptr += namesz;
		if (nhdr->n_type == NT_GNU_BUILD_ID &&
		    nhdr->n_namesz == sizeof("GNU")) {
			if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(size, descsz);
				memcpy(bf, ptr, sz);
				memset(bf + sz, 0, size - sz);
				err = descsz;
				break;
			}
		}
		ptr += descsz;
	}

out:
	return err;
}

int filename__read_build_id(const char *filename, void *bf, size_t size)
{
	int fd, err = -1;
	Elf *elf;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	err = elf_read_build_id(elf, bf, size);

	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
{
	int fd, err = -1;

	if (size < BUILD_ID_SIZE)
		goto out;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	while (1) {
		char bf[BUFSIZ];
		GElf_Nhdr nhdr;
		size_t namesz, descsz;

		if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
			break;

		namesz = NOTE_ALIGN(nhdr.n_namesz);
		descsz = NOTE_ALIGN(nhdr.n_descsz);
		if (nhdr.n_type == NT_GNU_BUILD_ID &&
		    nhdr.n_namesz == sizeof("GNU")) {
			if (read(fd, bf, namesz) != (ssize_t)namesz)
				break;
			if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
				size_t sz = min(descsz, size);
				if (read(fd, build_id, sz) == (ssize_t)sz) {
					memset(build_id + sz, 0, size - sz);
					err = 0;
					break;
				}
			} else if (read(fd, bf, descsz) != (ssize_t)descsz)
				break;
		} else {
			int n = namesz + descsz;
			if (read(fd, bf, n) != n)
				break;
		}
	}
	close(fd);
out:
	return err;
}

int filename__read_debuglink(const char *filename, char *debuglink,
			     size_t size)
{
	int fd, err = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *data;
	Elf_Scn *sec;
	Elf_Kind ek;

	fd = open(filename, O_RDONLY);
	if (fd < 0)
		goto out;

	elf = elf_begin(fd,
			PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
		goto out_close;
	}

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_elf_end;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_err("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	sec = elf_section_by_name(elf, &ehdr, &shdr,
				  ".gnu_debuglink", NULL);
	if (sec == NULL)
		goto out_elf_end;

	data = elf_getdata(sec, NULL);
	if (data == NULL)
		goto out_elf_end;

	/* the start of this section is a zero-terminated string */
	strncpy(debuglink, data->d_buf, size);

	err = 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
out:
	return err;
}

static int dso__swap_init(struct dso *dso, unsigned char eidata)
{
	static unsigned int const endian = 1;

	dso->needs_swap = DSO_SWAP__NO;

	switch (eidata) {
	case ELFDATA2LSB:
		/* We are big endian, DSO is little endian. */
		if (*(unsigned char const *)&endian != 1)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	case ELFDATA2MSB:
		/* We are little endian, DSO is big endian. */
		if (*(unsigned char const *)&endian != 0)
			dso->needs_swap = DSO_SWAP__YES;
		break;

	default:
		pr_err("unrecognized DSO data encoding %d\n", eidata);
		return -EINVAL;
	}

	return 0;
}

static int decompress_kmodule(struct dso *dso, const char *name,
			      enum dso_binary_type type)
{
	int fd;
	const char *ext = strrchr(name, '.');
	char tmpbuf[] = "/tmp/perf-kmod-XXXXXX";

	if (type != DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__GUEST_KMODULE_COMP &&
	    type != DSO_BINARY_TYPE__BUILD_ID_CACHE)
		return -1;

	if (!ext || !is_supported_compression(ext + 1)) {
		ext = strrchr(dso->name, '.');
		if (!ext || !is_supported_compression(ext + 1))
			return -1;
	}

	fd = mkstemp(tmpbuf);
	if (fd < 0)
		return -1;

	if (!decompress_to_file(ext + 1, name, fd)) {
		close(fd);
		fd = -1;
	}

	unlink(tmpbuf);

	return fd;
}

bool symsrc__possibly_runtime(struct symsrc *ss)
{
	return ss->dynsym || ss->opdsec;
}

bool symsrc__has_symtab(struct symsrc *ss)
{
	return ss->symtab != NULL;
}

void symsrc__destroy(struct symsrc *ss)
{
	zfree(&ss->name);
	elf_end(ss->elf);
	close(ss->fd);
}

int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
		 enum dso_binary_type type)
{
	int err = -1;
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd;

	if (dso__needs_decompress(dso))
		fd = decompress_kmodule(dso, name, type);
	else
		fd = open(name, O_RDONLY);

	if (fd < 0)
		return -1;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL) {
		pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
		goto out_close;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		pr_debug("%s: cannot get elf header.\n", __func__);
		goto out_elf_end;
	}

	if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
		goto out_elf_end;

	/* Always reject images with a mismatched build-id: */
	if (dso->has_build_id) {
		u8 build_id[BUILD_ID_SIZE];

		if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0)
			goto out_elf_end;

		if (!dso__build_id_equal(dso, build_id))
			goto out_elf_end;
	}

	ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
					 NULL);
	if (ss->symshdr.sh_type != SHT_SYMTAB)
		ss->symtab = NULL;

	ss->dynsym_idx = 0;
	ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
					 &ss->dynsym_idx);
	if (ss->dynshdr.sh_type != SHT_DYNSYM)
		ss->dynsym = NULL;

	ss->opdidx = 0;
	ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
					 &ss->opdidx);
	if (ss->opdshdr.sh_type != SHT_PROGBITS)
		ss->opdsec = NULL;

	if (dso->kernel == DSO_TYPE_USER) {
		GElf_Shdr shdr;
		ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
				      ehdr.e_type == ET_REL ||
				      dso__is_vdso(dso) ||
				      elf_section_by_name(elf, &ehdr, &shdr,
							  ".gnu.prelink_undo",
							  NULL) != NULL);
	} else {
		ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
				     ehdr.e_type == ET_REL;
	}

	ss->name = strdup(name);
	if (!ss->name)
		goto out_elf_end;

	ss->elf = elf;
	ss->fd = fd;
	ss->ehdr = ehdr;
	ss->type = type;

	return 0;

out_elf_end:
	elf_end(elf);
out_close:
	close(fd);
	return err;
}

/**
 * ref_reloc_sym_not_found - has the kernel relocation symbol not been found yet?
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns %true if we are dealing with the kernel maps and the
 * relocation reference symbol has not yet been found. Otherwise %false is
 * returned.
 */
static bool ref_reloc_sym_not_found(struct kmap *kmap)
{
	return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
	       !kmap->ref_reloc_sym->unrelocated_addr;
}

/**
 * ref_reloc - kernel relocation offset.
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

static bool want_demangle(bool is_kernel_sym)
{
	return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
}

int dso__load_sym(struct dso *dso, struct map *map,
		  struct symsrc *syms_ss, struct symsrc *runtime_ss,
		  symbol_filter_t filter, int kmodule)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct map *curr_map = map;
	struct dso *curr_dso = dso;
	Elf_Data *symstrs, *secstrs;
	uint32_t nr_syms;
	int err = -1;
	uint32_t idx;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	Elf_Data *syms, *opddata = NULL;
	GElf_Sym sym;
	Elf_Scn *sec, *sec_strndx;
	Elf *elf;
	int nr = 0;
	bool remap_kernel = false, adjust_kernel_syms = false;

	dso->symtab_type = syms_ss->type;
	dso->is_64_bit = syms_ss->is_64_bit;
	dso->rel = syms_ss->ehdr.e_type == ET_REL;

	/*
	 * Modules may already have symbols from kallsyms, but those symbols
	 * have the wrong values for the dso maps, so remove them.
	 */
	if (kmodule && syms_ss->symtab)
		symbols__delete(&dso->symbols[map->type]);

	if (!syms_ss->symtab) {
		/*
		 * If the vmlinux is stripped, fail so we will fall back
		 * to using kallsyms. The vmlinux runtime symbols aren't
		 * of much use.
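		 * For other DSOs we can still fall back to .dynsym below.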
		 */
		if (dso->kernel)
			goto out_elf_end;

		syms_ss->symtab = syms_ss->dynsym;
		syms_ss->symshdr = syms_ss->dynshdr;
	}

	elf = syms_ss->elf;
	ehdr = syms_ss->ehdr;
	sec = syms_ss->symtab;
	shdr = syms_ss->symshdr;

	if (runtime_ss->opdsec)
		opddata = elf_rawdata(runtime_ss->opdsec, NULL);

	syms = elf_getdata(sec, NULL);
	if (syms == NULL)
		goto out_elf_end;

	sec = elf_getscn(elf, shdr.sh_link);
	if (sec == NULL)
		goto out_elf_end;

	symstrs = elf_getdata(sec, NULL);
	if (symstrs == NULL)
		goto out_elf_end;

	sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
	if (sec_strndx == NULL)
		goto out_elf_end;

	secstrs = elf_getdata(sec_strndx, NULL);
	if (secstrs == NULL)
		goto out_elf_end;

	nr_syms = shdr.sh_size / shdr.sh_entsize;

	memset(&sym, 0, sizeof(sym));

	/*
	 * The kernel relocation symbol is needed in advance in order to adjust
	 * kernel maps correctly.
	 */
	if (ref_reloc_sym_not_found(kmap)) {
		elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
			const char *elf_name = elf_sym__name(&sym, symstrs);

			if (strcmp(elf_name, kmap->ref_reloc_sym->name))
				continue;
			kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
			map->reloc = kmap->ref_reloc_sym->addr -
				     kmap->ref_reloc_sym->unrelocated_addr;
			break;
		}
	}

	dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
	/*
	 * Initial kernel and module mappings do not map to the dso. For
	 * function mappings, flag the fixups.
	 */
	if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
		remap_kernel = true;
		adjust_kernel_syms = dso->adjust_symbols;
	}
	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
		struct symbol *f;
		const char *elf_name = elf_sym__name(&sym, symstrs);
		char *demangled = NULL;
		int is_label = elf_sym__is_label(&sym);
		const char *section_name;
		bool used_opd = false;

		if (!is_label && !elf_sym__is_a(&sym, map->type))
			continue;

		/*
		 * Reject ARM ELF "mapping symbols": these aren't unique and
		 * don't identify functions, so they will confuse the profile
		 * output:
		 */
		if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
			if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
			    && (elf_name[2] == '\0' || elf_name[2] == '.'))
				continue;
		}

		if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
			u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
			u64 *opd = opddata->d_buf + offset;
			sym.st_value = DSO__SWAP(dso, u64, *opd);
			sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
							 sym.st_value);
			used_opd = true;
		}
		/*
		 * When loading symbols in a data mapping, ABS symbols (which
		 * have a value of SHN_ABS in their st_shndx) fail at
		 * elf_getscn(). That marks the loading as a failure, so
		 * already loaded symbols cannot be fixed up.
		 *
		 * I'm not sure what should be done. Just ignore them for now.
		 * - Namhyung Kim
		 */
		if (sym.st_shndx == SHN_ABS)
			continue;

		sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
		if (!sec)
			goto out_elf_end;

		gelf_getshdr(sec, &shdr);

		if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type))
			continue;

		section_name = elf_sec__name(&shdr, secstrs);

		/*
		 * On ARM, symbols for thumb functions have 1 added to
		 * the symbol address as a flag - remove it.
		 */
		if ((ehdr.e_machine == EM_ARM) &&
		    (map->type == MAP__FUNCTION) &&
		    (sym.st_value & 1))
			--sym.st_value;

		if (dso->kernel || kmodule) {
			char dso_name[PATH_MAX];

			/* Adjust symbol to map to file offset */
			if (adjust_kernel_syms)
				sym.st_value -= shdr.sh_addr - shdr.sh_offset;

			if (strcmp(section_name,
				   (curr_dso->short_name +
				    dso->short_name_len)) == 0)
				goto new_symbol;

			if (strcmp(section_name, ".text") == 0) {
				/*
				 * The initial kernel mapping is based on
				 * kallsyms and identity maps. Overwrite it to
				 * map to the kernel dso.
				 */
				if (remap_kernel && dso->kernel) {
					remap_kernel = false;
					map->start = shdr.sh_addr +
						     ref_reloc(kmap);
					map->end = map->start + shdr.sh_size;
					map->pgoff = shdr.sh_offset;
					map->map_ip = map__map_ip;
					map->unmap_ip = map__unmap_ip;
					/* Ensure maps are correctly ordered */
					map_groups__remove(kmap->kmaps, map);
					map_groups__insert(kmap->kmaps, map);
				}

				/*
				 * The initial module mapping is based on
				 * /proc/modules mapped to offset zero.
				 * Overwrite it to map to the module dso.
				 */
				if (remap_kernel && kmodule) {
					remap_kernel = false;
					map->pgoff = shdr.sh_offset;
				}

				curr_map = map;
				curr_dso = dso;
				goto new_symbol;
			}

			if (!kmap)
				goto new_symbol;

			snprintf(dso_name, sizeof(dso_name),
				 "%s%s", dso->short_name, section_name);

			curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name);
			if (curr_map == NULL) {
				u64 start = sym.st_value;

				if (kmodule)
					start += map->start + shdr.sh_offset;

				curr_dso = dso__new(dso_name);
				if (curr_dso == NULL)
					goto out_elf_end;
				curr_dso->kernel = dso->kernel;
				curr_dso->long_name = dso->long_name;
				curr_dso->long_name_len = dso->long_name_len;
				curr_map = map__new2(start, curr_dso,
						     map->type);
				if (curr_map == NULL) {
					dso__delete(curr_dso);
					goto out_elf_end;
				}
				if (adjust_kernel_syms) {
					curr_map->start = shdr.sh_addr +
							  ref_reloc(kmap);
					curr_map->end = curr_map->start +
							shdr.sh_size;
					curr_map->pgoff = shdr.sh_offset;
				} else {
					curr_map->map_ip = identity__map_ip;
					curr_map->unmap_ip = identity__map_ip;
				}
				curr_dso->symtab_type = dso->symtab_type;
				map_groups__insert(kmap->kmaps, curr_map);
				/*
				 * The new DSO should go to the kernel DSOs
				 */
				dsos__add(&map->groups->machine->kernel_dsos,
					  curr_dso);
				dso__set_loaded(curr_dso, map->type);
			} else
				curr_dso = curr_map->dso;

			goto new_symbol;
		}

		if ((used_opd && runtime_ss->adjust_symbols)
		    || (!used_opd && syms_ss->adjust_symbols)) {
			pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
				  "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
				  (u64)sym.st_value, (u64)shdr.sh_addr,
				  (u64)shdr.sh_offset);
			sym.st_value -= shdr.sh_addr - shdr.sh_offset;
		}
new_symbol:
		/*
		 * We need to figure out if the object was created from C++ sources
		 * DWARF DW_compile_unit has this, but we don't always have access
		 * to it...
		 */
		if (want_demangle(dso->kernel || kmodule)) {
			int demangle_flags = DMGL_NO_OPTS;
			if (verbose)
				demangle_flags = DMGL_PARAMS | DMGL_ANSI;

			demangled = bfd_demangle(NULL, elf_name, demangle_flags);
			if (demangled != NULL)
				elf_name = demangled;
		}
		f = symbol__new(sym.st_value, sym.st_size,
				GELF_ST_BIND(sym.st_info), elf_name);
		free(demangled);
		if (!f)
			goto out_elf_end;

		if (filter && filter(curr_map, f))
			symbol__delete(f);
		else {
			symbols__insert(&curr_dso->symbols[curr_map->type], f);
			nr++;
		}
	}

	/*
	 * For misannotated, zeroed, ASM function sizes.
	 */
	if (nr > 0) {
		symbols__fixup_duplicate(&dso->symbols[map->type]);
		symbols__fixup_end(&dso->symbols[map->type]);
		if (kmap) {
			/*
			 * We need to fixup this here too because we create new
			 * maps here, for things like vsyscall sections.
			 */
			__map_groups__fixup_end(kmap->kmaps, map->type);
		}
	}
	err = nr;
out_elf_end:
	return err;
}

static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
{
	GElf_Phdr phdr;
	size_t i, phdrnum;
	int err;
	u64 sz;

	if (elf_getphdrnum(elf, &phdrnum))
		return -1;

	for (i = 0; i < phdrnum; i++) {
		if (gelf_getphdr(elf, i, &phdr) == NULL)
			return -1;
		if (phdr.p_type != PT_LOAD)
			continue;
		if (exe) {
			if (!(phdr.p_flags & PF_X))
				continue;
		} else {
			if (!(phdr.p_flags & PF_R))
				continue;
		}
		sz = min(phdr.p_memsz, phdr.p_filesz);
		if (!sz)
			continue;
		err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
		if (err)
			return err;
	}
	return 0;
}

int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
		    bool *is_64_bit)
{
	int err;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		return -1;

	if (is_64_bit)
		*is_64_bit = (gelf_getclass(elf) == ELFCLASS64);

	err = elf_read_maps(elf, exe, mapfn, data);

	elf_end(elf);
	return err;
}

enum dso_type dso__type_fd(int fd)
{
	enum dso_type dso_type = DSO__TYPE_UNKNOWN;
	GElf_Ehdr ehdr;
	Elf_Kind ek;
	Elf *elf;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (elf == NULL)
		goto out;

	ek = elf_kind(elf);
	if (ek != ELF_K_ELF)
		goto out_end;

	if (gelf_getclass(elf) == ELFCLASS64) {
		dso_type = DSO__TYPE_64BIT;
		goto out_end;
	}

	if (gelf_getehdr(elf, &ehdr) == NULL)
		goto out_end;

	if (ehdr.e_machine == EM_X86_64)
		dso_type = DSO__TYPE_X32BIT;
	else
		dso_type = DSO__TYPE_32BIT;
out_end:
	elf_end(elf);
out:
	return dso_type;
}

static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
{
	ssize_t r;
	size_t n;
	int err = -1;
	char *buf = malloc(page_size);

	if (buf == NULL)
		return -1;

	if (lseek(to, to_offs, SEEK_SET) != to_offs)
		goto out;

	if (lseek(from, from_offs, SEEK_SET) != from_offs)
		goto out;

	while (len) {
		n = page_size;
		if (len < n)
			n = len;
		/* Use read because mmap won't work on proc files */
		r = read(from, buf, n);
		if (r < 0)
			goto out;
		if (!r)
			break;
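		/* Write back exactly the bytes just read; a short write fails the copy. */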
		n = r;
		r = write(to, buf, n);
		if (r < 0)
			goto out;
		if ((size_t)r != n)
			goto out;
		len -= n;
	}

	err = 0;
out:
	free(buf);
	return err;
}

struct kcore {
	int fd;
	int elfclass;
	Elf *elf;
	GElf_Ehdr ehdr;
};

static int kcore__open(struct kcore *kcore, const char *filename)
{
	GElf_Ehdr *ehdr;

	kcore->fd = open(filename, O_RDONLY);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
	if (!kcore->elf)
		goto out_close;

	kcore->elfclass = gelf_getclass(kcore->elf);
	if (kcore->elfclass == ELFCLASSNONE)
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	return -1;
}

static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
		       bool temp)
{
	GElf_Ehdr *ehdr;

	kcore->elfclass = elfclass;

	if (temp)
		kcore->fd = mkstemp(filename);
	else
		kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
	if (kcore->fd == -1)
		return -1;

	kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
	if (!kcore->elf)
		goto out_close;

	if (!gelf_newehdr(kcore->elf, elfclass))
		goto out_end;

	ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
	if (!ehdr)
		goto out_end;

	return 0;

out_end:
	elf_end(kcore->elf);
out_close:
	close(kcore->fd);
	unlink(filename);
	return -1;
}

static void kcore__close(struct kcore *kcore)
{
	elf_end(kcore->elf);
	close(kcore->fd);
}

static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
{
	GElf_Ehdr *ehdr = &to->ehdr;
	GElf_Ehdr *kehdr = &from->ehdr;

	memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
	ehdr->e_type = kehdr->e_type;
	ehdr->e_machine = kehdr->e_machine;
	ehdr->e_version = kehdr->e_version;
	ehdr->e_entry = 0;
	ehdr->e_shoff = 0;
	ehdr->e_flags = kehdr->e_flags;
	ehdr->e_phnum = count;
	ehdr->e_shentsize = 0;
	ehdr->e_shnum = 0;
	ehdr->e_shstrndx = 0;

	if (from->elfclass == ELFCLASS32) {
		ehdr->e_phoff = sizeof(Elf32_Ehdr);
		ehdr->e_ehsize = sizeof(Elf32_Ehdr);
		ehdr->e_phentsize = sizeof(Elf32_Phdr);
	} else {
		ehdr->e_phoff = sizeof(Elf64_Ehdr);
		ehdr->e_ehsize = sizeof(Elf64_Ehdr);
		ehdr->e_phentsize = sizeof(Elf64_Phdr);
	}

	if (!gelf_update_ehdr(to->elf, ehdr))
		return -1;

	if (!gelf_newphdr(to->elf, count))
		return -1;

	return 0;
}

static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
			   u64 addr, u64 len)
{
	GElf_Phdr gphdr;
	GElf_Phdr *phdr;

	phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
	if (!phdr)
		return -1;

	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R | PF_W | PF_X;
	phdr->p_offset = offset;
	phdr->p_vaddr = addr;
	phdr->p_paddr = 0;
	phdr->p_filesz = len;
	phdr->p_memsz = len;
	phdr->p_align = page_size;

	if (!gelf_update_phdr(kcore->elf, idx, phdr))
		return -1;

	return 0;
}

static off_t kcore__write(struct kcore *kcore)
{
	return elf_update(kcore->elf, ELF_C_WRITE);
}

struct phdr_data {
	off_t offset;
	u64 addr;
	u64 len;
};

struct kcore_copy_info {
	u64 stext;
	u64 etext;
	u64 first_symbol;
	u64 last_symbol;
	u64 first_module;
	u64 last_module_symbol;
	struct phdr_data kernel_map;
	struct phdr_data modules_map;
};

static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
					u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (strchr(name, '[')) {
		if (start > kci->last_module_symbol)
			kci->last_module_symbol = start;
		return 0;
	}

	if (!kci->first_symbol || start < kci->first_symbol)
		kci->first_symbol = start;

	if (!kci->last_symbol || start > kci->last_symbol)
		kci->last_symbol = start;

	if (!strcmp(name, "_stext")) {
		kci->stext = start;
		return 0;
	}

	if (!strcmp(name, "_etext")) {
		kci->etext = start;
		return 0;
	}

	return 0;
}

static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
				      const char *dir)
{
	char kallsyms_filename[PATH_MAX];

	scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);

	if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
		return -1;

	if (kallsyms__parse(kallsyms_filename, kci,
			    kcore_copy__process_kallsyms) < 0)
		return -1;

	return 0;
}

static int kcore_copy__process_modules(void *arg,
				       const char *name __maybe_unused,
				       u64 start)
{
	struct kcore_copy_info *kci = arg;

	if (!kci->first_module || start < kci->first_module)
		kci->first_module = start;

	return 0;
}

static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
				     const char *dir)
{
	char modules_filename[PATH_MAX];

	scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);

	if (symbol__restricted_filename(modules_filename, "/proc/modules"))
		return -1;

	if (modules__parse(modules_filename, kci,
			   kcore_copy__process_modules) < 0)
		return -1;

	return 0;
}

static void kcore_copy__map(struct phdr_data *p, u64 start, u64 end, u64 pgoff,
			    u64 s, u64 e)
{
	if (p->addr || s < start || s >= end)
		return;

	p->addr = s;
	p->offset = (s - start) + pgoff;
	p->len = e < end ? e - s : end - s;
}

static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
{
	struct kcore_copy_info *kci = data;
	u64 end = start + len;

	kcore_copy__map(&kci->kernel_map, start, end, pgoff, kci->stext,
			kci->etext);

	kcore_copy__map(&kci->modules_map, start, end, pgoff, kci->first_module,
			kci->last_module_symbol);

	return 0;
}

static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
{
	if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
		return -1;

	return 0;
}

static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
				 Elf *elf)
{
	if (kcore_copy__parse_kallsyms(kci, dir))
		return -1;

	if (kcore_copy__parse_modules(kci, dir))
		return -1;

	if (kci->stext)
		kci->stext = round_down(kci->stext, page_size);
	else
		kci->stext = round_down(kci->first_symbol, page_size);

	if (kci->etext) {
		kci->etext = round_up(kci->etext, page_size);
	} else if (kci->last_symbol) {
		kci->etext = round_up(kci->last_symbol, page_size);
		kci->etext += page_size;
	}

	kci->first_module = round_down(kci->first_module, page_size);

	if (kci->last_module_symbol) {
		kci->last_module_symbol = round_up(kci->last_module_symbol,
						   page_size);
		kci->last_module_symbol += page_size;
	}

	if (!kci->stext || !kci->etext)
		return -1;

	if (kci->first_module && !kci->last_module_symbol)
		return -1;

	return kcore_copy__read_maps(kci, elf);
}

static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
				 const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return copyfile_mode(from_filename, to_filename, 0400);
}

static int kcore_copy__unlink(const char *dir, const char *name)
{
	char filename[PATH_MAX];

	scnprintf(filename, PATH_MAX, "%s/%s", dir, name);

	return unlink(filename);
}

static int kcore_copy__compare_fds(int from, int to)
{
	char *buf_from;
	char *buf_to;
	ssize_t ret;
	size_t len;
	int err = -1;

	buf_from = malloc(page_size);
	buf_to = malloc(page_size);
	if (!buf_from || !buf_to)
		goto out;

	while (1) {
		/* Use read because mmap won't work on proc files */
		ret = read(from, buf_from, page_size);
		if (ret < 0)
			goto out;

		if (!ret)
			break;

		len = ret;

		if (readn(to, buf_to, len) != (int)len)
			goto out;

		if (memcmp(buf_from, buf_to, len))
			goto out;
	}

	err = 0;
out:
	free(buf_to);
	free(buf_from);
	return err;
}

static int kcore_copy__compare_files(const char *from_filename,
				     const char *to_filename)
{
	int from, to, err = -1;

	from = open(from_filename, O_RDONLY);
	if (from < 0)
		return -1;

	to = open(to_filename, O_RDONLY);
	if (to < 0)
		goto out_close_from;

	err = kcore_copy__compare_fds(from, to);

	close(to);
out_close_from:
	close(from);
	return err;
}

static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
				    const char *name)
{
	char from_filename[PATH_MAX];
	char to_filename[PATH_MAX];

	scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
	scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);

	return kcore_copy__compare_files(from_filename, to_filename);
}

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another. kallsyms and modules are copied entirely. Only code segments are
 * copied from kcore. It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules. The code segments are determined
 * from kallsyms and modules files. The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol. Start addresses are rounded down to the nearest page. End
 * addresses are rounded up to the nearest page. An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass
 * that symbol too. Because it contains only code sections, the resulting kcore
 * is unusual. One significant peculiarity is that the mapping (start -> pgoff)
 * is not the same for the kernel map and the modules map. That happens because
 * the data is copied adjacently whereas the original kcore has gaps. Finally,
 * kallsyms and modules files are compared with their copies to check that
 * modules have not been loaded or unloaded while the copies were taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 2;
	int idx = 0, err = -1;
	off_t offset = page_size, sz, modules_offset = 0;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (!kci.modules_map.addr)
		count -= 1;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx++, offset, kci.kernel_map.addr,
			    kci.kernel_map.len))
		goto out_extract_close;

	if (kci.modules_map.addr) {
		modules_offset = offset + kci.kernel_map.len;
		if (kcore__add_phdr(&extract, idx, modules_offset,
				    kci.modules_map.addr, kci.modules_map.len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kci.kernel_map.offset, extract.fd, offset,
		       kci.kernel_map.len))
		goto out_extract_close;

	if (modules_offset && copy_bytes(kcore.fd, kci.modules_map.offset,
					 extract.fd, modules_offset,
					 kci.modules_map.len))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "modules"))
		goto out_extract_close;

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	return err;
}

int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
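	/* Remove the temporary file created by kcore_extract__create(). */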
	unlink(kce->extract_filename);
}

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}