1 // SPDX-License-Identifier: GPL-2.0 2 #include <fcntl.h> 3 #include <stdio.h> 4 #include <errno.h> 5 #include <stdlib.h> 6 #include <string.h> 7 #include <unistd.h> 8 #include <inttypes.h> 9 10 #include "dso.h" 11 #include "map.h" 12 #include "maps.h" 13 #include "symbol.h" 14 #include "symsrc.h" 15 #include "demangle-java.h" 16 #include "demangle-rust.h" 17 #include "machine.h" 18 #include "vdso.h" 19 #include "debug.h" 20 #include "util/copyfile.h" 21 #include <linux/ctype.h> 22 #include <linux/kernel.h> 23 #include <linux/zalloc.h> 24 #include <symbol/kallsyms.h> 25 #include <internal/lib.h> 26 27 #ifndef EM_AARCH64 28 #define EM_AARCH64 183 /* ARM 64 bit */ 29 #endif 30 31 #ifndef ELF32_ST_VISIBILITY 32 #define ELF32_ST_VISIBILITY(o) ((o) & 0x03) 33 #endif 34 35 /* For ELF64 the definitions are the same. */ 36 #ifndef ELF64_ST_VISIBILITY 37 #define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o) 38 #endif 39 40 /* How to extract information held in the st_other field. */ 41 #ifndef GELF_ST_VISIBILITY 42 #define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val) 43 #endif 44 45 typedef Elf64_Nhdr GElf_Nhdr; 46 47 #ifndef DMGL_PARAMS 48 #define DMGL_NO_OPTS 0 /* For readability... */ 49 #define DMGL_PARAMS (1 << 0) /* Include function args */ 50 #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */ 51 #endif 52 53 #ifdef HAVE_LIBBFD_SUPPORT 54 #define PACKAGE 'perf' 55 #include <bfd.h> 56 #else 57 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT 58 extern char *cplus_demangle(const char *, int); 59 60 static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i) 61 { 62 return cplus_demangle(c, i); 63 } 64 #else 65 #ifdef NO_DEMANGLE 66 static inline char *bfd_demangle(void __maybe_unused *v, 67 const char __maybe_unused *c, 68 int __maybe_unused i) 69 { 70 return NULL; 71 } 72 #endif 73 #endif 74 #endif 75 76 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT 77 static int elf_getphdrnum(Elf *elf, size_t *dst) 78 { 79 GElf_Ehdr gehdr; 80 GElf_Ehdr *ehdr; 81 82 ehdr = gelf_getehdr(elf, &gehdr); 83 if (!ehdr) 84 return -1; 85 86 *dst = ehdr->e_phnum; 87 88 return 0; 89 } 90 #endif 91 92 #ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT 93 static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused) 94 { 95 pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__); 96 return -1; 97 } 98 #endif 99 100 #ifndef NT_GNU_BUILD_ID 101 #define NT_GNU_BUILD_ID 3 102 #endif 103 104 /** 105 * elf_symtab__for_each_symbol - iterate thru all the symbols 106 * 107 * @syms: struct elf_symtab instance to iterate 108 * @idx: uint32_t idx 109 * @sym: GElf_Sym iterator 110 */ 111 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \ 112 for (idx = 0, gelf_getsym(syms, idx, &sym);\ 113 idx < nr_syms; \ 114 idx++, gelf_getsym(syms, idx, &sym)) 115 116 static inline uint8_t elf_sym__type(const GElf_Sym *sym) 117 { 118 return GELF_ST_TYPE(sym->st_info); 119 } 120 121 static inline uint8_t elf_sym__visibility(const GElf_Sym *sym) 122 { 123 return GELF_ST_VISIBILITY(sym->st_other); 124 } 125 126 #ifndef STT_GNU_IFUNC 127 #define STT_GNU_IFUNC 10 128 #endif 129 130 static inline int elf_sym__is_function(const GElf_Sym *sym) 131 { 132 return (elf_sym__type(sym) == STT_FUNC || 133 elf_sym__type(sym) == STT_GNU_IFUNC) && 134 sym->st_name != 0 && 135 sym->st_shndx != SHN_UNDEF; 136 } 137 138 static inline bool elf_sym__is_object(const GElf_Sym *sym) 139 { 140 return elf_sym__type(sym) == STT_OBJECT && 141 sym->st_name != 0 && 142 sym->st_shndx != SHN_UNDEF; 143 } 144 145 
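/*
 * Illustrative only: the iterator above is driven with the Elf_Data of a
 * symbol table section and an entry count taken from its section header,
 * roughly:
 *
 *	GElf_Sym sym;
 *	uint32_t idx, nr_syms = shdr.sh_size / shdr.sh_entsize;
 *	Elf_Data *syms = elf_getdata(scn, NULL);
 *
 *	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
 *		if (!elf_sym__is_function(&sym))
 *			continue;
 *		// sym.st_value, sym.st_size, ... describe one function
 *	}
 *
 * "scn" and "shdr" here stand for a symbol table section and its header;
 * dso__load_sym() below walks .symtab/.dynsym this way.
 */
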
static inline int elf_sym__is_label(const GElf_Sym *sym) 146 { 147 return elf_sym__type(sym) == STT_NOTYPE && 148 sym->st_name != 0 && 149 sym->st_shndx != SHN_UNDEF && 150 sym->st_shndx != SHN_ABS && 151 elf_sym__visibility(sym) != STV_HIDDEN && 152 elf_sym__visibility(sym) != STV_INTERNAL; 153 } 154 155 static bool elf_sym__filter(GElf_Sym *sym) 156 { 157 return elf_sym__is_function(sym) || elf_sym__is_object(sym); 158 } 159 160 static inline const char *elf_sym__name(const GElf_Sym *sym, 161 const Elf_Data *symstrs) 162 { 163 return symstrs->d_buf + sym->st_name; 164 } 165 166 static inline const char *elf_sec__name(const GElf_Shdr *shdr, 167 const Elf_Data *secstrs) 168 { 169 return secstrs->d_buf + shdr->sh_name; 170 } 171 172 static inline int elf_sec__is_text(const GElf_Shdr *shdr, 173 const Elf_Data *secstrs) 174 { 175 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL; 176 } 177 178 static inline bool elf_sec__is_data(const GElf_Shdr *shdr, 179 const Elf_Data *secstrs) 180 { 181 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; 182 } 183 184 static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs) 185 { 186 return elf_sec__is_text(shdr, secstrs) || 187 elf_sec__is_data(shdr, secstrs); 188 } 189 190 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) 191 { 192 Elf_Scn *sec = NULL; 193 GElf_Shdr shdr; 194 size_t cnt = 1; 195 196 while ((sec = elf_nextscn(elf, sec)) != NULL) { 197 gelf_getshdr(sec, &shdr); 198 199 if ((addr >= shdr.sh_addr) && 200 (addr < (shdr.sh_addr + shdr.sh_size))) 201 return cnt; 202 203 ++cnt; 204 } 205 206 return -1; 207 } 208 209 Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, 210 GElf_Shdr *shp, const char *name, size_t *idx) 211 { 212 Elf_Scn *sec = NULL; 213 size_t cnt = 1; 214 215 /* Elf is corrupted/truncated, avoid calling elf_strptr. */ 216 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) 217 return NULL; 218 219 while ((sec = elf_nextscn(elf, sec)) != NULL) { 220 char *str; 221 222 gelf_getshdr(sec, shp); 223 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); 224 if (str && !strcmp(name, str)) { 225 if (idx) 226 *idx = cnt; 227 return sec; 228 } 229 ++cnt; 230 } 231 232 return NULL; 233 } 234 235 static bool want_demangle(bool is_kernel_sym) 236 { 237 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle; 238 } 239 240 static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name) 241 { 242 int demangle_flags = verbose > 0 ? (DMGL_PARAMS | DMGL_ANSI) : DMGL_NO_OPTS; 243 char *demangled = NULL; 244 245 /* 246 * We need to figure out if the object was created from C++ sources 247 * DWARF DW_compile_unit has this, but we don't always have access 248 * to it... 249 */ 250 if (!want_demangle(dso->kernel || kmodule)) 251 return demangled; 252 253 demangled = bfd_demangle(NULL, elf_name, demangle_flags); 254 if (demangled == NULL) 255 demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET); 256 else if (rust_is_mangled(demangled)) 257 /* 258 * Input to Rust demangling is the BFD-demangled 259 * name which it Rust-demangles in place. 
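		 * (Rust demangling only ever shortens the name, which is
		 * what makes rewriting the buffer in place safe.)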
		 */
		rust_demangle_sym(demangled);

	return demangled;
}

#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrel(reldata, idx, &pos_mem))

#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \
	for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \
	     idx < nr_entries; \
	     ++idx, pos = gelf_getrela(reldata, idx, &pos_mem))

/*
 * We need to check if we have a .dynsym, so that we can handle the
 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
 * .dynsym or .symtab).
 * And always look at the original dso, not at debuginfo packages, that
 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
 */
int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
{
	uint32_t nr_rel_entries, idx;
	GElf_Sym sym;
	u64 plt_offset, plt_header_size, plt_entry_size;
	GElf_Shdr shdr_plt;
	struct symbol *f;
	GElf_Shdr shdr_rel_plt, shdr_dynsym;
	Elf_Data *reldata, *syms, *symstrs;
	Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
	size_t dynsym_idx;
	GElf_Ehdr ehdr;
	char sympltname[1024];
	Elf *elf;
	int nr = 0, symidx, err = 0;

	if (!ss->dynsym)
		return 0;

	elf = ss->elf;
	ehdr = ss->ehdr;

	scn_dynsym = ss->dynsym;
	shdr_dynsym = ss->dynshdr;
	dynsym_idx = ss->dynsym_idx;

	if (scn_dynsym == NULL)
		goto out_elf_end;

	scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
					  ".rela.plt", NULL);
	if (scn_plt_rel == NULL) {
		scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
						  ".rel.plt", NULL);
		if (scn_plt_rel == NULL)
			goto out_elf_end;
	}

	err = -1;

	if (shdr_rel_plt.sh_link != dynsym_idx)
		goto out_elf_end;

	if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL)
		goto out_elf_end;

	/*
	 * Fetch the relocation section to find the idxes to the GOT
	 * and the symbols in the .dynsym they refer to.
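	 * (GELF_R_SYM(pos->r_info) below recovers that .dynsym index for
	 * each relocation entry.)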
332 */ 333 reldata = elf_getdata(scn_plt_rel, NULL); 334 if (reldata == NULL) 335 goto out_elf_end; 336 337 syms = elf_getdata(scn_dynsym, NULL); 338 if (syms == NULL) 339 goto out_elf_end; 340 341 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link); 342 if (scn_symstrs == NULL) 343 goto out_elf_end; 344 345 symstrs = elf_getdata(scn_symstrs, NULL); 346 if (symstrs == NULL) 347 goto out_elf_end; 348 349 if (symstrs->d_size == 0) 350 goto out_elf_end; 351 352 nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; 353 plt_offset = shdr_plt.sh_offset; 354 switch (ehdr.e_machine) { 355 case EM_ARM: 356 plt_header_size = 20; 357 plt_entry_size = 12; 358 break; 359 360 case EM_AARCH64: 361 plt_header_size = 32; 362 plt_entry_size = 16; 363 break; 364 365 case EM_SPARC: 366 plt_header_size = 48; 367 plt_entry_size = 12; 368 break; 369 370 case EM_SPARCV9: 371 plt_header_size = 128; 372 plt_entry_size = 32; 373 break; 374 375 default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/xtensa need to be checked */ 376 plt_header_size = shdr_plt.sh_entsize; 377 plt_entry_size = shdr_plt.sh_entsize; 378 break; 379 } 380 plt_offset += plt_header_size; 381 382 if (shdr_rel_plt.sh_type == SHT_RELA) { 383 GElf_Rela pos_mem, *pos; 384 385 elf_section__for_each_rela(reldata, pos, pos_mem, idx, 386 nr_rel_entries) { 387 const char *elf_name = NULL; 388 char *demangled = NULL; 389 symidx = GELF_R_SYM(pos->r_info); 390 gelf_getsym(syms, symidx, &sym); 391 392 elf_name = elf_sym__name(&sym, symstrs); 393 demangled = demangle_sym(dso, 0, elf_name); 394 if (demangled != NULL) 395 elf_name = demangled; 396 snprintf(sympltname, sizeof(sympltname), 397 "%s@plt", elf_name); 398 free(demangled); 399 400 f = symbol__new(plt_offset, plt_entry_size, 401 STB_GLOBAL, STT_FUNC, sympltname); 402 if (!f) 403 goto out_elf_end; 404 405 plt_offset += plt_entry_size; 406 symbols__insert(&dso->symbols, f); 407 ++nr; 408 } 409 } else if (shdr_rel_plt.sh_type == SHT_REL) { 410 GElf_Rel pos_mem, *pos; 411 elf_section__for_each_rel(reldata, pos, pos_mem, idx, 412 nr_rel_entries) { 413 const char *elf_name = NULL; 414 char *demangled = NULL; 415 symidx = GELF_R_SYM(pos->r_info); 416 gelf_getsym(syms, symidx, &sym); 417 418 elf_name = elf_sym__name(&sym, symstrs); 419 demangled = demangle_sym(dso, 0, elf_name); 420 if (demangled != NULL) 421 elf_name = demangled; 422 snprintf(sympltname, sizeof(sympltname), 423 "%s@plt", elf_name); 424 free(demangled); 425 426 f = symbol__new(plt_offset, plt_entry_size, 427 STB_GLOBAL, STT_FUNC, sympltname); 428 if (!f) 429 goto out_elf_end; 430 431 plt_offset += plt_entry_size; 432 symbols__insert(&dso->symbols, f); 433 ++nr; 434 } 435 } 436 437 err = 0; 438 out_elf_end: 439 if (err == 0) 440 return nr; 441 pr_debug("%s: problems reading %s PLT info.\n", 442 __func__, dso->long_name); 443 return 0; 444 } 445 446 char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name) 447 { 448 return demangle_sym(dso, kmodule, elf_name); 449 } 450 451 /* 452 * Align offset to 4 bytes as needed for note name and descriptor data. 
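 * For example, NOTE_ALIGN(5) == 8 and NOTE_ALIGN(8) == 8.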
453 */ 454 #define NOTE_ALIGN(n) (((n) + 3) & -4U) 455 456 static int elf_read_build_id(Elf *elf, void *bf, size_t size) 457 { 458 int err = -1; 459 GElf_Ehdr ehdr; 460 GElf_Shdr shdr; 461 Elf_Data *data; 462 Elf_Scn *sec; 463 Elf_Kind ek; 464 void *ptr; 465 466 if (size < BUILD_ID_SIZE) 467 goto out; 468 469 ek = elf_kind(elf); 470 if (ek != ELF_K_ELF) 471 goto out; 472 473 if (gelf_getehdr(elf, &ehdr) == NULL) { 474 pr_err("%s: cannot get elf header.\n", __func__); 475 goto out; 476 } 477 478 /* 479 * Check following sections for notes: 480 * '.note.gnu.build-id' 481 * '.notes' 482 * '.note' (VDSO specific) 483 */ 484 do { 485 sec = elf_section_by_name(elf, &ehdr, &shdr, 486 ".note.gnu.build-id", NULL); 487 if (sec) 488 break; 489 490 sec = elf_section_by_name(elf, &ehdr, &shdr, 491 ".notes", NULL); 492 if (sec) 493 break; 494 495 sec = elf_section_by_name(elf, &ehdr, &shdr, 496 ".note", NULL); 497 if (sec) 498 break; 499 500 return err; 501 502 } while (0); 503 504 data = elf_getdata(sec, NULL); 505 if (data == NULL) 506 goto out; 507 508 ptr = data->d_buf; 509 while (ptr < (data->d_buf + data->d_size)) { 510 GElf_Nhdr *nhdr = ptr; 511 size_t namesz = NOTE_ALIGN(nhdr->n_namesz), 512 descsz = NOTE_ALIGN(nhdr->n_descsz); 513 const char *name; 514 515 ptr += sizeof(*nhdr); 516 name = ptr; 517 ptr += namesz; 518 if (nhdr->n_type == NT_GNU_BUILD_ID && 519 nhdr->n_namesz == sizeof("GNU")) { 520 if (memcmp(name, "GNU", sizeof("GNU")) == 0) { 521 size_t sz = min(size, descsz); 522 memcpy(bf, ptr, sz); 523 memset(bf + sz, 0, size - sz); 524 err = descsz; 525 break; 526 } 527 } 528 ptr += descsz; 529 } 530 531 out: 532 return err; 533 } 534 535 #ifdef HAVE_LIBBFD_BUILDID_SUPPORT 536 537 static int read_build_id(const char *filename, struct build_id *bid) 538 { 539 size_t size = sizeof(bid->data); 540 int err = -1; 541 bfd *abfd; 542 543 abfd = bfd_openr(filename, NULL); 544 if (!abfd) 545 return -1; 546 547 if (!bfd_check_format(abfd, bfd_object)) { 548 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename); 549 goto out_close; 550 } 551 552 if (!abfd->build_id || abfd->build_id->size > size) 553 goto out_close; 554 555 memcpy(bid->data, abfd->build_id->data, abfd->build_id->size); 556 memset(bid->data + abfd->build_id->size, 0, size - abfd->build_id->size); 557 err = bid->size = abfd->build_id->size; 558 559 out_close: 560 bfd_close(abfd); 561 return err; 562 } 563 564 #else // HAVE_LIBBFD_BUILDID_SUPPORT 565 566 static int read_build_id(const char *filename, struct build_id *bid) 567 { 568 size_t size = sizeof(bid->data); 569 int fd, err = -1; 570 Elf *elf; 571 572 if (size < BUILD_ID_SIZE) 573 goto out; 574 575 fd = open(filename, O_RDONLY); 576 if (fd < 0) 577 goto out; 578 579 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 580 if (elf == NULL) { 581 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); 582 goto out_close; 583 } 584 585 err = elf_read_build_id(elf, bid->data, size); 586 if (err > 0) 587 bid->size = err; 588 589 elf_end(elf); 590 out_close: 591 close(fd); 592 out: 593 return err; 594 } 595 596 #endif // HAVE_LIBBFD_BUILDID_SUPPORT 597 598 int filename__read_build_id(const char *filename, struct build_id *bid) 599 { 600 struct kmod_path m = { .name = NULL, }; 601 char path[PATH_MAX]; 602 int err; 603 604 if (!filename) 605 return -EFAULT; 606 607 err = kmod_path__parse(&m, filename); 608 if (err) 609 return -1; 610 611 if (m.comp) { 612 int error = 0, fd; 613 614 fd = filename__decompress(filename, path, sizeof(path), m.comp, &error); 615 if (fd < 0) { 616 
pr_debug("Failed to decompress (error %d) %s\n", 617 error, filename); 618 return -1; 619 } 620 close(fd); 621 filename = path; 622 } 623 624 err = read_build_id(filename, bid); 625 626 if (m.comp) 627 unlink(filename); 628 return err; 629 } 630 631 int sysfs__read_build_id(const char *filename, struct build_id *bid) 632 { 633 size_t size = sizeof(bid->data); 634 int fd, err = -1; 635 636 fd = open(filename, O_RDONLY); 637 if (fd < 0) 638 goto out; 639 640 while (1) { 641 char bf[BUFSIZ]; 642 GElf_Nhdr nhdr; 643 size_t namesz, descsz; 644 645 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) 646 break; 647 648 namesz = NOTE_ALIGN(nhdr.n_namesz); 649 descsz = NOTE_ALIGN(nhdr.n_descsz); 650 if (nhdr.n_type == NT_GNU_BUILD_ID && 651 nhdr.n_namesz == sizeof("GNU")) { 652 if (read(fd, bf, namesz) != (ssize_t)namesz) 653 break; 654 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { 655 size_t sz = min(descsz, size); 656 if (read(fd, bid->data, sz) == (ssize_t)sz) { 657 memset(bid->data + sz, 0, size - sz); 658 bid->size = sz; 659 err = 0; 660 break; 661 } 662 } else if (read(fd, bf, descsz) != (ssize_t)descsz) 663 break; 664 } else { 665 int n = namesz + descsz; 666 667 if (n > (int)sizeof(bf)) { 668 n = sizeof(bf); 669 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n", 670 __func__, filename, nhdr.n_namesz, nhdr.n_descsz); 671 } 672 if (read(fd, bf, n) != n) 673 break; 674 } 675 } 676 close(fd); 677 out: 678 return err; 679 } 680 681 #ifdef HAVE_LIBBFD_SUPPORT 682 683 int filename__read_debuglink(const char *filename, char *debuglink, 684 size_t size) 685 { 686 int err = -1; 687 asection *section; 688 bfd *abfd; 689 690 abfd = bfd_openr(filename, NULL); 691 if (!abfd) 692 return -1; 693 694 if (!bfd_check_format(abfd, bfd_object)) { 695 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename); 696 goto out_close; 697 } 698 699 section = bfd_get_section_by_name(abfd, ".gnu_debuglink"); 700 if (!section) 701 goto out_close; 702 703 if (section->size > size) 704 goto out_close; 705 706 if (!bfd_get_section_contents(abfd, section, debuglink, 0, 707 section->size)) 708 goto out_close; 709 710 err = 0; 711 712 out_close: 713 bfd_close(abfd); 714 return err; 715 } 716 717 #else 718 719 int filename__read_debuglink(const char *filename, char *debuglink, 720 size_t size) 721 { 722 int fd, err = -1; 723 Elf *elf; 724 GElf_Ehdr ehdr; 725 GElf_Shdr shdr; 726 Elf_Data *data; 727 Elf_Scn *sec; 728 Elf_Kind ek; 729 730 fd = open(filename, O_RDONLY); 731 if (fd < 0) 732 goto out; 733 734 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 735 if (elf == NULL) { 736 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); 737 goto out_close; 738 } 739 740 ek = elf_kind(elf); 741 if (ek != ELF_K_ELF) 742 goto out_elf_end; 743 744 if (gelf_getehdr(elf, &ehdr) == NULL) { 745 pr_err("%s: cannot get elf header.\n", __func__); 746 goto out_elf_end; 747 } 748 749 sec = elf_section_by_name(elf, &ehdr, &shdr, 750 ".gnu_debuglink", NULL); 751 if (sec == NULL) 752 goto out_elf_end; 753 754 data = elf_getdata(sec, NULL); 755 if (data == NULL) 756 goto out_elf_end; 757 758 /* the start of this section is a zero-terminated string */ 759 strncpy(debuglink, data->d_buf, size); 760 761 err = 0; 762 763 out_elf_end: 764 elf_end(elf); 765 out_close: 766 close(fd); 767 out: 768 return err; 769 } 770 771 #endif 772 773 static int dso__swap_init(struct dso *dso, unsigned char eidata) 774 { 775 static unsigned int const endian = 1; 776 777 dso->needs_swap = DSO_SWAP__NO; 778 779 switch 
(eidata) { 780 case ELFDATA2LSB: 781 /* We are big endian, DSO is little endian. */ 782 if (*(unsigned char const *)&endian != 1) 783 dso->needs_swap = DSO_SWAP__YES; 784 break; 785 786 case ELFDATA2MSB: 787 /* We are little endian, DSO is big endian. */ 788 if (*(unsigned char const *)&endian != 0) 789 dso->needs_swap = DSO_SWAP__YES; 790 break; 791 792 default: 793 pr_err("unrecognized DSO data encoding %d\n", eidata); 794 return -EINVAL; 795 } 796 797 return 0; 798 } 799 800 bool symsrc__possibly_runtime(struct symsrc *ss) 801 { 802 return ss->dynsym || ss->opdsec; 803 } 804 805 bool symsrc__has_symtab(struct symsrc *ss) 806 { 807 return ss->symtab != NULL; 808 } 809 810 void symsrc__destroy(struct symsrc *ss) 811 { 812 zfree(&ss->name); 813 elf_end(ss->elf); 814 close(ss->fd); 815 } 816 817 bool elf__needs_adjust_symbols(GElf_Ehdr ehdr) 818 { 819 /* 820 * Usually vmlinux is an ELF file with type ET_EXEC for most 821 * architectures; except Arm64 kernel is linked with option 822 * '-share', so need to check type ET_DYN. 823 */ 824 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL || 825 ehdr.e_type == ET_DYN; 826 } 827 828 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, 829 enum dso_binary_type type) 830 { 831 GElf_Ehdr ehdr; 832 Elf *elf; 833 int fd; 834 835 if (dso__needs_decompress(dso)) { 836 fd = dso__decompress_kmodule_fd(dso, name); 837 if (fd < 0) 838 return -1; 839 840 type = dso->symtab_type; 841 } else { 842 fd = open(name, O_RDONLY); 843 if (fd < 0) { 844 dso->load_errno = errno; 845 return -1; 846 } 847 } 848 849 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 850 if (elf == NULL) { 851 pr_debug("%s: cannot read %s ELF file.\n", __func__, name); 852 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF; 853 goto out_close; 854 } 855 856 if (gelf_getehdr(elf, &ehdr) == NULL) { 857 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF; 858 pr_debug("%s: cannot get elf header.\n", __func__); 859 goto out_elf_end; 860 } 861 862 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) { 863 dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR; 864 goto out_elf_end; 865 } 866 867 /* Always reject images with a mismatched build-id: */ 868 if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) { 869 u8 build_id[BUILD_ID_SIZE]; 870 struct build_id bid; 871 int size; 872 873 size = elf_read_build_id(elf, build_id, BUILD_ID_SIZE); 874 if (size <= 0) { 875 dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID; 876 goto out_elf_end; 877 } 878 879 build_id__init(&bid, build_id, size); 880 if (!dso__build_id_equal(dso, &bid)) { 881 pr_debug("%s: build id mismatch for %s.\n", __func__, name); 882 dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID; 883 goto out_elf_end; 884 } 885 } 886 887 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64); 888 889 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab", 890 NULL); 891 if (ss->symshdr.sh_type != SHT_SYMTAB) 892 ss->symtab = NULL; 893 894 ss->dynsym_idx = 0; 895 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym", 896 &ss->dynsym_idx); 897 if (ss->dynshdr.sh_type != SHT_DYNSYM) 898 ss->dynsym = NULL; 899 900 ss->opdidx = 0; 901 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd", 902 &ss->opdidx); 903 if (ss->opdshdr.sh_type != SHT_PROGBITS) 904 ss->opdsec = NULL; 905 906 if (dso->kernel == DSO_SPACE__USER) 907 ss->adjust_symbols = true; 908 else 909 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr); 910 911 ss->name = strdup(name); 912 if (!ss->name) { 913 dso->load_errno = errno; 914 
goto out_elf_end; 915 } 916 917 ss->elf = elf; 918 ss->fd = fd; 919 ss->ehdr = ehdr; 920 ss->type = type; 921 922 return 0; 923 924 out_elf_end: 925 elf_end(elf); 926 out_close: 927 close(fd); 928 return -1; 929 } 930 931 /** 932 * ref_reloc_sym_not_found - has kernel relocation symbol been found. 933 * @kmap: kernel maps and relocation reference symbol 934 * 935 * This function returns %true if we are dealing with the kernel maps and the 936 * relocation reference symbol has not yet been found. Otherwise %false is 937 * returned. 938 */ 939 static bool ref_reloc_sym_not_found(struct kmap *kmap) 940 { 941 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && 942 !kmap->ref_reloc_sym->unrelocated_addr; 943 } 944 945 /** 946 * ref_reloc - kernel relocation offset. 947 * @kmap: kernel maps and relocation reference symbol 948 * 949 * This function returns the offset of kernel addresses as determined by using 950 * the relocation reference symbol i.e. if the kernel has not been relocated 951 * then the return value is zero. 952 */ 953 static u64 ref_reloc(struct kmap *kmap) 954 { 955 if (kmap && kmap->ref_reloc_sym && 956 kmap->ref_reloc_sym->unrelocated_addr) 957 return kmap->ref_reloc_sym->addr - 958 kmap->ref_reloc_sym->unrelocated_addr; 959 return 0; 960 } 961 962 void __weak arch__sym_update(struct symbol *s __maybe_unused, 963 GElf_Sym *sym __maybe_unused) { } 964 965 static int dso__process_kernel_symbol(struct dso *dso, struct map *map, 966 GElf_Sym *sym, GElf_Shdr *shdr, 967 struct maps *kmaps, struct kmap *kmap, 968 struct dso **curr_dsop, struct map **curr_mapp, 969 const char *section_name, 970 bool adjust_kernel_syms, bool kmodule, bool *remap_kernel) 971 { 972 struct dso *curr_dso = *curr_dsop; 973 struct map *curr_map; 974 char dso_name[PATH_MAX]; 975 976 /* Adjust symbol to map to file offset */ 977 if (adjust_kernel_syms) 978 sym->st_value -= shdr->sh_addr - shdr->sh_offset; 979 980 if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0) 981 return 0; 982 983 if (strcmp(section_name, ".text") == 0) { 984 /* 985 * The initial kernel mapping is based on 986 * kallsyms and identity maps. Overwrite it to 987 * map to the kernel dso. 988 */ 989 if (*remap_kernel && dso->kernel && !kmodule) { 990 *remap_kernel = false; 991 map->start = shdr->sh_addr + ref_reloc(kmap); 992 map->end = map->start + shdr->sh_size; 993 map->pgoff = shdr->sh_offset; 994 map->map_ip = map__map_ip; 995 map->unmap_ip = map__unmap_ip; 996 /* Ensure maps are correctly ordered */ 997 if (kmaps) { 998 map__get(map); 999 maps__remove(kmaps, map); 1000 maps__insert(kmaps, map); 1001 map__put(map); 1002 } 1003 } 1004 1005 /* 1006 * The initial module mapping is based on 1007 * /proc/modules mapped to offset zero. 1008 * Overwrite it to map to the module dso. 
1009 */ 1010 if (*remap_kernel && kmodule) { 1011 *remap_kernel = false; 1012 map->pgoff = shdr->sh_offset; 1013 } 1014 1015 *curr_mapp = map; 1016 *curr_dsop = dso; 1017 return 0; 1018 } 1019 1020 if (!kmap) 1021 return 0; 1022 1023 snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name); 1024 1025 curr_map = maps__find_by_name(kmaps, dso_name); 1026 if (curr_map == NULL) { 1027 u64 start = sym->st_value; 1028 1029 if (kmodule) 1030 start += map->start + shdr->sh_offset; 1031 1032 curr_dso = dso__new(dso_name); 1033 if (curr_dso == NULL) 1034 return -1; 1035 curr_dso->kernel = dso->kernel; 1036 curr_dso->long_name = dso->long_name; 1037 curr_dso->long_name_len = dso->long_name_len; 1038 curr_map = map__new2(start, curr_dso); 1039 dso__put(curr_dso); 1040 if (curr_map == NULL) 1041 return -1; 1042 1043 if (curr_dso->kernel) 1044 map__kmap(curr_map)->kmaps = kmaps; 1045 1046 if (adjust_kernel_syms) { 1047 curr_map->start = shdr->sh_addr + ref_reloc(kmap); 1048 curr_map->end = curr_map->start + shdr->sh_size; 1049 curr_map->pgoff = shdr->sh_offset; 1050 } else { 1051 curr_map->map_ip = curr_map->unmap_ip = identity__map_ip; 1052 } 1053 curr_dso->symtab_type = dso->symtab_type; 1054 maps__insert(kmaps, curr_map); 1055 /* 1056 * Add it before we drop the referece to curr_map, i.e. while 1057 * we still are sure to have a reference to this DSO via 1058 * *curr_map->dso. 1059 */ 1060 dsos__add(&kmaps->machine->dsos, curr_dso); 1061 /* kmaps already got it */ 1062 map__put(curr_map); 1063 dso__set_loaded(curr_dso); 1064 *curr_mapp = curr_map; 1065 *curr_dsop = curr_dso; 1066 } else 1067 *curr_dsop = curr_map->dso; 1068 1069 return 0; 1070 } 1071 1072 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 1073 struct symsrc *runtime_ss, int kmodule) 1074 { 1075 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; 1076 struct maps *kmaps = kmap ? map__kmaps(map) : NULL; 1077 struct map *curr_map = map; 1078 struct dso *curr_dso = dso; 1079 Elf_Data *symstrs, *secstrs; 1080 uint32_t nr_syms; 1081 int err = -1; 1082 uint32_t idx; 1083 GElf_Ehdr ehdr; 1084 GElf_Shdr shdr; 1085 GElf_Shdr tshdr; 1086 Elf_Data *syms, *opddata = NULL; 1087 GElf_Sym sym; 1088 Elf_Scn *sec, *sec_strndx; 1089 Elf *elf; 1090 int nr = 0; 1091 bool remap_kernel = false, adjust_kernel_syms = false; 1092 1093 if (kmap && !kmaps) 1094 return -1; 1095 1096 dso->symtab_type = syms_ss->type; 1097 dso->is_64_bit = syms_ss->is_64_bit; 1098 dso->rel = syms_ss->ehdr.e_type == ET_REL; 1099 1100 /* 1101 * Modules may already have symbols from kallsyms, but those symbols 1102 * have the wrong values for the dso maps, so remove them. 1103 */ 1104 if (kmodule && syms_ss->symtab) 1105 symbols__delete(&dso->symbols); 1106 1107 if (!syms_ss->symtab) { 1108 /* 1109 * If the vmlinux is stripped, fail so we will fall back 1110 * to using kallsyms. The vmlinux runtime symbols aren't 1111 * of much use. 
1112 */ 1113 if (dso->kernel) 1114 goto out_elf_end; 1115 1116 syms_ss->symtab = syms_ss->dynsym; 1117 syms_ss->symshdr = syms_ss->dynshdr; 1118 } 1119 1120 elf = syms_ss->elf; 1121 ehdr = syms_ss->ehdr; 1122 sec = syms_ss->symtab; 1123 shdr = syms_ss->symshdr; 1124 1125 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr, 1126 ".text", NULL)) 1127 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset; 1128 1129 if (runtime_ss->opdsec) 1130 opddata = elf_rawdata(runtime_ss->opdsec, NULL); 1131 1132 syms = elf_getdata(sec, NULL); 1133 if (syms == NULL) 1134 goto out_elf_end; 1135 1136 sec = elf_getscn(elf, shdr.sh_link); 1137 if (sec == NULL) 1138 goto out_elf_end; 1139 1140 symstrs = elf_getdata(sec, NULL); 1141 if (symstrs == NULL) 1142 goto out_elf_end; 1143 1144 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx); 1145 if (sec_strndx == NULL) 1146 goto out_elf_end; 1147 1148 secstrs = elf_getdata(sec_strndx, NULL); 1149 if (secstrs == NULL) 1150 goto out_elf_end; 1151 1152 nr_syms = shdr.sh_size / shdr.sh_entsize; 1153 1154 memset(&sym, 0, sizeof(sym)); 1155 1156 /* 1157 * The kernel relocation symbol is needed in advance in order to adjust 1158 * kernel maps correctly. 1159 */ 1160 if (ref_reloc_sym_not_found(kmap)) { 1161 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { 1162 const char *elf_name = elf_sym__name(&sym, symstrs); 1163 1164 if (strcmp(elf_name, kmap->ref_reloc_sym->name)) 1165 continue; 1166 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; 1167 map->reloc = kmap->ref_reloc_sym->addr - 1168 kmap->ref_reloc_sym->unrelocated_addr; 1169 break; 1170 } 1171 } 1172 1173 /* 1174 * Handle any relocation of vdso necessary because older kernels 1175 * attempted to prelink vdso to its virtual address. 1176 */ 1177 if (dso__is_vdso(dso)) 1178 map->reloc = map->start - dso->text_offset; 1179 1180 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap); 1181 /* 1182 * Initial kernel and module mappings do not map to the dso. 1183 * Flag the fixups. 1184 */ 1185 if (dso->kernel) { 1186 remap_kernel = true; 1187 adjust_kernel_syms = dso->adjust_symbols; 1188 } 1189 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { 1190 struct symbol *f; 1191 const char *elf_name = elf_sym__name(&sym, symstrs); 1192 char *demangled = NULL; 1193 int is_label = elf_sym__is_label(&sym); 1194 const char *section_name; 1195 bool used_opd = false; 1196 1197 if (!is_label && !elf_sym__filter(&sym)) 1198 continue; 1199 1200 /* Reject ARM ELF "mapping symbols": these aren't unique and 1201 * don't identify functions, so will confuse the profile 1202 * output: */ 1203 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) { 1204 if (elf_name[0] == '$' && strchr("adtx", elf_name[1]) 1205 && (elf_name[2] == '\0' || elf_name[2] == '.')) 1206 continue; 1207 } 1208 1209 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) { 1210 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr; 1211 u64 *opd = opddata->d_buf + offset; 1212 sym.st_value = DSO__SWAP(dso, u64, *opd); 1213 sym.st_shndx = elf_addr_to_index(runtime_ss->elf, 1214 sym.st_value); 1215 used_opd = true; 1216 } 1217 /* 1218 * When loading symbols in a data mapping, ABS symbols (which 1219 * has a value of SHN_ABS in its st_shndx) failed at 1220 * elf_getscn(). And it marks the loading as a failure so 1221 * already loaded symbols cannot be fixed up. 1222 * 1223 * I'm not sure what should be done. Just ignore them for now. 
1224 * - Namhyung Kim 1225 */ 1226 if (sym.st_shndx == SHN_ABS) 1227 continue; 1228 1229 sec = elf_getscn(runtime_ss->elf, sym.st_shndx); 1230 if (!sec) 1231 goto out_elf_end; 1232 1233 gelf_getshdr(sec, &shdr); 1234 1235 if (is_label && !elf_sec__filter(&shdr, secstrs)) 1236 continue; 1237 1238 section_name = elf_sec__name(&shdr, secstrs); 1239 1240 /* On ARM, symbols for thumb functions have 1 added to 1241 * the symbol address as a flag - remove it */ 1242 if ((ehdr.e_machine == EM_ARM) && 1243 (GELF_ST_TYPE(sym.st_info) == STT_FUNC) && 1244 (sym.st_value & 1)) 1245 --sym.st_value; 1246 1247 if (dso->kernel) { 1248 if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map, 1249 section_name, adjust_kernel_syms, kmodule, &remap_kernel)) 1250 goto out_elf_end; 1251 } else if ((used_opd && runtime_ss->adjust_symbols) || 1252 (!used_opd && syms_ss->adjust_symbols)) { 1253 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " 1254 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, 1255 (u64)sym.st_value, (u64)shdr.sh_addr, 1256 (u64)shdr.sh_offset); 1257 sym.st_value -= shdr.sh_addr - shdr.sh_offset; 1258 } 1259 1260 demangled = demangle_sym(dso, kmodule, elf_name); 1261 if (demangled != NULL) 1262 elf_name = demangled; 1263 1264 f = symbol__new(sym.st_value, sym.st_size, 1265 GELF_ST_BIND(sym.st_info), 1266 GELF_ST_TYPE(sym.st_info), elf_name); 1267 free(demangled); 1268 if (!f) 1269 goto out_elf_end; 1270 1271 arch__sym_update(f, &sym); 1272 1273 __symbols__insert(&curr_dso->symbols, f, dso->kernel); 1274 nr++; 1275 } 1276 1277 /* 1278 * For misannotated, zeroed, ASM function sizes. 1279 */ 1280 if (nr > 0) { 1281 symbols__fixup_end(&dso->symbols); 1282 symbols__fixup_duplicate(&dso->symbols); 1283 if (kmap) { 1284 /* 1285 * We need to fixup this here too because we create new 1286 * maps here, for things like vsyscall sections. 
1287 */ 1288 maps__fixup_end(kmaps); 1289 } 1290 } 1291 err = nr; 1292 out_elf_end: 1293 return err; 1294 } 1295 1296 static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data) 1297 { 1298 GElf_Phdr phdr; 1299 size_t i, phdrnum; 1300 int err; 1301 u64 sz; 1302 1303 if (elf_getphdrnum(elf, &phdrnum)) 1304 return -1; 1305 1306 for (i = 0; i < phdrnum; i++) { 1307 if (gelf_getphdr(elf, i, &phdr) == NULL) 1308 return -1; 1309 if (phdr.p_type != PT_LOAD) 1310 continue; 1311 if (exe) { 1312 if (!(phdr.p_flags & PF_X)) 1313 continue; 1314 } else { 1315 if (!(phdr.p_flags & PF_R)) 1316 continue; 1317 } 1318 sz = min(phdr.p_memsz, phdr.p_filesz); 1319 if (!sz) 1320 continue; 1321 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data); 1322 if (err) 1323 return err; 1324 } 1325 return 0; 1326 } 1327 1328 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, 1329 bool *is_64_bit) 1330 { 1331 int err; 1332 Elf *elf; 1333 1334 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 1335 if (elf == NULL) 1336 return -1; 1337 1338 if (is_64_bit) 1339 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64); 1340 1341 err = elf_read_maps(elf, exe, mapfn, data); 1342 1343 elf_end(elf); 1344 return err; 1345 } 1346 1347 enum dso_type dso__type_fd(int fd) 1348 { 1349 enum dso_type dso_type = DSO__TYPE_UNKNOWN; 1350 GElf_Ehdr ehdr; 1351 Elf_Kind ek; 1352 Elf *elf; 1353 1354 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 1355 if (elf == NULL) 1356 goto out; 1357 1358 ek = elf_kind(elf); 1359 if (ek != ELF_K_ELF) 1360 goto out_end; 1361 1362 if (gelf_getclass(elf) == ELFCLASS64) { 1363 dso_type = DSO__TYPE_64BIT; 1364 goto out_end; 1365 } 1366 1367 if (gelf_getehdr(elf, &ehdr) == NULL) 1368 goto out_end; 1369 1370 if (ehdr.e_machine == EM_X86_64) 1371 dso_type = DSO__TYPE_X32BIT; 1372 else 1373 dso_type = DSO__TYPE_32BIT; 1374 out_end: 1375 elf_end(elf); 1376 out: 1377 return dso_type; 1378 } 1379 1380 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len) 1381 { 1382 ssize_t r; 1383 size_t n; 1384 int err = -1; 1385 char *buf = malloc(page_size); 1386 1387 if (buf == NULL) 1388 return -1; 1389 1390 if (lseek(to, to_offs, SEEK_SET) != to_offs) 1391 goto out; 1392 1393 if (lseek(from, from_offs, SEEK_SET) != from_offs) 1394 goto out; 1395 1396 while (len) { 1397 n = page_size; 1398 if (len < n) 1399 n = len; 1400 /* Use read because mmap won't work on proc files */ 1401 r = read(from, buf, n); 1402 if (r < 0) 1403 goto out; 1404 if (!r) 1405 break; 1406 n = r; 1407 r = write(to, buf, n); 1408 if (r < 0) 1409 goto out; 1410 if ((size_t)r != n) 1411 goto out; 1412 len -= n; 1413 } 1414 1415 err = 0; 1416 out: 1417 free(buf); 1418 return err; 1419 } 1420 1421 struct kcore { 1422 int fd; 1423 int elfclass; 1424 Elf *elf; 1425 GElf_Ehdr ehdr; 1426 }; 1427 1428 static int kcore__open(struct kcore *kcore, const char *filename) 1429 { 1430 GElf_Ehdr *ehdr; 1431 1432 kcore->fd = open(filename, O_RDONLY); 1433 if (kcore->fd == -1) 1434 return -1; 1435 1436 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL); 1437 if (!kcore->elf) 1438 goto out_close; 1439 1440 kcore->elfclass = gelf_getclass(kcore->elf); 1441 if (kcore->elfclass == ELFCLASSNONE) 1442 goto out_end; 1443 1444 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr); 1445 if (!ehdr) 1446 goto out_end; 1447 1448 return 0; 1449 1450 out_end: 1451 elf_end(kcore->elf); 1452 out_close: 1453 close(kcore->fd); 1454 return -1; 1455 } 1456 1457 static int kcore__init(struct kcore *kcore, char *filename, int elfclass, 1458 bool temp) 1459 { 1460 
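	/*
	 * When @temp is set, mkstemp() replaces the trailing XXXXXX template
	 * in @filename with a unique name; otherwise a fresh read-only (0400)
	 * file is created and must not already exist (O_CREAT | O_EXCL).
	 */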
kcore->elfclass = elfclass; 1461 1462 if (temp) 1463 kcore->fd = mkstemp(filename); 1464 else 1465 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400); 1466 if (kcore->fd == -1) 1467 return -1; 1468 1469 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL); 1470 if (!kcore->elf) 1471 goto out_close; 1472 1473 if (!gelf_newehdr(kcore->elf, elfclass)) 1474 goto out_end; 1475 1476 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); 1477 1478 return 0; 1479 1480 out_end: 1481 elf_end(kcore->elf); 1482 out_close: 1483 close(kcore->fd); 1484 unlink(filename); 1485 return -1; 1486 } 1487 1488 static void kcore__close(struct kcore *kcore) 1489 { 1490 elf_end(kcore->elf); 1491 close(kcore->fd); 1492 } 1493 1494 static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) 1495 { 1496 GElf_Ehdr *ehdr = &to->ehdr; 1497 GElf_Ehdr *kehdr = &from->ehdr; 1498 1499 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT); 1500 ehdr->e_type = kehdr->e_type; 1501 ehdr->e_machine = kehdr->e_machine; 1502 ehdr->e_version = kehdr->e_version; 1503 ehdr->e_entry = 0; 1504 ehdr->e_shoff = 0; 1505 ehdr->e_flags = kehdr->e_flags; 1506 ehdr->e_phnum = count; 1507 ehdr->e_shentsize = 0; 1508 ehdr->e_shnum = 0; 1509 ehdr->e_shstrndx = 0; 1510 1511 if (from->elfclass == ELFCLASS32) { 1512 ehdr->e_phoff = sizeof(Elf32_Ehdr); 1513 ehdr->e_ehsize = sizeof(Elf32_Ehdr); 1514 ehdr->e_phentsize = sizeof(Elf32_Phdr); 1515 } else { 1516 ehdr->e_phoff = sizeof(Elf64_Ehdr); 1517 ehdr->e_ehsize = sizeof(Elf64_Ehdr); 1518 ehdr->e_phentsize = sizeof(Elf64_Phdr); 1519 } 1520 1521 if (!gelf_update_ehdr(to->elf, ehdr)) 1522 return -1; 1523 1524 if (!gelf_newphdr(to->elf, count)) 1525 return -1; 1526 1527 return 0; 1528 } 1529 1530 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, 1531 u64 addr, u64 len) 1532 { 1533 GElf_Phdr phdr = { 1534 .p_type = PT_LOAD, 1535 .p_flags = PF_R | PF_W | PF_X, 1536 .p_offset = offset, 1537 .p_vaddr = addr, 1538 .p_paddr = 0, 1539 .p_filesz = len, 1540 .p_memsz = len, 1541 .p_align = page_size, 1542 }; 1543 1544 if (!gelf_update_phdr(kcore->elf, idx, &phdr)) 1545 return -1; 1546 1547 return 0; 1548 } 1549 1550 static off_t kcore__write(struct kcore *kcore) 1551 { 1552 return elf_update(kcore->elf, ELF_C_WRITE); 1553 } 1554 1555 struct phdr_data { 1556 off_t offset; 1557 off_t rel; 1558 u64 addr; 1559 u64 len; 1560 struct list_head node; 1561 struct phdr_data *remaps; 1562 }; 1563 1564 struct sym_data { 1565 u64 addr; 1566 struct list_head node; 1567 }; 1568 1569 struct kcore_copy_info { 1570 u64 stext; 1571 u64 etext; 1572 u64 first_symbol; 1573 u64 last_symbol; 1574 u64 first_module; 1575 u64 first_module_symbol; 1576 u64 last_module_symbol; 1577 size_t phnum; 1578 struct list_head phdrs; 1579 struct list_head syms; 1580 }; 1581 1582 #define kcore_copy__for_each_phdr(k, p) \ 1583 list_for_each_entry((p), &(k)->phdrs, node) 1584 1585 static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset) 1586 { 1587 struct phdr_data *p = zalloc(sizeof(*p)); 1588 1589 if (p) { 1590 p->addr = addr; 1591 p->len = len; 1592 p->offset = offset; 1593 } 1594 1595 return p; 1596 } 1597 1598 static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci, 1599 u64 addr, u64 len, 1600 off_t offset) 1601 { 1602 struct phdr_data *p = phdr_data__new(addr, len, offset); 1603 1604 if (p) 1605 list_add_tail(&p->node, &kci->phdrs); 1606 1607 return p; 1608 } 1609 1610 static void kcore_copy__free_phdrs(struct kcore_copy_info *kci) 1611 { 1612 struct phdr_data *p, *tmp; 1613 
1614 list_for_each_entry_safe(p, tmp, &kci->phdrs, node) { 1615 list_del_init(&p->node); 1616 free(p); 1617 } 1618 } 1619 1620 static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci, 1621 u64 addr) 1622 { 1623 struct sym_data *s = zalloc(sizeof(*s)); 1624 1625 if (s) { 1626 s->addr = addr; 1627 list_add_tail(&s->node, &kci->syms); 1628 } 1629 1630 return s; 1631 } 1632 1633 static void kcore_copy__free_syms(struct kcore_copy_info *kci) 1634 { 1635 struct sym_data *s, *tmp; 1636 1637 list_for_each_entry_safe(s, tmp, &kci->syms, node) { 1638 list_del_init(&s->node); 1639 free(s); 1640 } 1641 } 1642 1643 static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, 1644 u64 start) 1645 { 1646 struct kcore_copy_info *kci = arg; 1647 1648 if (!kallsyms__is_function(type)) 1649 return 0; 1650 1651 if (strchr(name, '[')) { 1652 if (!kci->first_module_symbol || start < kci->first_module_symbol) 1653 kci->first_module_symbol = start; 1654 if (start > kci->last_module_symbol) 1655 kci->last_module_symbol = start; 1656 return 0; 1657 } 1658 1659 if (!kci->first_symbol || start < kci->first_symbol) 1660 kci->first_symbol = start; 1661 1662 if (!kci->last_symbol || start > kci->last_symbol) 1663 kci->last_symbol = start; 1664 1665 if (!strcmp(name, "_stext")) { 1666 kci->stext = start; 1667 return 0; 1668 } 1669 1670 if (!strcmp(name, "_etext")) { 1671 kci->etext = start; 1672 return 0; 1673 } 1674 1675 if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start)) 1676 return -1; 1677 1678 return 0; 1679 } 1680 1681 static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci, 1682 const char *dir) 1683 { 1684 char kallsyms_filename[PATH_MAX]; 1685 1686 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir); 1687 1688 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms")) 1689 return -1; 1690 1691 if (kallsyms__parse(kallsyms_filename, kci, 1692 kcore_copy__process_kallsyms) < 0) 1693 return -1; 1694 1695 return 0; 1696 } 1697 1698 static int kcore_copy__process_modules(void *arg, 1699 const char *name __maybe_unused, 1700 u64 start, u64 size __maybe_unused) 1701 { 1702 struct kcore_copy_info *kci = arg; 1703 1704 if (!kci->first_module || start < kci->first_module) 1705 kci->first_module = start; 1706 1707 return 0; 1708 } 1709 1710 static int kcore_copy__parse_modules(struct kcore_copy_info *kci, 1711 const char *dir) 1712 { 1713 char modules_filename[PATH_MAX]; 1714 1715 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir); 1716 1717 if (symbol__restricted_filename(modules_filename, "/proc/modules")) 1718 return -1; 1719 1720 if (modules__parse(modules_filename, kci, 1721 kcore_copy__process_modules) < 0) 1722 return -1; 1723 1724 return 0; 1725 } 1726 1727 static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end, 1728 u64 pgoff, u64 s, u64 e) 1729 { 1730 u64 len, offset; 1731 1732 if (s < start || s >= end) 1733 return 0; 1734 1735 offset = (s - start) + pgoff; 1736 len = e < end ? e - s : end - s; 1737 1738 return kcore_copy_info__addnew(kci, s, len, offset) ? 
0 : -1; 1739 } 1740 1741 static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) 1742 { 1743 struct kcore_copy_info *kci = data; 1744 u64 end = start + len; 1745 struct sym_data *sdat; 1746 1747 if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext)) 1748 return -1; 1749 1750 if (kcore_copy__map(kci, start, end, pgoff, kci->first_module, 1751 kci->last_module_symbol)) 1752 return -1; 1753 1754 list_for_each_entry(sdat, &kci->syms, node) { 1755 u64 s = round_down(sdat->addr, page_size); 1756 1757 if (kcore_copy__map(kci, start, end, pgoff, s, s + len)) 1758 return -1; 1759 } 1760 1761 return 0; 1762 } 1763 1764 static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) 1765 { 1766 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0) 1767 return -1; 1768 1769 return 0; 1770 } 1771 1772 static void kcore_copy__find_remaps(struct kcore_copy_info *kci) 1773 { 1774 struct phdr_data *p, *k = NULL; 1775 u64 kend; 1776 1777 if (!kci->stext) 1778 return; 1779 1780 /* Find phdr that corresponds to the kernel map (contains stext) */ 1781 kcore_copy__for_each_phdr(kci, p) { 1782 u64 pend = p->addr + p->len - 1; 1783 1784 if (p->addr <= kci->stext && pend >= kci->stext) { 1785 k = p; 1786 break; 1787 } 1788 } 1789 1790 if (!k) 1791 return; 1792 1793 kend = k->offset + k->len; 1794 1795 /* Find phdrs that remap the kernel */ 1796 kcore_copy__for_each_phdr(kci, p) { 1797 u64 pend = p->offset + p->len; 1798 1799 if (p == k) 1800 continue; 1801 1802 if (p->offset >= k->offset && pend <= kend) 1803 p->remaps = k; 1804 } 1805 } 1806 1807 static void kcore_copy__layout(struct kcore_copy_info *kci) 1808 { 1809 struct phdr_data *p; 1810 off_t rel = 0; 1811 1812 kcore_copy__find_remaps(kci); 1813 1814 kcore_copy__for_each_phdr(kci, p) { 1815 if (!p->remaps) { 1816 p->rel = rel; 1817 rel += p->len; 1818 } 1819 kci->phnum += 1; 1820 } 1821 1822 kcore_copy__for_each_phdr(kci, p) { 1823 struct phdr_data *k = p->remaps; 1824 1825 if (k) 1826 p->rel = p->offset - k->offset + k->rel; 1827 } 1828 } 1829 1830 static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, 1831 Elf *elf) 1832 { 1833 if (kcore_copy__parse_kallsyms(kci, dir)) 1834 return -1; 1835 1836 if (kcore_copy__parse_modules(kci, dir)) 1837 return -1; 1838 1839 if (kci->stext) 1840 kci->stext = round_down(kci->stext, page_size); 1841 else 1842 kci->stext = round_down(kci->first_symbol, page_size); 1843 1844 if (kci->etext) { 1845 kci->etext = round_up(kci->etext, page_size); 1846 } else if (kci->last_symbol) { 1847 kci->etext = round_up(kci->last_symbol, page_size); 1848 kci->etext += page_size; 1849 } 1850 1851 if (kci->first_module_symbol && 1852 (!kci->first_module || kci->first_module_symbol < kci->first_module)) 1853 kci->first_module = kci->first_module_symbol; 1854 1855 kci->first_module = round_down(kci->first_module, page_size); 1856 1857 if (kci->last_module_symbol) { 1858 kci->last_module_symbol = round_up(kci->last_module_symbol, 1859 page_size); 1860 kci->last_module_symbol += page_size; 1861 } 1862 1863 if (!kci->stext || !kci->etext) 1864 return -1; 1865 1866 if (kci->first_module && !kci->last_module_symbol) 1867 return -1; 1868 1869 if (kcore_copy__read_maps(kci, elf)) 1870 return -1; 1871 1872 kcore_copy__layout(kci); 1873 1874 return 0; 1875 } 1876 1877 static int kcore_copy__copy_file(const char *from_dir, const char *to_dir, 1878 const char *name) 1879 { 1880 char from_filename[PATH_MAX]; 1881 char to_filename[PATH_MAX]; 1882 1883 scnprintf(from_filename, PATH_MAX, 
"%s/%s", from_dir, name); 1884 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name); 1885 1886 return copyfile_mode(from_filename, to_filename, 0400); 1887 } 1888 1889 static int kcore_copy__unlink(const char *dir, const char *name) 1890 { 1891 char filename[PATH_MAX]; 1892 1893 scnprintf(filename, PATH_MAX, "%s/%s", dir, name); 1894 1895 return unlink(filename); 1896 } 1897 1898 static int kcore_copy__compare_fds(int from, int to) 1899 { 1900 char *buf_from; 1901 char *buf_to; 1902 ssize_t ret; 1903 size_t len; 1904 int err = -1; 1905 1906 buf_from = malloc(page_size); 1907 buf_to = malloc(page_size); 1908 if (!buf_from || !buf_to) 1909 goto out; 1910 1911 while (1) { 1912 /* Use read because mmap won't work on proc files */ 1913 ret = read(from, buf_from, page_size); 1914 if (ret < 0) 1915 goto out; 1916 1917 if (!ret) 1918 break; 1919 1920 len = ret; 1921 1922 if (readn(to, buf_to, len) != (int)len) 1923 goto out; 1924 1925 if (memcmp(buf_from, buf_to, len)) 1926 goto out; 1927 } 1928 1929 err = 0; 1930 out: 1931 free(buf_to); 1932 free(buf_from); 1933 return err; 1934 } 1935 1936 static int kcore_copy__compare_files(const char *from_filename, 1937 const char *to_filename) 1938 { 1939 int from, to, err = -1; 1940 1941 from = open(from_filename, O_RDONLY); 1942 if (from < 0) 1943 return -1; 1944 1945 to = open(to_filename, O_RDONLY); 1946 if (to < 0) 1947 goto out_close_from; 1948 1949 err = kcore_copy__compare_fds(from, to); 1950 1951 close(to); 1952 out_close_from: 1953 close(from); 1954 return err; 1955 } 1956 1957 static int kcore_copy__compare_file(const char *from_dir, const char *to_dir, 1958 const char *name) 1959 { 1960 char from_filename[PATH_MAX]; 1961 char to_filename[PATH_MAX]; 1962 1963 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name); 1964 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name); 1965 1966 return kcore_copy__compare_files(from_filename, to_filename); 1967 } 1968 1969 /** 1970 * kcore_copy - copy kallsyms, modules and kcore from one directory to another. 1971 * @from_dir: from directory 1972 * @to_dir: to directory 1973 * 1974 * This function copies kallsyms, modules and kcore files from one directory to 1975 * another. kallsyms and modules are copied entirely. Only code segments are 1976 * copied from kcore. It is assumed that two segments suffice: one for the 1977 * kernel proper and one for all the modules. The code segments are determined 1978 * from kallsyms and modules files. The kernel map starts at _stext or the 1979 * lowest function symbol, and ends at _etext or the highest function symbol. 1980 * The module map starts at the lowest module address and ends at the highest 1981 * module symbol. Start addresses are rounded down to the nearest page. End 1982 * addresses are rounded up to the nearest page. An extra page is added to the 1983 * highest kernel symbol and highest module symbol to, hopefully, encompass that 1984 * symbol too. Because it contains only code sections, the resulting kcore is 1985 * unusual. One significant peculiarity is that the mapping (start -> pgoff) 1986 * is not the same for the kernel map and the modules map. That happens because 1987 * the data is copied adjacently whereas the original kcore has gaps. Finally, 1988 * kallsyms and modules files are compared with their copies to check that 1989 * modules have not been loaded or unloaded while the copies were taking place. 1990 * 1991 * Return: %0 on success, %-1 on failure. 
1992 */ 1993 int kcore_copy(const char *from_dir, const char *to_dir) 1994 { 1995 struct kcore kcore; 1996 struct kcore extract; 1997 int idx = 0, err = -1; 1998 off_t offset, sz; 1999 struct kcore_copy_info kci = { .stext = 0, }; 2000 char kcore_filename[PATH_MAX]; 2001 char extract_filename[PATH_MAX]; 2002 struct phdr_data *p; 2003 2004 INIT_LIST_HEAD(&kci.phdrs); 2005 INIT_LIST_HEAD(&kci.syms); 2006 2007 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms")) 2008 return -1; 2009 2010 if (kcore_copy__copy_file(from_dir, to_dir, "modules")) 2011 goto out_unlink_kallsyms; 2012 2013 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir); 2014 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir); 2015 2016 if (kcore__open(&kcore, kcore_filename)) 2017 goto out_unlink_modules; 2018 2019 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf)) 2020 goto out_kcore_close; 2021 2022 if (kcore__init(&extract, extract_filename, kcore.elfclass, false)) 2023 goto out_kcore_close; 2024 2025 if (kcore__copy_hdr(&kcore, &extract, kci.phnum)) 2026 goto out_extract_close; 2027 2028 offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) + 2029 gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT); 2030 offset = round_up(offset, page_size); 2031 2032 kcore_copy__for_each_phdr(&kci, p) { 2033 off_t offs = p->rel + offset; 2034 2035 if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len)) 2036 goto out_extract_close; 2037 } 2038 2039 sz = kcore__write(&extract); 2040 if (sz < 0 || sz > offset) 2041 goto out_extract_close; 2042 2043 kcore_copy__for_each_phdr(&kci, p) { 2044 off_t offs = p->rel + offset; 2045 2046 if (p->remaps) 2047 continue; 2048 if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len)) 2049 goto out_extract_close; 2050 } 2051 2052 if (kcore_copy__compare_file(from_dir, to_dir, "modules")) 2053 goto out_extract_close; 2054 2055 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms")) 2056 goto out_extract_close; 2057 2058 err = 0; 2059 2060 out_extract_close: 2061 kcore__close(&extract); 2062 if (err) 2063 unlink(extract_filename); 2064 out_kcore_close: 2065 kcore__close(&kcore); 2066 out_unlink_modules: 2067 if (err) 2068 kcore_copy__unlink(to_dir, "modules"); 2069 out_unlink_kallsyms: 2070 if (err) 2071 kcore_copy__unlink(to_dir, "kallsyms"); 2072 2073 kcore_copy__free_phdrs(&kci); 2074 kcore_copy__free_syms(&kci); 2075 2076 return err; 2077 } 2078 2079 int kcore_extract__create(struct kcore_extract *kce) 2080 { 2081 struct kcore kcore; 2082 struct kcore extract; 2083 size_t count = 1; 2084 int idx = 0, err = -1; 2085 off_t offset = page_size, sz; 2086 2087 if (kcore__open(&kcore, kce->kcore_filename)) 2088 return -1; 2089 2090 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT); 2091 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true)) 2092 goto out_kcore_close; 2093 2094 if (kcore__copy_hdr(&kcore, &extract, count)) 2095 goto out_extract_close; 2096 2097 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len)) 2098 goto out_extract_close; 2099 2100 sz = kcore__write(&extract); 2101 if (sz < 0 || sz > offset) 2102 goto out_extract_close; 2103 2104 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len)) 2105 goto out_extract_close; 2106 2107 err = 0; 2108 2109 out_extract_close: 2110 kcore__close(&extract); 2111 if (err) 2112 unlink(kce->extract_filename); 2113 out_kcore_close: 2114 kcore__close(&kcore); 2115 2116 return err; 2117 } 2118 2119 void kcore_extract__delete(struct kcore_extract *kce) 2120 { 2121 
unlink(kce->extract_filename); 2122 } 2123 2124 #ifdef HAVE_GELF_GETNOTE_SUPPORT 2125 2126 static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off) 2127 { 2128 if (!base_off) 2129 return; 2130 2131 if (tmp->bit32) 2132 tmp->addr.a32[SDT_NOTE_IDX_LOC] = 2133 tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off - 2134 tmp->addr.a32[SDT_NOTE_IDX_BASE]; 2135 else 2136 tmp->addr.a64[SDT_NOTE_IDX_LOC] = 2137 tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off - 2138 tmp->addr.a64[SDT_NOTE_IDX_BASE]; 2139 } 2140 2141 static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr, 2142 GElf_Addr base_off) 2143 { 2144 if (!base_off) 2145 return; 2146 2147 if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR]) 2148 tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off); 2149 else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR]) 2150 tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off); 2151 } 2152 2153 /** 2154 * populate_sdt_note : Parse raw data and identify SDT note 2155 * @elf: elf of the opened file 2156 * @data: raw data of a section with description offset applied 2157 * @len: note description size 2158 * @type: type of the note 2159 * @sdt_notes: List to add the SDT note 2160 * 2161 * Responsible for parsing the @data in section .note.stapsdt in @elf and 2162 * if its an SDT note, it appends to @sdt_notes list. 2163 */ 2164 static int populate_sdt_note(Elf **elf, const char *data, size_t len, 2165 struct list_head *sdt_notes) 2166 { 2167 const char *provider, *name, *args; 2168 struct sdt_note *tmp = NULL; 2169 GElf_Ehdr ehdr; 2170 GElf_Shdr shdr; 2171 int ret = -EINVAL; 2172 2173 union { 2174 Elf64_Addr a64[NR_ADDR]; 2175 Elf32_Addr a32[NR_ADDR]; 2176 } buf; 2177 2178 Elf_Data dst = { 2179 .d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT, 2180 .d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT), 2181 .d_off = 0, .d_align = 0 2182 }; 2183 Elf_Data src = { 2184 .d_buf = (void *) data, .d_type = ELF_T_ADDR, 2185 .d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0, 2186 .d_align = 0 2187 }; 2188 2189 tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note)); 2190 if (!tmp) { 2191 ret = -ENOMEM; 2192 goto out_err; 2193 } 2194 2195 INIT_LIST_HEAD(&tmp->note_list); 2196 2197 if (len < dst.d_size + 3) 2198 goto out_free_note; 2199 2200 /* Translation from file representation to memory representation */ 2201 if (gelf_xlatetom(*elf, &dst, &src, 2202 elf_getident(*elf, NULL)[EI_DATA]) == NULL) { 2203 pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1)); 2204 goto out_free_note; 2205 } 2206 2207 /* Populate the fields of sdt_note */ 2208 provider = data + dst.d_size; 2209 2210 name = (const char *)memchr(provider, '\0', data + len - provider); 2211 if (name++ == NULL) 2212 goto out_free_note; 2213 2214 tmp->provider = strdup(provider); 2215 if (!tmp->provider) { 2216 ret = -ENOMEM; 2217 goto out_free_note; 2218 } 2219 tmp->name = strdup(name); 2220 if (!tmp->name) { 2221 ret = -ENOMEM; 2222 goto out_free_prov; 2223 } 2224 2225 args = memchr(name, '\0', data + len - name); 2226 2227 /* 2228 * There is no argument if: 2229 * - We reached the end of the note; 2230 * - There is not enough room to hold a potential string; 2231 * - The argument string is empty or just contains ':'. 
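	 *   For example, a descriptor laid out as "provider\0name\0\0" (an
	 *   empty argument string) ends up with tmp->args == NULL.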
	 */
	if (args == NULL || data + len - args < 2 ||
	    args[1] == ':' || args[1] == '\0')
		tmp->args = NULL;
	else {
		tmp->args = strdup(++args);
		if (!tmp->args) {
			ret = -ENOMEM;
			goto out_free_name;
		}
	}

	if (gelf_getclass(*elf) == ELFCLASS32) {
		memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
		tmp->bit32 = true;
	} else {
		memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
		tmp->bit32 = false;
	}

	if (!gelf_getehdr(*elf, &ehdr)) {
		pr_debug("%s : cannot get elf header.\n", __func__);
		ret = -EBADF;
		goto out_free_args;
	}

	/*
	 * Adjust the prelink effect:
	 * Find out the .stapsdt.base section.
	 * This scn will help us to handle prelinking (if present).
	 * Compare the retrieved file offset of the base section with the
	 * base address in the description of the SDT note. If it's different,
	 * then adjust the note location accordingly.
	 */
	if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
		sdt_adjust_loc(tmp, shdr.sh_offset);

	/* Adjust reference counter offset */
	if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
		sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);

	list_add_tail(&tmp->note_list, sdt_notes);
	return 0;

out_free_args:
	zfree(&tmp->args);
out_free_name:
	zfree(&tmp->name);
out_free_prov:
	zfree(&tmp->provider);
out_free_note:
	free(tmp);
out_err:
	return ret;
}

/**
 * construct_sdt_notes_list : constructs a list of SDT notes
 * @elf : elf to look into
 * @sdt_notes : empty list_head
 *
 * Scans the sections in 'elf' for the section
 * .note.stapsdt. It then calls populate_sdt_note to find
 * out the SDT events and populates the 'sdt_notes'.
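 *
 * Return: 0 on success, -ENOENT when no SDT notes are found, or another
 * negative errno on failure.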
2295 */ 2296 static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes) 2297 { 2298 GElf_Ehdr ehdr; 2299 Elf_Scn *scn = NULL; 2300 Elf_Data *data; 2301 GElf_Shdr shdr; 2302 size_t shstrndx, next; 2303 GElf_Nhdr nhdr; 2304 size_t name_off, desc_off, offset; 2305 int ret = 0; 2306 2307 if (gelf_getehdr(elf, &ehdr) == NULL) { 2308 ret = -EBADF; 2309 goto out_ret; 2310 } 2311 if (elf_getshdrstrndx(elf, &shstrndx) != 0) { 2312 ret = -EBADF; 2313 goto out_ret; 2314 } 2315 2316 /* Look for the required section */ 2317 scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL); 2318 if (!scn) { 2319 ret = -ENOENT; 2320 goto out_ret; 2321 } 2322 2323 if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) { 2324 ret = -ENOENT; 2325 goto out_ret; 2326 } 2327 2328 data = elf_getdata(scn, NULL); 2329 2330 /* Get the SDT notes */ 2331 for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off, 2332 &desc_off)) > 0; offset = next) { 2333 if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) && 2334 !memcmp(data->d_buf + name_off, SDT_NOTE_NAME, 2335 sizeof(SDT_NOTE_NAME))) { 2336 /* Check the type of the note */ 2337 if (nhdr.n_type != SDT_NOTE_TYPE) 2338 goto out_ret; 2339 2340 ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off), 2341 nhdr.n_descsz, sdt_notes); 2342 if (ret < 0) 2343 goto out_ret; 2344 } 2345 } 2346 if (list_empty(sdt_notes)) 2347 ret = -ENOENT; 2348 2349 out_ret: 2350 return ret; 2351 } 2352 2353 /** 2354 * get_sdt_note_list : Wrapper to construct a list of sdt notes 2355 * @head : empty list_head 2356 * @target : file to find SDT notes from 2357 * 2358 * This opens the file, initializes 2359 * the ELF and then calls construct_sdt_notes_list. 2360 */ 2361 int get_sdt_note_list(struct list_head *head, const char *target) 2362 { 2363 Elf *elf; 2364 int fd, ret; 2365 2366 fd = open(target, O_RDONLY); 2367 if (fd < 0) 2368 return -EBADF; 2369 2370 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 2371 if (!elf) { 2372 ret = -EBADF; 2373 goto out_close; 2374 } 2375 ret = construct_sdt_notes_list(elf, head); 2376 elf_end(elf); 2377 out_close: 2378 close(fd); 2379 return ret; 2380 } 2381 2382 /** 2383 * cleanup_sdt_note_list : free the sdt notes' list 2384 * @sdt_notes: sdt notes' list 2385 * 2386 * Free up the SDT notes in @sdt_notes. 2387 * Returns the number of SDT notes free'd. 2388 */ 2389 int cleanup_sdt_note_list(struct list_head *sdt_notes) 2390 { 2391 struct sdt_note *tmp, *pos; 2392 int nr_free = 0; 2393 2394 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) { 2395 list_del_init(&pos->note_list); 2396 zfree(&pos->name); 2397 zfree(&pos->provider); 2398 free(pos); 2399 nr_free++; 2400 } 2401 return nr_free; 2402 } 2403 2404 /** 2405 * sdt_notes__get_count: Counts the number of sdt events 2406 * @start: list_head to sdt_notes list 2407 * 2408 * Returns the number of SDT notes in a list 2409 */ 2410 int sdt_notes__get_count(struct list_head *start) 2411 { 2412 struct sdt_note *sdt_ptr; 2413 int count = 0; 2414 2415 list_for_each_entry(sdt_ptr, start, note_list) 2416 count++; 2417 return count; 2418 } 2419 #endif 2420 2421 void symbol__elf_init(void) 2422 { 2423 elf_version(EV_CURRENT); 2424 } 2425
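
/*
 * Illustrative only: a typical use of the SDT helpers above (assuming
 * HAVE_GELF_GETNOTE_SUPPORT is set), with "/usr/bin/prog" standing in for
 * any ELF file of interest:
 *
 *	LIST_HEAD(sdt_notes);
 *
 *	symbol__elf_init();
 *	if (!get_sdt_note_list(&sdt_notes, "/usr/bin/prog")) {
 *		pr_debug("%d SDT events\n", sdt_notes__get_count(&sdt_notes));
 *		cleanup_sdt_note_list(&sdt_notes);
 *	}
 */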