// SPDX-License-Identifier: GPL-2.0
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>

#include "dso.h"
#include "map.h"
#include "maps.h"
#include "symbol.h"
#include "symsrc.h"
#include "demangle-cxx.h"
#include "demangle-ocaml.h"
#include "demangle-java.h"
#include "demangle-rust.h"
#include "machine.h"
#include "vdso.h"
#include "debug.h"
#include "util/copyfile.h"
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <symbol/kallsyms.h>
#include <internal/lib.h>

#ifdef HAVE_LIBBFD_SUPPORT
#define PACKAGE 'perf'
#include <bfd.h>
#endif

#ifndef EM_AARCH64
#define EM_AARCH64	183  /* ARM 64 bit */
#endif

#ifndef ELF32_ST_VISIBILITY
#define ELF32_ST_VISIBILITY(o)	((o) & 0x03)
#endif

/* For ELF64 the definitions are the same. */
#ifndef ELF64_ST_VISIBILITY
#define ELF64_ST_VISIBILITY(o)	ELF32_ST_VISIBILITY (o)
#endif

/* How to extract information held in the st_other field. */
#ifndef GELF_ST_VISIBILITY
#define GELF_ST_VISIBILITY(val)	ELF64_ST_VISIBILITY (val)
#endif

typedef Elf64_Nhdr GElf_Nhdr;


#ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
static int elf_getphdrnum(Elf *elf, size_t *dst)
{
	GElf_Ehdr gehdr;
	GElf_Ehdr *ehdr;

	ehdr = gelf_getehdr(elf, &gehdr);
	if (!ehdr)
		return -1;

	*dst = ehdr->e_phnum;

	return 0;
}
#endif

#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
{
	pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
	return -1;
}
#endif

#ifndef NT_GNU_BUILD_ID
#define NT_GNU_BUILD_ID 3
#endif

/**
 * elf_symtab__for_each_symbol - iterate through all the symbols
 *
 * @syms: struct elf_symtab instance to iterate
 * @nr_syms: number of symbols in @syms
 * @idx: uint32_t index
 * @sym: GElf_Sym iterator
 */
#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
	for (idx = 0, gelf_getsym(syms, idx, &sym);\
	     idx < nr_syms; \
	     idx++, gelf_getsym(syms, idx, &sym))

static inline uint8_t elf_sym__type(const GElf_Sym *sym)
{
	return GELF_ST_TYPE(sym->st_info);
}

static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
{
	return GELF_ST_VISIBILITY(sym->st_other);
}

#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif

static inline int elf_sym__is_function(const GElf_Sym *sym)
{
	return (elf_sym__type(sym) == STT_FUNC ||
		elf_sym__type(sym) == STT_GNU_IFUNC) &&
	       sym->st_name != 0 &&
	       sym->st_shndx != SHN_UNDEF;
}

static inline bool elf_sym__is_object(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_OBJECT &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF;
}

static inline int elf_sym__is_label(const GElf_Sym *sym)
{
	return elf_sym__type(sym) == STT_NOTYPE &&
		sym->st_name != 0 &&
		sym->st_shndx != SHN_UNDEF &&
		sym->st_shndx != SHN_ABS &&
		elf_sym__visibility(sym) != STV_HIDDEN &&
		elf_sym__visibility(sym) != STV_INTERNAL;
}

static bool elf_sym__filter(GElf_Sym *sym)
{
	return elf_sym__is_function(sym) || elf_sym__is_object(sym);
}

static inline const char *elf_sym__name(const GElf_Sym *sym,
					const Elf_Data *symstrs)
{
	return symstrs->d_buf + sym->st_name;
}

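/*
 * Usage sketch (illustrative only, not compiled into perf): how the
 * iterator macro and the elf_sym__*() helpers above are typically combined
 * to walk a symbol table.  The variables scn_symtab, shdr_symtab and
 * scn_strtab are hypothetical and stand for sections already located with
 * elf_section_by_name(); error handling is omitted for brevity.
 *
 *	Elf_Data *syms    = elf_getdata(scn_symtab, NULL);
 *	Elf_Data *symstrs = elf_getdata(scn_strtab, NULL);
 *	uint32_t nr_syms  = shdr_symtab.sh_size / shdr_symtab.sh_entsize;
 *	uint32_t idx;
 *	GElf_Sym sym;
 *
 *	elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
 *		if (!elf_sym__is_function(&sym))
 *			continue;
 *		pr_debug("func: %s\n", elf_sym__name(&sym, symstrs));
 *	}
 */
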
static inline const char *elf_sec__name(const GElf_Shdr *shdr, 146 const Elf_Data *secstrs) 147 { 148 return secstrs->d_buf + shdr->sh_name; 149 } 150 151 static inline int elf_sec__is_text(const GElf_Shdr *shdr, 152 const Elf_Data *secstrs) 153 { 154 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL; 155 } 156 157 static inline bool elf_sec__is_data(const GElf_Shdr *shdr, 158 const Elf_Data *secstrs) 159 { 160 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; 161 } 162 163 static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs) 164 { 165 return elf_sec__is_text(shdr, secstrs) || 166 elf_sec__is_data(shdr, secstrs); 167 } 168 169 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) 170 { 171 Elf_Scn *sec = NULL; 172 GElf_Shdr shdr; 173 size_t cnt = 1; 174 175 while ((sec = elf_nextscn(elf, sec)) != NULL) { 176 gelf_getshdr(sec, &shdr); 177 178 if ((addr >= shdr.sh_addr) && 179 (addr < (shdr.sh_addr + shdr.sh_size))) 180 return cnt; 181 182 ++cnt; 183 } 184 185 return -1; 186 } 187 188 Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, 189 GElf_Shdr *shp, const char *name, size_t *idx) 190 { 191 Elf_Scn *sec = NULL; 192 size_t cnt = 1; 193 194 /* ELF is corrupted/truncated, avoid calling elf_strptr. */ 195 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) 196 return NULL; 197 198 while ((sec = elf_nextscn(elf, sec)) != NULL) { 199 char *str; 200 201 gelf_getshdr(sec, shp); 202 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); 203 if (str && !strcmp(name, str)) { 204 if (idx) 205 *idx = cnt; 206 return sec; 207 } 208 ++cnt; 209 } 210 211 return NULL; 212 } 213 214 bool filename__has_section(const char *filename, const char *sec) 215 { 216 int fd; 217 Elf *elf; 218 GElf_Ehdr ehdr; 219 GElf_Shdr shdr; 220 bool found = false; 221 222 fd = open(filename, O_RDONLY); 223 if (fd < 0) 224 return false; 225 226 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 227 if (elf == NULL) 228 goto out; 229 230 if (gelf_getehdr(elf, &ehdr) == NULL) 231 goto elf_out; 232 233 found = !!elf_section_by_name(elf, &ehdr, &shdr, sec, NULL); 234 235 elf_out: 236 elf_end(elf); 237 out: 238 close(fd); 239 return found; 240 } 241 242 static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr) 243 { 244 size_t i, phdrnum; 245 u64 sz; 246 247 if (elf_getphdrnum(elf, &phdrnum)) 248 return -1; 249 250 for (i = 0; i < phdrnum; i++) { 251 if (gelf_getphdr(elf, i, phdr) == NULL) 252 return -1; 253 254 if (phdr->p_type != PT_LOAD) 255 continue; 256 257 sz = max(phdr->p_memsz, phdr->p_filesz); 258 if (!sz) 259 continue; 260 261 if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz)) 262 return 0; 263 } 264 265 /* Not found any valid program header */ 266 return -1; 267 } 268 269 static bool want_demangle(bool is_kernel_sym) 270 { 271 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle; 272 } 273 274 static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name) 275 { 276 char *demangled = NULL; 277 278 /* 279 * We need to figure out if the object was created from C++ sources 280 * DWARF DW_compile_unit has this, but we don't always have access 281 * to it... 
282 */ 283 if (!want_demangle(dso->kernel || kmodule)) 284 return demangled; 285 286 demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0); 287 if (demangled == NULL) { 288 demangled = ocaml_demangle_sym(elf_name); 289 if (demangled == NULL) { 290 demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET); 291 } 292 } 293 else if (rust_is_mangled(demangled)) 294 /* 295 * Input to Rust demangling is the BFD-demangled 296 * name which it Rust-demangles in place. 297 */ 298 rust_demangle_sym(demangled); 299 300 return demangled; 301 } 302 303 struct rel_info { 304 u32 nr_entries; 305 u32 *sorted; 306 bool is_rela; 307 Elf_Data *reldata; 308 GElf_Rela rela; 309 GElf_Rel rel; 310 }; 311 312 static u32 get_rel_symidx(struct rel_info *ri, u32 idx) 313 { 314 idx = ri->sorted ? ri->sorted[idx] : idx; 315 if (ri->is_rela) { 316 gelf_getrela(ri->reldata, idx, &ri->rela); 317 return GELF_R_SYM(ri->rela.r_info); 318 } 319 gelf_getrel(ri->reldata, idx, &ri->rel); 320 return GELF_R_SYM(ri->rel.r_info); 321 } 322 323 static u64 get_rel_offset(struct rel_info *ri, u32 x) 324 { 325 if (ri->is_rela) { 326 GElf_Rela rela; 327 328 gelf_getrela(ri->reldata, x, &rela); 329 return rela.r_offset; 330 } else { 331 GElf_Rel rel; 332 333 gelf_getrel(ri->reldata, x, &rel); 334 return rel.r_offset; 335 } 336 } 337 338 static int rel_cmp(const void *a, const void *b, void *r) 339 { 340 struct rel_info *ri = r; 341 u64 a_offset = get_rel_offset(ri, *(const u32 *)a); 342 u64 b_offset = get_rel_offset(ri, *(const u32 *)b); 343 344 return a_offset < b_offset ? -1 : (a_offset > b_offset ? 1 : 0); 345 } 346 347 static int sort_rel(struct rel_info *ri) 348 { 349 size_t sz = sizeof(ri->sorted[0]); 350 u32 i; 351 352 ri->sorted = calloc(ri->nr_entries, sz); 353 if (!ri->sorted) 354 return -1; 355 for (i = 0; i < ri->nr_entries; i++) 356 ri->sorted[i] = i; 357 qsort_r(ri->sorted, ri->nr_entries, sz, rel_cmp, ri); 358 return 0; 359 } 360 361 /* 362 * For x86_64, the GNU linker is putting IFUNC information in the relocation 363 * addend. 
 */
static bool addend_may_be_ifunc(GElf_Ehdr *ehdr, struct rel_info *ri)
{
	return ehdr->e_machine == EM_X86_64 && ri->is_rela &&
	       GELF_R_TYPE(ri->rela.r_info) == R_X86_64_IRELATIVE;
}

static bool get_ifunc_name(Elf *elf, struct dso *dso, GElf_Ehdr *ehdr,
			   struct rel_info *ri, char *buf, size_t buf_sz)
{
	u64 addr = ri->rela.r_addend;
	struct symbol *sym;
	GElf_Phdr phdr;

	if (!addend_may_be_ifunc(ehdr, ri))
		return false;

	if (elf_read_program_header(elf, addr, &phdr))
		return false;

	addr -= phdr.p_vaddr - phdr.p_offset;

	sym = dso__find_symbol_nocache(dso, addr);

	/* Expecting the address to be an IFUNC or IFUNC alias */
	if (!sym || sym->start != addr || (sym->type != STT_GNU_IFUNC && !sym->ifunc_alias))
		return false;

	snprintf(buf, buf_sz, "%s@plt", sym->name);

	return true;
}

static void exit_rel(struct rel_info *ri)
{
	zfree(&ri->sorted);
}

static bool get_plt_sizes(struct dso *dso, GElf_Ehdr *ehdr, GElf_Shdr *shdr_plt,
			  u64 *plt_header_size, u64 *plt_entry_size)
{
	switch (ehdr->e_machine) {
	case EM_ARM:
		*plt_header_size = 20;
		*plt_entry_size = 12;
		return true;
	case EM_AARCH64:
		*plt_header_size = 32;
		*plt_entry_size = 16;
		return true;
	case EM_SPARC:
		*plt_header_size = 48;
		*plt_entry_size = 12;
		return true;
	case EM_SPARCV9:
		*plt_header_size = 128;
		*plt_entry_size = 32;
		return true;
	case EM_386:
	case EM_X86_64:
		*plt_entry_size = shdr_plt->sh_entsize;
		/* Size is 8 or 16, if not, assume alignment indicates size */
		if (*plt_entry_size != 8 && *plt_entry_size != 16)
			*plt_entry_size = shdr_plt->sh_addralign == 8 ? 8 : 16;
		*plt_header_size = *plt_entry_size;
		break;
	default: /* FIXME: s390/alpha/mips/parisc/powerpc/sh/xtensa need to be checked */
		*plt_header_size = shdr_plt->sh_entsize;
		*plt_entry_size = shdr_plt->sh_entsize;
		break;
	}
	if (*plt_entry_size)
		return true;
	pr_debug("Missing PLT entry size for %s\n", dso->long_name);
	return false;
}

static bool machine_is_x86(GElf_Half e_machine)
{
	return e_machine == EM_386 || e_machine == EM_X86_64;
}

struct rela_dyn {
	GElf_Addr offset;
	u32 sym_idx;
};

struct rela_dyn_info {
	struct dso *dso;
	Elf_Data *plt_got_data;
	u32 nr_entries;
	struct rela_dyn *sorted;
	Elf_Data *dynsym_data;
	Elf_Data *dynstr_data;
	Elf_Data *rela_dyn_data;
};

static void exit_rela_dyn(struct rela_dyn_info *di)
{
	zfree(&di->sorted);
}

static int cmp_offset(const void *a, const void *b)
{
	const struct rela_dyn *va = a;
	const struct rela_dyn *vb = b;

	return va->offset < vb->offset ? -1 : (va->offset > vb->offset ?
1 : 0); 472 } 473 474 static int sort_rela_dyn(struct rela_dyn_info *di) 475 { 476 u32 i, n; 477 478 di->sorted = calloc(di->nr_entries, sizeof(di->sorted[0])); 479 if (!di->sorted) 480 return -1; 481 482 /* Get data for sorting: the offset and symbol index */ 483 for (i = 0, n = 0; i < di->nr_entries; i++) { 484 GElf_Rela rela; 485 u32 sym_idx; 486 487 gelf_getrela(di->rela_dyn_data, i, &rela); 488 sym_idx = GELF_R_SYM(rela.r_info); 489 if (sym_idx) { 490 di->sorted[n].sym_idx = sym_idx; 491 di->sorted[n].offset = rela.r_offset; 492 n += 1; 493 } 494 } 495 496 /* Sort by offset */ 497 di->nr_entries = n; 498 qsort(di->sorted, n, sizeof(di->sorted[0]), cmp_offset); 499 500 return 0; 501 } 502 503 static void get_rela_dyn_info(Elf *elf, GElf_Ehdr *ehdr, struct rela_dyn_info *di, Elf_Scn *scn) 504 { 505 GElf_Shdr rela_dyn_shdr; 506 GElf_Shdr shdr; 507 508 di->plt_got_data = elf_getdata(scn, NULL); 509 510 scn = elf_section_by_name(elf, ehdr, &rela_dyn_shdr, ".rela.dyn", NULL); 511 if (!scn || !rela_dyn_shdr.sh_link || !rela_dyn_shdr.sh_entsize) 512 return; 513 514 di->nr_entries = rela_dyn_shdr.sh_size / rela_dyn_shdr.sh_entsize; 515 di->rela_dyn_data = elf_getdata(scn, NULL); 516 517 scn = elf_getscn(elf, rela_dyn_shdr.sh_link); 518 if (!scn || !gelf_getshdr(scn, &shdr) || !shdr.sh_link) 519 return; 520 521 di->dynsym_data = elf_getdata(scn, NULL); 522 di->dynstr_data = elf_getdata(elf_getscn(elf, shdr.sh_link), NULL); 523 524 if (!di->plt_got_data || !di->dynstr_data || !di->dynsym_data || !di->rela_dyn_data) 525 return; 526 527 /* Sort into offset order */ 528 sort_rela_dyn(di); 529 } 530 531 /* Get instruction displacement from a plt entry for x86_64 */ 532 static u32 get_x86_64_plt_disp(const u8 *p) 533 { 534 u8 endbr64[] = {0xf3, 0x0f, 0x1e, 0xfa}; 535 int n = 0; 536 537 /* Skip endbr64 */ 538 if (!memcmp(p, endbr64, sizeof(endbr64))) 539 n += sizeof(endbr64); 540 /* Skip bnd prefix */ 541 if (p[n] == 0xf2) 542 n += 1; 543 /* jmp with 4-byte displacement */ 544 if (p[n] == 0xff && p[n + 1] == 0x25) { 545 u32 disp; 546 547 n += 2; 548 /* Also add offset from start of entry to end of instruction */ 549 memcpy(&disp, p + n, sizeof(disp)); 550 return n + 4 + le32toh(disp); 551 } 552 return 0; 553 } 554 555 static bool get_plt_got_name(GElf_Shdr *shdr, size_t i, 556 struct rela_dyn_info *di, 557 char *buf, size_t buf_sz) 558 { 559 struct rela_dyn vi, *vr; 560 const char *sym_name; 561 char *demangled; 562 GElf_Sym sym; 563 bool result; 564 u32 disp; 565 566 if (!di->sorted) 567 return false; 568 569 disp = get_x86_64_plt_disp(di->plt_got_data->d_buf + i); 570 if (!disp) 571 return false; 572 573 /* Compute target offset of the .plt.got entry */ 574 vi.offset = shdr->sh_offset + di->plt_got_data->d_off + i + disp; 575 576 /* Find that offset in .rela.dyn (sorted by offset) */ 577 vr = bsearch(&vi, di->sorted, di->nr_entries, sizeof(di->sorted[0]), cmp_offset); 578 if (!vr) 579 return false; 580 581 /* Get the associated symbol */ 582 gelf_getsym(di->dynsym_data, vr->sym_idx, &sym); 583 sym_name = elf_sym__name(&sym, di->dynstr_data); 584 demangled = demangle_sym(di->dso, 0, sym_name); 585 if (demangled != NULL) 586 sym_name = demangled; 587 588 snprintf(buf, buf_sz, "%s@plt", sym_name); 589 590 result = *sym_name; 591 592 free(demangled); 593 594 return result; 595 } 596 597 static int dso__synthesize_plt_got_symbols(struct dso *dso, Elf *elf, 598 GElf_Ehdr *ehdr, 599 char *buf, size_t buf_sz) 600 { 601 struct rela_dyn_info di = { .dso = dso }; 602 struct symbol *sym; 603 GElf_Shdr shdr; 604 
Elf_Scn *scn; 605 int err = -1; 606 size_t i; 607 608 scn = elf_section_by_name(elf, ehdr, &shdr, ".plt.got", NULL); 609 if (!scn || !shdr.sh_entsize) 610 return 0; 611 612 if (ehdr->e_machine == EM_X86_64) 613 get_rela_dyn_info(elf, ehdr, &di, scn); 614 615 for (i = 0; i < shdr.sh_size; i += shdr.sh_entsize) { 616 if (!get_plt_got_name(&shdr, i, &di, buf, buf_sz)) 617 snprintf(buf, buf_sz, "offset_%#" PRIx64 "@plt", (u64)shdr.sh_offset + i); 618 sym = symbol__new(shdr.sh_offset + i, shdr.sh_entsize, STB_GLOBAL, STT_FUNC, buf); 619 if (!sym) 620 goto out; 621 symbols__insert(&dso->symbols, sym); 622 } 623 err = 0; 624 out: 625 exit_rela_dyn(&di); 626 return err; 627 } 628 629 /* 630 * We need to check if we have a .dynsym, so that we can handle the 631 * .plt, synthesizing its symbols, that aren't on the symtabs (be it 632 * .dynsym or .symtab). 633 * And always look at the original dso, not at debuginfo packages, that 634 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 635 */ 636 int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss) 637 { 638 uint32_t idx; 639 GElf_Sym sym; 640 u64 plt_offset, plt_header_size, plt_entry_size; 641 GElf_Shdr shdr_plt, plt_sec_shdr; 642 struct symbol *f, *plt_sym; 643 GElf_Shdr shdr_rel_plt, shdr_dynsym; 644 Elf_Data *syms, *symstrs; 645 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym; 646 GElf_Ehdr ehdr; 647 char sympltname[1024]; 648 Elf *elf; 649 int nr = 0, err = -1; 650 struct rel_info ri = { .is_rela = false }; 651 bool lazy_plt; 652 653 elf = ss->elf; 654 ehdr = ss->ehdr; 655 656 if (!elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL)) 657 return 0; 658 659 /* 660 * A symbol from a previous section (e.g. .init) can have been expanded 661 * by symbols__fixup_end() to overlap .plt. Truncate it before adding 662 * a symbol for .plt header. 
663 */ 664 f = dso__find_symbol_nocache(dso, shdr_plt.sh_offset); 665 if (f && f->start < shdr_plt.sh_offset && f->end > shdr_plt.sh_offset) 666 f->end = shdr_plt.sh_offset; 667 668 if (!get_plt_sizes(dso, &ehdr, &shdr_plt, &plt_header_size, &plt_entry_size)) 669 return 0; 670 671 /* Add a symbol for .plt header */ 672 plt_sym = symbol__new(shdr_plt.sh_offset, plt_header_size, STB_GLOBAL, STT_FUNC, ".plt"); 673 if (!plt_sym) 674 goto out_elf_end; 675 symbols__insert(&dso->symbols, plt_sym); 676 677 /* Only x86 has .plt.got */ 678 if (machine_is_x86(ehdr.e_machine) && 679 dso__synthesize_plt_got_symbols(dso, elf, &ehdr, sympltname, sizeof(sympltname))) 680 goto out_elf_end; 681 682 /* Only x86 has .plt.sec */ 683 if (machine_is_x86(ehdr.e_machine) && 684 elf_section_by_name(elf, &ehdr, &plt_sec_shdr, ".plt.sec", NULL)) { 685 if (!get_plt_sizes(dso, &ehdr, &plt_sec_shdr, &plt_header_size, &plt_entry_size)) 686 return 0; 687 /* Extend .plt symbol to entire .plt */ 688 plt_sym->end = plt_sym->start + shdr_plt.sh_size; 689 /* Use .plt.sec offset */ 690 plt_offset = plt_sec_shdr.sh_offset; 691 lazy_plt = false; 692 } else { 693 plt_offset = shdr_plt.sh_offset; 694 lazy_plt = true; 695 } 696 697 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, 698 ".rela.plt", NULL); 699 if (scn_plt_rel == NULL) { 700 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, 701 ".rel.plt", NULL); 702 if (scn_plt_rel == NULL) 703 return 0; 704 } 705 706 if (shdr_rel_plt.sh_type != SHT_RELA && 707 shdr_rel_plt.sh_type != SHT_REL) 708 return 0; 709 710 if (!shdr_rel_plt.sh_link) 711 return 0; 712 713 if (shdr_rel_plt.sh_link == ss->dynsym_idx) { 714 scn_dynsym = ss->dynsym; 715 shdr_dynsym = ss->dynshdr; 716 } else if (shdr_rel_plt.sh_link == ss->symtab_idx) { 717 /* 718 * A static executable can have a .plt due to IFUNCs, in which 719 * case .symtab is used not .dynsym. 720 */ 721 scn_dynsym = ss->symtab; 722 shdr_dynsym = ss->symshdr; 723 } else { 724 goto out_elf_end; 725 } 726 727 if (!scn_dynsym) 728 return 0; 729 730 /* 731 * Fetch the relocation section to find the idxes to the GOT 732 * and the symbols in the .dynsym they refer to. 733 */ 734 ri.reldata = elf_getdata(scn_plt_rel, NULL); 735 if (!ri.reldata) 736 goto out_elf_end; 737 738 syms = elf_getdata(scn_dynsym, NULL); 739 if (syms == NULL) 740 goto out_elf_end; 741 742 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link); 743 if (scn_symstrs == NULL) 744 goto out_elf_end; 745 746 symstrs = elf_getdata(scn_symstrs, NULL); 747 if (symstrs == NULL) 748 goto out_elf_end; 749 750 if (symstrs->d_size == 0) 751 goto out_elf_end; 752 753 ri.nr_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; 754 755 ri.is_rela = shdr_rel_plt.sh_type == SHT_RELA; 756 757 if (lazy_plt) { 758 /* 759 * Assume a .plt with the same number of entries as the number 760 * of relocation entries is not lazy and does not have a header. 761 */ 762 if (ri.nr_entries * plt_entry_size == shdr_plt.sh_size) 763 dso__delete_symbol(dso, plt_sym); 764 else 765 plt_offset += plt_header_size; 766 } 767 768 /* 769 * x86 doesn't insert IFUNC relocations in .plt order, so sort to get 770 * back in order. 
771 */ 772 if (machine_is_x86(ehdr.e_machine) && sort_rel(&ri)) 773 goto out_elf_end; 774 775 for (idx = 0; idx < ri.nr_entries; idx++) { 776 const char *elf_name = NULL; 777 char *demangled = NULL; 778 779 gelf_getsym(syms, get_rel_symidx(&ri, idx), &sym); 780 781 elf_name = elf_sym__name(&sym, symstrs); 782 demangled = demangle_sym(dso, 0, elf_name); 783 if (demangled) 784 elf_name = demangled; 785 if (*elf_name) 786 snprintf(sympltname, sizeof(sympltname), "%s@plt", elf_name); 787 else if (!get_ifunc_name(elf, dso, &ehdr, &ri, sympltname, sizeof(sympltname))) 788 snprintf(sympltname, sizeof(sympltname), 789 "offset_%#" PRIx64 "@plt", plt_offset); 790 free(demangled); 791 792 f = symbol__new(plt_offset, plt_entry_size, STB_GLOBAL, STT_FUNC, sympltname); 793 if (!f) 794 goto out_elf_end; 795 796 plt_offset += plt_entry_size; 797 symbols__insert(&dso->symbols, f); 798 ++nr; 799 } 800 801 err = 0; 802 out_elf_end: 803 exit_rel(&ri); 804 if (err == 0) 805 return nr; 806 pr_debug("%s: problems reading %s PLT info.\n", 807 __func__, dso->long_name); 808 return 0; 809 } 810 811 char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name) 812 { 813 return demangle_sym(dso, kmodule, elf_name); 814 } 815 816 /* 817 * Align offset to 4 bytes as needed for note name and descriptor data. 818 */ 819 #define NOTE_ALIGN(n) (((n) + 3) & -4U) 820 821 static int elf_read_build_id(Elf *elf, void *bf, size_t size) 822 { 823 int err = -1; 824 GElf_Ehdr ehdr; 825 GElf_Shdr shdr; 826 Elf_Data *data; 827 Elf_Scn *sec; 828 Elf_Kind ek; 829 void *ptr; 830 831 if (size < BUILD_ID_SIZE) 832 goto out; 833 834 ek = elf_kind(elf); 835 if (ek != ELF_K_ELF) 836 goto out; 837 838 if (gelf_getehdr(elf, &ehdr) == NULL) { 839 pr_err("%s: cannot get elf header.\n", __func__); 840 goto out; 841 } 842 843 /* 844 * Check following sections for notes: 845 * '.note.gnu.build-id' 846 * '.notes' 847 * '.note' (VDSO specific) 848 */ 849 do { 850 sec = elf_section_by_name(elf, &ehdr, &shdr, 851 ".note.gnu.build-id", NULL); 852 if (sec) 853 break; 854 855 sec = elf_section_by_name(elf, &ehdr, &shdr, 856 ".notes", NULL); 857 if (sec) 858 break; 859 860 sec = elf_section_by_name(elf, &ehdr, &shdr, 861 ".note", NULL); 862 if (sec) 863 break; 864 865 return err; 866 867 } while (0); 868 869 data = elf_getdata(sec, NULL); 870 if (data == NULL) 871 goto out; 872 873 ptr = data->d_buf; 874 while (ptr < (data->d_buf + data->d_size)) { 875 GElf_Nhdr *nhdr = ptr; 876 size_t namesz = NOTE_ALIGN(nhdr->n_namesz), 877 descsz = NOTE_ALIGN(nhdr->n_descsz); 878 const char *name; 879 880 ptr += sizeof(*nhdr); 881 name = ptr; 882 ptr += namesz; 883 if (nhdr->n_type == NT_GNU_BUILD_ID && 884 nhdr->n_namesz == sizeof("GNU")) { 885 if (memcmp(name, "GNU", sizeof("GNU")) == 0) { 886 size_t sz = min(size, descsz); 887 memcpy(bf, ptr, sz); 888 memset(bf + sz, 0, size - sz); 889 err = sz; 890 break; 891 } 892 } 893 ptr += descsz; 894 } 895 896 out: 897 return err; 898 } 899 900 #ifdef HAVE_LIBBFD_BUILDID_SUPPORT 901 902 static int read_build_id(const char *filename, struct build_id *bid) 903 { 904 size_t size = sizeof(bid->data); 905 int err = -1; 906 bfd *abfd; 907 908 abfd = bfd_openr(filename, NULL); 909 if (!abfd) 910 return -1; 911 912 if (!bfd_check_format(abfd, bfd_object)) { 913 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename); 914 goto out_close; 915 } 916 917 if (!abfd->build_id || abfd->build_id->size > size) 918 goto out_close; 919 920 memcpy(bid->data, abfd->build_id->data, abfd->build_id->size); 921 memset(bid->data + 
abfd->build_id->size, 0, size - abfd->build_id->size); 922 err = bid->size = abfd->build_id->size; 923 924 out_close: 925 bfd_close(abfd); 926 return err; 927 } 928 929 #else // HAVE_LIBBFD_BUILDID_SUPPORT 930 931 static int read_build_id(const char *filename, struct build_id *bid) 932 { 933 size_t size = sizeof(bid->data); 934 int fd, err = -1; 935 Elf *elf; 936 937 if (size < BUILD_ID_SIZE) 938 goto out; 939 940 fd = open(filename, O_RDONLY); 941 if (fd < 0) 942 goto out; 943 944 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 945 if (elf == NULL) { 946 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); 947 goto out_close; 948 } 949 950 err = elf_read_build_id(elf, bid->data, size); 951 if (err > 0) 952 bid->size = err; 953 954 elf_end(elf); 955 out_close: 956 close(fd); 957 out: 958 return err; 959 } 960 961 #endif // HAVE_LIBBFD_BUILDID_SUPPORT 962 963 int filename__read_build_id(const char *filename, struct build_id *bid) 964 { 965 struct kmod_path m = { .name = NULL, }; 966 char path[PATH_MAX]; 967 int err; 968 969 if (!filename) 970 return -EFAULT; 971 972 err = kmod_path__parse(&m, filename); 973 if (err) 974 return -1; 975 976 if (m.comp) { 977 int error = 0, fd; 978 979 fd = filename__decompress(filename, path, sizeof(path), m.comp, &error); 980 if (fd < 0) { 981 pr_debug("Failed to decompress (error %d) %s\n", 982 error, filename); 983 return -1; 984 } 985 close(fd); 986 filename = path; 987 } 988 989 err = read_build_id(filename, bid); 990 991 if (m.comp) 992 unlink(filename); 993 return err; 994 } 995 996 int sysfs__read_build_id(const char *filename, struct build_id *bid) 997 { 998 size_t size = sizeof(bid->data); 999 int fd, err = -1; 1000 1001 fd = open(filename, O_RDONLY); 1002 if (fd < 0) 1003 goto out; 1004 1005 while (1) { 1006 char bf[BUFSIZ]; 1007 GElf_Nhdr nhdr; 1008 size_t namesz, descsz; 1009 1010 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) 1011 break; 1012 1013 namesz = NOTE_ALIGN(nhdr.n_namesz); 1014 descsz = NOTE_ALIGN(nhdr.n_descsz); 1015 if (nhdr.n_type == NT_GNU_BUILD_ID && 1016 nhdr.n_namesz == sizeof("GNU")) { 1017 if (read(fd, bf, namesz) != (ssize_t)namesz) 1018 break; 1019 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { 1020 size_t sz = min(descsz, size); 1021 if (read(fd, bid->data, sz) == (ssize_t)sz) { 1022 memset(bid->data + sz, 0, size - sz); 1023 bid->size = sz; 1024 err = 0; 1025 break; 1026 } 1027 } else if (read(fd, bf, descsz) != (ssize_t)descsz) 1028 break; 1029 } else { 1030 int n = namesz + descsz; 1031 1032 if (n > (int)sizeof(bf)) { 1033 n = sizeof(bf); 1034 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n", 1035 __func__, filename, nhdr.n_namesz, nhdr.n_descsz); 1036 } 1037 if (read(fd, bf, n) != n) 1038 break; 1039 } 1040 } 1041 close(fd); 1042 out: 1043 return err; 1044 } 1045 1046 #ifdef HAVE_LIBBFD_SUPPORT 1047 1048 int filename__read_debuglink(const char *filename, char *debuglink, 1049 size_t size) 1050 { 1051 int err = -1; 1052 asection *section; 1053 bfd *abfd; 1054 1055 abfd = bfd_openr(filename, NULL); 1056 if (!abfd) 1057 return -1; 1058 1059 if (!bfd_check_format(abfd, bfd_object)) { 1060 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename); 1061 goto out_close; 1062 } 1063 1064 section = bfd_get_section_by_name(abfd, ".gnu_debuglink"); 1065 if (!section) 1066 goto out_close; 1067 1068 if (section->size > size) 1069 goto out_close; 1070 1071 if (!bfd_get_section_contents(abfd, section, debuglink, 0, 1072 section->size)) 1073 goto out_close; 1074 1075 err = 
0; 1076 1077 out_close: 1078 bfd_close(abfd); 1079 return err; 1080 } 1081 1082 #else 1083 1084 int filename__read_debuglink(const char *filename, char *debuglink, 1085 size_t size) 1086 { 1087 int fd, err = -1; 1088 Elf *elf; 1089 GElf_Ehdr ehdr; 1090 GElf_Shdr shdr; 1091 Elf_Data *data; 1092 Elf_Scn *sec; 1093 Elf_Kind ek; 1094 1095 fd = open(filename, O_RDONLY); 1096 if (fd < 0) 1097 goto out; 1098 1099 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 1100 if (elf == NULL) { 1101 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); 1102 goto out_close; 1103 } 1104 1105 ek = elf_kind(elf); 1106 if (ek != ELF_K_ELF) 1107 goto out_elf_end; 1108 1109 if (gelf_getehdr(elf, &ehdr) == NULL) { 1110 pr_err("%s: cannot get elf header.\n", __func__); 1111 goto out_elf_end; 1112 } 1113 1114 sec = elf_section_by_name(elf, &ehdr, &shdr, 1115 ".gnu_debuglink", NULL); 1116 if (sec == NULL) 1117 goto out_elf_end; 1118 1119 data = elf_getdata(sec, NULL); 1120 if (data == NULL) 1121 goto out_elf_end; 1122 1123 /* the start of this section is a zero-terminated string */ 1124 strncpy(debuglink, data->d_buf, size); 1125 1126 err = 0; 1127 1128 out_elf_end: 1129 elf_end(elf); 1130 out_close: 1131 close(fd); 1132 out: 1133 return err; 1134 } 1135 1136 #endif 1137 1138 static int dso__swap_init(struct dso *dso, unsigned char eidata) 1139 { 1140 static unsigned int const endian = 1; 1141 1142 dso->needs_swap = DSO_SWAP__NO; 1143 1144 switch (eidata) { 1145 case ELFDATA2LSB: 1146 /* We are big endian, DSO is little endian. */ 1147 if (*(unsigned char const *)&endian != 1) 1148 dso->needs_swap = DSO_SWAP__YES; 1149 break; 1150 1151 case ELFDATA2MSB: 1152 /* We are little endian, DSO is big endian. */ 1153 if (*(unsigned char const *)&endian != 0) 1154 dso->needs_swap = DSO_SWAP__YES; 1155 break; 1156 1157 default: 1158 pr_err("unrecognized DSO data encoding %d\n", eidata); 1159 return -EINVAL; 1160 } 1161 1162 return 0; 1163 } 1164 1165 bool symsrc__possibly_runtime(struct symsrc *ss) 1166 { 1167 return ss->dynsym || ss->opdsec; 1168 } 1169 1170 bool symsrc__has_symtab(struct symsrc *ss) 1171 { 1172 return ss->symtab != NULL; 1173 } 1174 1175 void symsrc__destroy(struct symsrc *ss) 1176 { 1177 zfree(&ss->name); 1178 elf_end(ss->elf); 1179 close(ss->fd); 1180 } 1181 1182 bool elf__needs_adjust_symbols(GElf_Ehdr ehdr) 1183 { 1184 /* 1185 * Usually vmlinux is an ELF file with type ET_EXEC for most 1186 * architectures; except Arm64 kernel is linked with option 1187 * '-share', so need to check type ET_DYN. 
1188 */ 1189 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL || 1190 ehdr.e_type == ET_DYN; 1191 } 1192 1193 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, 1194 enum dso_binary_type type) 1195 { 1196 GElf_Ehdr ehdr; 1197 Elf *elf; 1198 int fd; 1199 1200 if (dso__needs_decompress(dso)) { 1201 fd = dso__decompress_kmodule_fd(dso, name); 1202 if (fd < 0) 1203 return -1; 1204 1205 type = dso->symtab_type; 1206 } else { 1207 fd = open(name, O_RDONLY); 1208 if (fd < 0) { 1209 dso->load_errno = errno; 1210 return -1; 1211 } 1212 } 1213 1214 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 1215 if (elf == NULL) { 1216 pr_debug("%s: cannot read %s ELF file.\n", __func__, name); 1217 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF; 1218 goto out_close; 1219 } 1220 1221 if (gelf_getehdr(elf, &ehdr) == NULL) { 1222 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF; 1223 pr_debug("%s: cannot get elf header.\n", __func__); 1224 goto out_elf_end; 1225 } 1226 1227 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) { 1228 dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR; 1229 goto out_elf_end; 1230 } 1231 1232 /* Always reject images with a mismatched build-id: */ 1233 if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) { 1234 u8 build_id[BUILD_ID_SIZE]; 1235 struct build_id bid; 1236 int size; 1237 1238 size = elf_read_build_id(elf, build_id, BUILD_ID_SIZE); 1239 if (size <= 0) { 1240 dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID; 1241 goto out_elf_end; 1242 } 1243 1244 build_id__init(&bid, build_id, size); 1245 if (!dso__build_id_equal(dso, &bid)) { 1246 pr_debug("%s: build id mismatch for %s.\n", __func__, name); 1247 dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID; 1248 goto out_elf_end; 1249 } 1250 } 1251 1252 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64); 1253 1254 ss->symtab_idx = 0; 1255 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab", 1256 &ss->symtab_idx); 1257 if (ss->symshdr.sh_type != SHT_SYMTAB) 1258 ss->symtab = NULL; 1259 1260 ss->dynsym_idx = 0; 1261 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym", 1262 &ss->dynsym_idx); 1263 if (ss->dynshdr.sh_type != SHT_DYNSYM) 1264 ss->dynsym = NULL; 1265 1266 ss->opdidx = 0; 1267 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd", 1268 &ss->opdidx); 1269 if (ss->opdshdr.sh_type != SHT_PROGBITS) 1270 ss->opdsec = NULL; 1271 1272 if (dso->kernel == DSO_SPACE__USER) 1273 ss->adjust_symbols = true; 1274 else 1275 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr); 1276 1277 ss->name = strdup(name); 1278 if (!ss->name) { 1279 dso->load_errno = errno; 1280 goto out_elf_end; 1281 } 1282 1283 ss->elf = elf; 1284 ss->fd = fd; 1285 ss->ehdr = ehdr; 1286 ss->type = type; 1287 1288 return 0; 1289 1290 out_elf_end: 1291 elf_end(elf); 1292 out_close: 1293 close(fd); 1294 return -1; 1295 } 1296 1297 /** 1298 * ref_reloc_sym_not_found - has kernel relocation symbol been found. 1299 * @kmap: kernel maps and relocation reference symbol 1300 * 1301 * This function returns %true if we are dealing with the kernel maps and the 1302 * relocation reference symbol has not yet been found. Otherwise %false is 1303 * returned. 1304 */ 1305 static bool ref_reloc_sym_not_found(struct kmap *kmap) 1306 { 1307 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && 1308 !kmap->ref_reloc_sym->unrelocated_addr; 1309 } 1310 1311 /** 1312 * ref_reloc - kernel relocation offset. 
 * @kmap: kernel maps and relocation reference symbol
 *
 * This function returns the offset of kernel addresses as determined by using
 * the relocation reference symbol i.e. if the kernel has not been relocated
 * then the return value is zero.
 */
static u64 ref_reloc(struct kmap *kmap)
{
	if (kmap && kmap->ref_reloc_sym &&
	    kmap->ref_reloc_sym->unrelocated_addr)
		return kmap->ref_reloc_sym->addr -
		       kmap->ref_reloc_sym->unrelocated_addr;
	return 0;
}

void __weak arch__sym_update(struct symbol *s __maybe_unused,
			     GElf_Sym *sym __maybe_unused) { }

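/*
 * Worked example (hypothetical values) of the "adjust symbol to map to file
 * offset" step performed below when adjust_kernel_syms is set: the symbol
 * value is rebased from the section's virtual address to its file offset in
 * the DSO.
 *
 *	shdr->sh_addr   = 0xffffffff81000000	section load address
 *	shdr->sh_offset = 0x200000		section offset in the file
 *	sym->st_value   = 0xffffffff810123a0	symbol virtual address
 *
 *	sym->st_value -= shdr->sh_addr - shdr->sh_offset;
 *	sym->st_value == 0x2123a0		symbol file offset
 *
 * If the running kernel was relocated, ref_reloc() above contributes the
 * additional shift (ref_reloc_sym->addr - unrelocated_addr) when the kernel
 * map itself is rebuilt.
 */
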
static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
				      GElf_Sym *sym, GElf_Shdr *shdr,
				      struct maps *kmaps, struct kmap *kmap,
				      struct dso **curr_dsop, struct map **curr_mapp,
				      const char *section_name,
				      bool adjust_kernel_syms, bool kmodule, bool *remap_kernel)
{
	struct dso *curr_dso = *curr_dsop;
	struct map *curr_map;
	char dso_name[PATH_MAX];

	/* Adjust symbol to map to file offset */
	if (adjust_kernel_syms)
		sym->st_value -= shdr->sh_addr - shdr->sh_offset;

	if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0)
		return 0;

	if (strcmp(section_name, ".text") == 0) {
		/*
		 * The initial kernel mapping is based on
		 * kallsyms and identity maps.  Overwrite it to
		 * map to the kernel dso.
		 */
		if (*remap_kernel && dso->kernel && !kmodule) {
			*remap_kernel = false;
			map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
			map__set_end(map, map__start(map) + shdr->sh_size);
			map__set_pgoff(map, shdr->sh_offset);
			map__set_map_ip(map, map__dso_map_ip);
			map__set_unmap_ip(map, map__dso_unmap_ip);
			/* Ensure maps are correctly ordered */
			if (kmaps) {
				int err;

				map__get(map);
				maps__remove(kmaps, map);
				err = maps__insert(kmaps, map);
				map__put(map);
				if (err)
					return err;
			}
		}

		/*
		 * The initial module mapping is based on
		 * /proc/modules mapped to offset zero.
		 * Overwrite it to map to the module dso.
		 */
		if (*remap_kernel && kmodule) {
			*remap_kernel = false;
			map__set_pgoff(map, shdr->sh_offset);
		}

		*curr_mapp = map;
		*curr_dsop = dso;
		return 0;
	}

	if (!kmap)
		return 0;

	snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);

	curr_map = maps__find_by_name(kmaps, dso_name);
	if (curr_map == NULL) {
		u64 start = sym->st_value;

		if (kmodule)
			start += map__start(map) + shdr->sh_offset;

		curr_dso = dso__new(dso_name);
		if (curr_dso == NULL)
			return -1;
		curr_dso->kernel = dso->kernel;
		curr_dso->long_name = dso->long_name;
		curr_dso->long_name_len = dso->long_name_len;
		curr_map = map__new2(start, curr_dso);
		dso__put(curr_dso);
		if (curr_map == NULL)
			return -1;

		if (curr_dso->kernel)
			map__kmap(curr_map)->kmaps = kmaps;

		if (adjust_kernel_syms) {
			map__set_start(curr_map, shdr->sh_addr + ref_reloc(kmap));
			map__set_end(curr_map, map__start(curr_map) + shdr->sh_size);
			map__set_pgoff(curr_map, shdr->sh_offset);
		} else {
			map__set_map_ip(curr_map, identity__map_ip);
			map__set_unmap_ip(curr_map, identity__map_ip);
		}
		curr_dso->symtab_type = dso->symtab_type;
		if (maps__insert(kmaps, curr_map))
			return -1;
		/*
		 * Add it before we drop the reference to curr_map, i.e. while
		 * we still are sure to have a reference to this DSO via
		 * *curr_map->dso.
		 */
		dsos__add(&maps__machine(kmaps)->dsos, curr_dso);
		/* kmaps already got it */
		map__put(curr_map);
		dso__set_loaded(curr_dso);
		*curr_mapp = curr_map;
		*curr_dsop = curr_dso;
	} else
		*curr_dsop = map__dso(curr_map);

	return 0;
}

static int
dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
		       struct symsrc *runtime_ss, int kmodule, int dynsym)
{
	struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
	struct maps *kmaps = kmap ?
map__kmaps(map) : NULL; 1450 struct map *curr_map = map; 1451 struct dso *curr_dso = dso; 1452 Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym; 1453 uint32_t nr_syms; 1454 int err = -1; 1455 uint32_t idx; 1456 GElf_Ehdr ehdr; 1457 GElf_Shdr shdr; 1458 GElf_Shdr tshdr; 1459 Elf_Data *syms, *opddata = NULL; 1460 GElf_Sym sym; 1461 Elf_Scn *sec, *sec_strndx; 1462 Elf *elf; 1463 int nr = 0; 1464 bool remap_kernel = false, adjust_kernel_syms = false; 1465 1466 if (kmap && !kmaps) 1467 return -1; 1468 1469 elf = syms_ss->elf; 1470 ehdr = syms_ss->ehdr; 1471 if (dynsym) { 1472 sec = syms_ss->dynsym; 1473 shdr = syms_ss->dynshdr; 1474 } else { 1475 sec = syms_ss->symtab; 1476 shdr = syms_ss->symshdr; 1477 } 1478 1479 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr, 1480 ".text", NULL)) 1481 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset; 1482 1483 if (runtime_ss->opdsec) 1484 opddata = elf_rawdata(runtime_ss->opdsec, NULL); 1485 1486 syms = elf_getdata(sec, NULL); 1487 if (syms == NULL) 1488 goto out_elf_end; 1489 1490 sec = elf_getscn(elf, shdr.sh_link); 1491 if (sec == NULL) 1492 goto out_elf_end; 1493 1494 symstrs = elf_getdata(sec, NULL); 1495 if (symstrs == NULL) 1496 goto out_elf_end; 1497 1498 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx); 1499 if (sec_strndx == NULL) 1500 goto out_elf_end; 1501 1502 secstrs_run = elf_getdata(sec_strndx, NULL); 1503 if (secstrs_run == NULL) 1504 goto out_elf_end; 1505 1506 sec_strndx = elf_getscn(elf, ehdr.e_shstrndx); 1507 if (sec_strndx == NULL) 1508 goto out_elf_end; 1509 1510 secstrs_sym = elf_getdata(sec_strndx, NULL); 1511 if (secstrs_sym == NULL) 1512 goto out_elf_end; 1513 1514 nr_syms = shdr.sh_size / shdr.sh_entsize; 1515 1516 memset(&sym, 0, sizeof(sym)); 1517 1518 /* 1519 * The kernel relocation symbol is needed in advance in order to adjust 1520 * kernel maps correctly. 1521 */ 1522 if (ref_reloc_sym_not_found(kmap)) { 1523 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { 1524 const char *elf_name = elf_sym__name(&sym, symstrs); 1525 1526 if (strcmp(elf_name, kmap->ref_reloc_sym->name)) 1527 continue; 1528 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; 1529 map__set_reloc(map, kmap->ref_reloc_sym->addr - kmap->ref_reloc_sym->unrelocated_addr); 1530 break; 1531 } 1532 } 1533 1534 /* 1535 * Handle any relocation of vdso necessary because older kernels 1536 * attempted to prelink vdso to its virtual address. 1537 */ 1538 if (dso__is_vdso(dso)) 1539 map__set_reloc(map, map__start(map) - dso->text_offset); 1540 1541 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap); 1542 /* 1543 * Initial kernel and module mappings do not map to the dso. 1544 * Flag the fixups. 
1545 */ 1546 if (dso->kernel) { 1547 remap_kernel = true; 1548 adjust_kernel_syms = dso->adjust_symbols; 1549 } 1550 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { 1551 struct symbol *f; 1552 const char *elf_name = elf_sym__name(&sym, symstrs); 1553 char *demangled = NULL; 1554 int is_label = elf_sym__is_label(&sym); 1555 const char *section_name; 1556 bool used_opd = false; 1557 1558 if (!is_label && !elf_sym__filter(&sym)) 1559 continue; 1560 1561 /* Reject ARM ELF "mapping symbols": these aren't unique and 1562 * don't identify functions, so will confuse the profile 1563 * output: */ 1564 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) { 1565 if (elf_name[0] == '$' && strchr("adtx", elf_name[1]) 1566 && (elf_name[2] == '\0' || elf_name[2] == '.')) 1567 continue; 1568 } 1569 1570 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) { 1571 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr; 1572 u64 *opd = opddata->d_buf + offset; 1573 sym.st_value = DSO__SWAP(dso, u64, *opd); 1574 sym.st_shndx = elf_addr_to_index(runtime_ss->elf, 1575 sym.st_value); 1576 used_opd = true; 1577 } 1578 1579 /* 1580 * When loading symbols in a data mapping, ABS symbols (which 1581 * has a value of SHN_ABS in its st_shndx) failed at 1582 * elf_getscn(). And it marks the loading as a failure so 1583 * already loaded symbols cannot be fixed up. 1584 * 1585 * I'm not sure what should be done. Just ignore them for now. 1586 * - Namhyung Kim 1587 */ 1588 if (sym.st_shndx == SHN_ABS) 1589 continue; 1590 1591 sec = elf_getscn(syms_ss->elf, sym.st_shndx); 1592 if (!sec) 1593 goto out_elf_end; 1594 1595 gelf_getshdr(sec, &shdr); 1596 1597 /* 1598 * If the attribute bit SHF_ALLOC is not set, the section 1599 * doesn't occupy memory during process execution. 1600 * E.g. ".gnu.warning.*" section is used by linker to generate 1601 * warnings when calling deprecated functions, the symbols in 1602 * the section aren't loaded to memory during process execution, 1603 * so skip them. 1604 */ 1605 if (!(shdr.sh_flags & SHF_ALLOC)) 1606 continue; 1607 1608 secstrs = secstrs_sym; 1609 1610 /* 1611 * We have to fallback to runtime when syms' section header has 1612 * NOBITS set. NOBITS results in file offset (sh_offset) not 1613 * being incremented. So sh_offset used below has different 1614 * values for syms (invalid) and runtime (valid). 
1615 */ 1616 if (shdr.sh_type == SHT_NOBITS) { 1617 sec = elf_getscn(runtime_ss->elf, sym.st_shndx); 1618 if (!sec) 1619 goto out_elf_end; 1620 1621 gelf_getshdr(sec, &shdr); 1622 secstrs = secstrs_run; 1623 } 1624 1625 if (is_label && !elf_sec__filter(&shdr, secstrs)) 1626 continue; 1627 1628 section_name = elf_sec__name(&shdr, secstrs); 1629 1630 /* On ARM, symbols for thumb functions have 1 added to 1631 * the symbol address as a flag - remove it */ 1632 if ((ehdr.e_machine == EM_ARM) && 1633 (GELF_ST_TYPE(sym.st_info) == STT_FUNC) && 1634 (sym.st_value & 1)) 1635 --sym.st_value; 1636 1637 if (dso->kernel) { 1638 if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map, 1639 section_name, adjust_kernel_syms, kmodule, &remap_kernel)) 1640 goto out_elf_end; 1641 } else if ((used_opd && runtime_ss->adjust_symbols) || 1642 (!used_opd && syms_ss->adjust_symbols)) { 1643 GElf_Phdr phdr; 1644 1645 if (elf_read_program_header(runtime_ss->elf, 1646 (u64)sym.st_value, &phdr)) { 1647 pr_debug4("%s: failed to find program header for " 1648 "symbol: %s st_value: %#" PRIx64 "\n", 1649 __func__, elf_name, (u64)sym.st_value); 1650 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " 1651 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", 1652 __func__, (u64)sym.st_value, (u64)shdr.sh_addr, 1653 (u64)shdr.sh_offset); 1654 /* 1655 * Fail to find program header, let's rollback 1656 * to use shdr.sh_addr and shdr.sh_offset to 1657 * calibrate symbol's file address, though this 1658 * is not necessary for normal C ELF file, we 1659 * still need to handle java JIT symbols in this 1660 * case. 1661 */ 1662 sym.st_value -= shdr.sh_addr - shdr.sh_offset; 1663 } else { 1664 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " 1665 "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n", 1666 __func__, (u64)sym.st_value, (u64)phdr.p_vaddr, 1667 (u64)phdr.p_offset); 1668 sym.st_value -= phdr.p_vaddr - phdr.p_offset; 1669 } 1670 } 1671 1672 demangled = demangle_sym(dso, kmodule, elf_name); 1673 if (demangled != NULL) 1674 elf_name = demangled; 1675 1676 f = symbol__new(sym.st_value, sym.st_size, 1677 GELF_ST_BIND(sym.st_info), 1678 GELF_ST_TYPE(sym.st_info), elf_name); 1679 free(demangled); 1680 if (!f) 1681 goto out_elf_end; 1682 1683 arch__sym_update(f, &sym); 1684 1685 __symbols__insert(&curr_dso->symbols, f, dso->kernel); 1686 nr++; 1687 } 1688 1689 /* 1690 * For misannotated, zeroed, ASM function sizes. 1691 */ 1692 if (nr > 0) { 1693 symbols__fixup_end(&dso->symbols, false); 1694 symbols__fixup_duplicate(&dso->symbols); 1695 if (kmap) { 1696 /* 1697 * We need to fixup this here too because we create new 1698 * maps here, for things like vsyscall sections. 1699 */ 1700 maps__fixup_end(kmaps); 1701 } 1702 } 1703 err = nr; 1704 out_elf_end: 1705 return err; 1706 } 1707 1708 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, 1709 struct symsrc *runtime_ss, int kmodule) 1710 { 1711 int nr = 0; 1712 int err = -1; 1713 1714 dso->symtab_type = syms_ss->type; 1715 dso->is_64_bit = syms_ss->is_64_bit; 1716 dso->rel = syms_ss->ehdr.e_type == ET_REL; 1717 1718 /* 1719 * Modules may already have symbols from kallsyms, but those symbols 1720 * have the wrong values for the dso maps, so remove them. 1721 */ 1722 if (kmodule && syms_ss->symtab) 1723 symbols__delete(&dso->symbols); 1724 1725 if (!syms_ss->symtab) { 1726 /* 1727 * If the vmlinux is stripped, fail so we will fall back 1728 * to using kallsyms. The vmlinux runtime symbols aren't 1729 * of much use. 
1730 */ 1731 if (dso->kernel) 1732 return err; 1733 } else { 1734 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss, 1735 kmodule, 0); 1736 if (err < 0) 1737 return err; 1738 nr = err; 1739 } 1740 1741 if (syms_ss->dynsym) { 1742 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss, 1743 kmodule, 1); 1744 if (err < 0) 1745 return err; 1746 err += nr; 1747 } 1748 1749 return err; 1750 } 1751 1752 static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data) 1753 { 1754 GElf_Phdr phdr; 1755 size_t i, phdrnum; 1756 int err; 1757 u64 sz; 1758 1759 if (elf_getphdrnum(elf, &phdrnum)) 1760 return -1; 1761 1762 for (i = 0; i < phdrnum; i++) { 1763 if (gelf_getphdr(elf, i, &phdr) == NULL) 1764 return -1; 1765 if (phdr.p_type != PT_LOAD) 1766 continue; 1767 if (exe) { 1768 if (!(phdr.p_flags & PF_X)) 1769 continue; 1770 } else { 1771 if (!(phdr.p_flags & PF_R)) 1772 continue; 1773 } 1774 sz = min(phdr.p_memsz, phdr.p_filesz); 1775 if (!sz) 1776 continue; 1777 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data); 1778 if (err) 1779 return err; 1780 } 1781 return 0; 1782 } 1783 1784 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, 1785 bool *is_64_bit) 1786 { 1787 int err; 1788 Elf *elf; 1789 1790 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 1791 if (elf == NULL) 1792 return -1; 1793 1794 if (is_64_bit) 1795 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64); 1796 1797 err = elf_read_maps(elf, exe, mapfn, data); 1798 1799 elf_end(elf); 1800 return err; 1801 } 1802 1803 enum dso_type dso__type_fd(int fd) 1804 { 1805 enum dso_type dso_type = DSO__TYPE_UNKNOWN; 1806 GElf_Ehdr ehdr; 1807 Elf_Kind ek; 1808 Elf *elf; 1809 1810 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); 1811 if (elf == NULL) 1812 goto out; 1813 1814 ek = elf_kind(elf); 1815 if (ek != ELF_K_ELF) 1816 goto out_end; 1817 1818 if (gelf_getclass(elf) == ELFCLASS64) { 1819 dso_type = DSO__TYPE_64BIT; 1820 goto out_end; 1821 } 1822 1823 if (gelf_getehdr(elf, &ehdr) == NULL) 1824 goto out_end; 1825 1826 if (ehdr.e_machine == EM_X86_64) 1827 dso_type = DSO__TYPE_X32BIT; 1828 else 1829 dso_type = DSO__TYPE_32BIT; 1830 out_end: 1831 elf_end(elf); 1832 out: 1833 return dso_type; 1834 } 1835 1836 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len) 1837 { 1838 ssize_t r; 1839 size_t n; 1840 int err = -1; 1841 char *buf = malloc(page_size); 1842 1843 if (buf == NULL) 1844 return -1; 1845 1846 if (lseek(to, to_offs, SEEK_SET) != to_offs) 1847 goto out; 1848 1849 if (lseek(from, from_offs, SEEK_SET) != from_offs) 1850 goto out; 1851 1852 while (len) { 1853 n = page_size; 1854 if (len < n) 1855 n = len; 1856 /* Use read because mmap won't work on proc files */ 1857 r = read(from, buf, n); 1858 if (r < 0) 1859 goto out; 1860 if (!r) 1861 break; 1862 n = r; 1863 r = write(to, buf, n); 1864 if (r < 0) 1865 goto out; 1866 if ((size_t)r != n) 1867 goto out; 1868 len -= n; 1869 } 1870 1871 err = 0; 1872 out: 1873 free(buf); 1874 return err; 1875 } 1876 1877 struct kcore { 1878 int fd; 1879 int elfclass; 1880 Elf *elf; 1881 GElf_Ehdr ehdr; 1882 }; 1883 1884 static int kcore__open(struct kcore *kcore, const char *filename) 1885 { 1886 GElf_Ehdr *ehdr; 1887 1888 kcore->fd = open(filename, O_RDONLY); 1889 if (kcore->fd == -1) 1890 return -1; 1891 1892 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL); 1893 if (!kcore->elf) 1894 goto out_close; 1895 1896 kcore->elfclass = gelf_getclass(kcore->elf); 1897 if (kcore->elfclass == ELFCLASSNONE) 1898 goto out_end; 1899 1900 ehdr = 
gelf_getehdr(kcore->elf, &kcore->ehdr); 1901 if (!ehdr) 1902 goto out_end; 1903 1904 return 0; 1905 1906 out_end: 1907 elf_end(kcore->elf); 1908 out_close: 1909 close(kcore->fd); 1910 return -1; 1911 } 1912 1913 static int kcore__init(struct kcore *kcore, char *filename, int elfclass, 1914 bool temp) 1915 { 1916 kcore->elfclass = elfclass; 1917 1918 if (temp) 1919 kcore->fd = mkstemp(filename); 1920 else 1921 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400); 1922 if (kcore->fd == -1) 1923 return -1; 1924 1925 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL); 1926 if (!kcore->elf) 1927 goto out_close; 1928 1929 if (!gelf_newehdr(kcore->elf, elfclass)) 1930 goto out_end; 1931 1932 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr)); 1933 1934 return 0; 1935 1936 out_end: 1937 elf_end(kcore->elf); 1938 out_close: 1939 close(kcore->fd); 1940 unlink(filename); 1941 return -1; 1942 } 1943 1944 static void kcore__close(struct kcore *kcore) 1945 { 1946 elf_end(kcore->elf); 1947 close(kcore->fd); 1948 } 1949 1950 static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count) 1951 { 1952 GElf_Ehdr *ehdr = &to->ehdr; 1953 GElf_Ehdr *kehdr = &from->ehdr; 1954 1955 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT); 1956 ehdr->e_type = kehdr->e_type; 1957 ehdr->e_machine = kehdr->e_machine; 1958 ehdr->e_version = kehdr->e_version; 1959 ehdr->e_entry = 0; 1960 ehdr->e_shoff = 0; 1961 ehdr->e_flags = kehdr->e_flags; 1962 ehdr->e_phnum = count; 1963 ehdr->e_shentsize = 0; 1964 ehdr->e_shnum = 0; 1965 ehdr->e_shstrndx = 0; 1966 1967 if (from->elfclass == ELFCLASS32) { 1968 ehdr->e_phoff = sizeof(Elf32_Ehdr); 1969 ehdr->e_ehsize = sizeof(Elf32_Ehdr); 1970 ehdr->e_phentsize = sizeof(Elf32_Phdr); 1971 } else { 1972 ehdr->e_phoff = sizeof(Elf64_Ehdr); 1973 ehdr->e_ehsize = sizeof(Elf64_Ehdr); 1974 ehdr->e_phentsize = sizeof(Elf64_Phdr); 1975 } 1976 1977 if (!gelf_update_ehdr(to->elf, ehdr)) 1978 return -1; 1979 1980 if (!gelf_newphdr(to->elf, count)) 1981 return -1; 1982 1983 return 0; 1984 } 1985 1986 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset, 1987 u64 addr, u64 len) 1988 { 1989 GElf_Phdr phdr = { 1990 .p_type = PT_LOAD, 1991 .p_flags = PF_R | PF_W | PF_X, 1992 .p_offset = offset, 1993 .p_vaddr = addr, 1994 .p_paddr = 0, 1995 .p_filesz = len, 1996 .p_memsz = len, 1997 .p_align = page_size, 1998 }; 1999 2000 if (!gelf_update_phdr(kcore->elf, idx, &phdr)) 2001 return -1; 2002 2003 return 0; 2004 } 2005 2006 static off_t kcore__write(struct kcore *kcore) 2007 { 2008 return elf_update(kcore->elf, ELF_C_WRITE); 2009 } 2010 2011 struct phdr_data { 2012 off_t offset; 2013 off_t rel; 2014 u64 addr; 2015 u64 len; 2016 struct list_head node; 2017 struct phdr_data *remaps; 2018 }; 2019 2020 struct sym_data { 2021 u64 addr; 2022 struct list_head node; 2023 }; 2024 2025 struct kcore_copy_info { 2026 u64 stext; 2027 u64 etext; 2028 u64 first_symbol; 2029 u64 last_symbol; 2030 u64 first_module; 2031 u64 first_module_symbol; 2032 u64 last_module_symbol; 2033 size_t phnum; 2034 struct list_head phdrs; 2035 struct list_head syms; 2036 }; 2037 2038 #define kcore_copy__for_each_phdr(k, p) \ 2039 list_for_each_entry((p), &(k)->phdrs, node) 2040 2041 static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset) 2042 { 2043 struct phdr_data *p = zalloc(sizeof(*p)); 2044 2045 if (p) { 2046 p->addr = addr; 2047 p->len = len; 2048 p->offset = offset; 2049 } 2050 2051 return p; 2052 } 2053 2054 static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci, 2055 
u64 addr, u64 len, 2056 off_t offset) 2057 { 2058 struct phdr_data *p = phdr_data__new(addr, len, offset); 2059 2060 if (p) 2061 list_add_tail(&p->node, &kci->phdrs); 2062 2063 return p; 2064 } 2065 2066 static void kcore_copy__free_phdrs(struct kcore_copy_info *kci) 2067 { 2068 struct phdr_data *p, *tmp; 2069 2070 list_for_each_entry_safe(p, tmp, &kci->phdrs, node) { 2071 list_del_init(&p->node); 2072 free(p); 2073 } 2074 } 2075 2076 static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci, 2077 u64 addr) 2078 { 2079 struct sym_data *s = zalloc(sizeof(*s)); 2080 2081 if (s) { 2082 s->addr = addr; 2083 list_add_tail(&s->node, &kci->syms); 2084 } 2085 2086 return s; 2087 } 2088 2089 static void kcore_copy__free_syms(struct kcore_copy_info *kci) 2090 { 2091 struct sym_data *s, *tmp; 2092 2093 list_for_each_entry_safe(s, tmp, &kci->syms, node) { 2094 list_del_init(&s->node); 2095 free(s); 2096 } 2097 } 2098 2099 static int kcore_copy__process_kallsyms(void *arg, const char *name, char type, 2100 u64 start) 2101 { 2102 struct kcore_copy_info *kci = arg; 2103 2104 if (!kallsyms__is_function(type)) 2105 return 0; 2106 2107 if (strchr(name, '[')) { 2108 if (!kci->first_module_symbol || start < kci->first_module_symbol) 2109 kci->first_module_symbol = start; 2110 if (start > kci->last_module_symbol) 2111 kci->last_module_symbol = start; 2112 return 0; 2113 } 2114 2115 if (!kci->first_symbol || start < kci->first_symbol) 2116 kci->first_symbol = start; 2117 2118 if (!kci->last_symbol || start > kci->last_symbol) 2119 kci->last_symbol = start; 2120 2121 if (!strcmp(name, "_stext")) { 2122 kci->stext = start; 2123 return 0; 2124 } 2125 2126 if (!strcmp(name, "_etext")) { 2127 kci->etext = start; 2128 return 0; 2129 } 2130 2131 if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start)) 2132 return -1; 2133 2134 return 0; 2135 } 2136 2137 static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci, 2138 const char *dir) 2139 { 2140 char kallsyms_filename[PATH_MAX]; 2141 2142 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir); 2143 2144 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms")) 2145 return -1; 2146 2147 if (kallsyms__parse(kallsyms_filename, kci, 2148 kcore_copy__process_kallsyms) < 0) 2149 return -1; 2150 2151 return 0; 2152 } 2153 2154 static int kcore_copy__process_modules(void *arg, 2155 const char *name __maybe_unused, 2156 u64 start, u64 size __maybe_unused) 2157 { 2158 struct kcore_copy_info *kci = arg; 2159 2160 if (!kci->first_module || start < kci->first_module) 2161 kci->first_module = start; 2162 2163 return 0; 2164 } 2165 2166 static int kcore_copy__parse_modules(struct kcore_copy_info *kci, 2167 const char *dir) 2168 { 2169 char modules_filename[PATH_MAX]; 2170 2171 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir); 2172 2173 if (symbol__restricted_filename(modules_filename, "/proc/modules")) 2174 return -1; 2175 2176 if (modules__parse(modules_filename, kci, 2177 kcore_copy__process_modules) < 0) 2178 return -1; 2179 2180 return 0; 2181 } 2182 2183 static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end, 2184 u64 pgoff, u64 s, u64 e) 2185 { 2186 u64 len, offset; 2187 2188 if (s < start || s >= end) 2189 return 0; 2190 2191 offset = (s - start) + pgoff; 2192 len = e < end ? e - s : end - s; 2193 2194 return kcore_copy_info__addnew(kci, s, len, offset) ? 
0 : -1; 2195 } 2196 2197 static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data) 2198 { 2199 struct kcore_copy_info *kci = data; 2200 u64 end = start + len; 2201 struct sym_data *sdat; 2202 2203 if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext)) 2204 return -1; 2205 2206 if (kcore_copy__map(kci, start, end, pgoff, kci->first_module, 2207 kci->last_module_symbol)) 2208 return -1; 2209 2210 list_for_each_entry(sdat, &kci->syms, node) { 2211 u64 s = round_down(sdat->addr, page_size); 2212 2213 if (kcore_copy__map(kci, start, end, pgoff, s, s + len)) 2214 return -1; 2215 } 2216 2217 return 0; 2218 } 2219 2220 static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf) 2221 { 2222 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0) 2223 return -1; 2224 2225 return 0; 2226 } 2227 2228 static void kcore_copy__find_remaps(struct kcore_copy_info *kci) 2229 { 2230 struct phdr_data *p, *k = NULL; 2231 u64 kend; 2232 2233 if (!kci->stext) 2234 return; 2235 2236 /* Find phdr that corresponds to the kernel map (contains stext) */ 2237 kcore_copy__for_each_phdr(kci, p) { 2238 u64 pend = p->addr + p->len - 1; 2239 2240 if (p->addr <= kci->stext && pend >= kci->stext) { 2241 k = p; 2242 break; 2243 } 2244 } 2245 2246 if (!k) 2247 return; 2248 2249 kend = k->offset + k->len; 2250 2251 /* Find phdrs that remap the kernel */ 2252 kcore_copy__for_each_phdr(kci, p) { 2253 u64 pend = p->offset + p->len; 2254 2255 if (p == k) 2256 continue; 2257 2258 if (p->offset >= k->offset && pend <= kend) 2259 p->remaps = k; 2260 } 2261 } 2262 2263 static void kcore_copy__layout(struct kcore_copy_info *kci) 2264 { 2265 struct phdr_data *p; 2266 off_t rel = 0; 2267 2268 kcore_copy__find_remaps(kci); 2269 2270 kcore_copy__for_each_phdr(kci, p) { 2271 if (!p->remaps) { 2272 p->rel = rel; 2273 rel += p->len; 2274 } 2275 kci->phnum += 1; 2276 } 2277 2278 kcore_copy__for_each_phdr(kci, p) { 2279 struct phdr_data *k = p->remaps; 2280 2281 if (k) 2282 p->rel = p->offset - k->offset + k->rel; 2283 } 2284 } 2285 2286 static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir, 2287 Elf *elf) 2288 { 2289 if (kcore_copy__parse_kallsyms(kci, dir)) 2290 return -1; 2291 2292 if (kcore_copy__parse_modules(kci, dir)) 2293 return -1; 2294 2295 if (kci->stext) 2296 kci->stext = round_down(kci->stext, page_size); 2297 else 2298 kci->stext = round_down(kci->first_symbol, page_size); 2299 2300 if (kci->etext) { 2301 kci->etext = round_up(kci->etext, page_size); 2302 } else if (kci->last_symbol) { 2303 kci->etext = round_up(kci->last_symbol, page_size); 2304 kci->etext += page_size; 2305 } 2306 2307 if (kci->first_module_symbol && 2308 (!kci->first_module || kci->first_module_symbol < kci->first_module)) 2309 kci->first_module = kci->first_module_symbol; 2310 2311 kci->first_module = round_down(kci->first_module, page_size); 2312 2313 if (kci->last_module_symbol) { 2314 kci->last_module_symbol = round_up(kci->last_module_symbol, 2315 page_size); 2316 kci->last_module_symbol += page_size; 2317 } 2318 2319 if (!kci->stext || !kci->etext) 2320 return -1; 2321 2322 if (kci->first_module && !kci->last_module_symbol) 2323 return -1; 2324 2325 if (kcore_copy__read_maps(kci, elf)) 2326 return -1; 2327 2328 kcore_copy__layout(kci); 2329 2330 return 0; 2331 } 2332 2333 static int kcore_copy__copy_file(const char *from_dir, const char *to_dir, 2334 const char *name) 2335 { 2336 char from_filename[PATH_MAX]; 2337 char to_filename[PATH_MAX]; 2338 2339 scnprintf(from_filename, PATH_MAX, 
"%s/%s", from_dir, name); 2340 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name); 2341 2342 return copyfile_mode(from_filename, to_filename, 0400); 2343 } 2344 2345 static int kcore_copy__unlink(const char *dir, const char *name) 2346 { 2347 char filename[PATH_MAX]; 2348 2349 scnprintf(filename, PATH_MAX, "%s/%s", dir, name); 2350 2351 return unlink(filename); 2352 } 2353 2354 static int kcore_copy__compare_fds(int from, int to) 2355 { 2356 char *buf_from; 2357 char *buf_to; 2358 ssize_t ret; 2359 size_t len; 2360 int err = -1; 2361 2362 buf_from = malloc(page_size); 2363 buf_to = malloc(page_size); 2364 if (!buf_from || !buf_to) 2365 goto out; 2366 2367 while (1) { 2368 /* Use read because mmap won't work on proc files */ 2369 ret = read(from, buf_from, page_size); 2370 if (ret < 0) 2371 goto out; 2372 2373 if (!ret) 2374 break; 2375 2376 len = ret; 2377 2378 if (readn(to, buf_to, len) != (int)len) 2379 goto out; 2380 2381 if (memcmp(buf_from, buf_to, len)) 2382 goto out; 2383 } 2384 2385 err = 0; 2386 out: 2387 free(buf_to); 2388 free(buf_from); 2389 return err; 2390 } 2391 2392 static int kcore_copy__compare_files(const char *from_filename, 2393 const char *to_filename) 2394 { 2395 int from, to, err = -1; 2396 2397 from = open(from_filename, O_RDONLY); 2398 if (from < 0) 2399 return -1; 2400 2401 to = open(to_filename, O_RDONLY); 2402 if (to < 0) 2403 goto out_close_from; 2404 2405 err = kcore_copy__compare_fds(from, to); 2406 2407 close(to); 2408 out_close_from: 2409 close(from); 2410 return err; 2411 } 2412 2413 static int kcore_copy__compare_file(const char *from_dir, const char *to_dir, 2414 const char *name) 2415 { 2416 char from_filename[PATH_MAX]; 2417 char to_filename[PATH_MAX]; 2418 2419 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name); 2420 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name); 2421 2422 return kcore_copy__compare_files(from_filename, to_filename); 2423 } 2424 2425 /** 2426 * kcore_copy - copy kallsyms, modules and kcore from one directory to another. 2427 * @from_dir: from directory 2428 * @to_dir: to directory 2429 * 2430 * This function copies kallsyms, modules and kcore files from one directory to 2431 * another. kallsyms and modules are copied entirely. Only code segments are 2432 * copied from kcore. It is assumed that two segments suffice: one for the 2433 * kernel proper and one for all the modules. The code segments are determined 2434 * from kallsyms and modules files. The kernel map starts at _stext or the 2435 * lowest function symbol, and ends at _etext or the highest function symbol. 2436 * The module map starts at the lowest module address and ends at the highest 2437 * module symbol. Start addresses are rounded down to the nearest page. End 2438 * addresses are rounded up to the nearest page. An extra page is added to the 2439 * highest kernel symbol and highest module symbol to, hopefully, encompass that 2440 * symbol too. Because it contains only code sections, the resulting kcore is 2441 * unusual. One significant peculiarity is that the mapping (start -> pgoff) 2442 * is not the same for the kernel map and the modules map. That happens because 2443 * the data is copied adjacently whereas the original kcore has gaps. Finally, 2444 * kallsyms file is compared with its copy to check that modules have not been 2445 * loaded or unloaded while the copies were taking place. 2446 * 2447 * Return: %0 on success, %-1 on failure. 

/**
 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
 * @from_dir: from directory
 * @to_dir: to directory
 *
 * This function copies kallsyms, modules and kcore files from one directory to
 * another. kallsyms and modules are copied entirely. Only code segments are
 * copied from kcore. It is assumed that two segments suffice: one for the
 * kernel proper and one for all the modules. The code segments are determined
 * from the kallsyms and modules files. The kernel map starts at _stext or the
 * lowest function symbol, and ends at _etext or the highest function symbol.
 * The module map starts at the lowest module address and ends at the highest
 * module symbol. Start addresses are rounded down to the nearest page. End
 * addresses are rounded up to the nearest page. An extra page is added to the
 * highest kernel symbol and highest module symbol to, hopefully, encompass
 * that symbol too. Because it contains only code sections, the resulting
 * kcore is unusual. One significant peculiarity is that the mapping
 * (start -> pgoff) is not the same for the kernel map and the modules map.
 * That happens because the data is copied adjacently whereas the original
 * kcore has gaps. Finally, the kallsyms file is compared with its copy to
 * check that modules have not been loaded or unloaded while the copies were
 * taking place.
 *
 * Return: %0 on success, %-1 on failure.
 */
int kcore_copy(const char *from_dir, const char *to_dir)
{
	struct kcore kcore;
	struct kcore extract;
	int idx = 0, err = -1;
	off_t offset, sz;
	struct kcore_copy_info kci = { .stext = 0, };
	char kcore_filename[PATH_MAX];
	char extract_filename[PATH_MAX];
	struct phdr_data *p;

	INIT_LIST_HEAD(&kci.phdrs);
	INIT_LIST_HEAD(&kci.syms);

	if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
		return -1;

	if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
		goto out_unlink_kallsyms;

	scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
	scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);

	if (kcore__open(&kcore, kcore_filename))
		goto out_unlink_modules;

	if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
		goto out_kcore_close;

	if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
		goto out_extract_close;

	offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
		 gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
	offset = round_up(offset, page_size);

	kcore_copy__for_each_phdr(&kci, p) {
		off_t offs = p->rel + offset;

		if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
			goto out_extract_close;
	}

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	kcore_copy__for_each_phdr(&kci, p) {
		off_t offs = p->rel + offset;

		if (p->remaps)
			continue;
		if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
			goto out_extract_close;
	}

	if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(extract_filename);
out_kcore_close:
	kcore__close(&kcore);
out_unlink_modules:
	if (err)
		kcore_copy__unlink(to_dir, "modules");
out_unlink_kallsyms:
	if (err)
		kcore_copy__unlink(to_dir, "kallsyms");

	kcore_copy__free_phdrs(&kci);
	kcore_copy__free_syms(&kci);

	return err;
}

int kcore_extract__create(struct kcore_extract *kce)
{
	struct kcore kcore;
	struct kcore extract;
	size_t count = 1;
	int idx = 0, err = -1;
	off_t offset = page_size, sz;

	if (kcore__open(&kcore, kce->kcore_filename))
		return -1;

	strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
	if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
		goto out_kcore_close;

	if (kcore__copy_hdr(&kcore, &extract, count))
		goto out_extract_close;

	if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
		goto out_extract_close;

	sz = kcore__write(&extract);
	if (sz < 0 || sz > offset)
		goto out_extract_close;

	if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
		goto out_extract_close;

	err = 0;

out_extract_close:
	kcore__close(&extract);
	if (err)
		unlink(kce->extract_filename);
out_kcore_close:
	kcore__close(&kcore);

	return err;
}

void kcore_extract__delete(struct kcore_extract *kce)
{
	unlink(kce->extract_filename);
}

#ifdef HAVE_GELF_GETNOTE_SUPPORT
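
/*
 * A .note.stapsdt note records probe addresses as they were when the file
 * was linked.  If the file has since been prelinked, the .stapsdt.base
 * section will have moved, so the helpers below re-base the recorded
 * addresses using that section's actual file offset.  For example
 * (illustrative numbers only): a note with loc 0x6010 and base 0x6000, in a
 * file whose .stapsdt.base now sits at file offset 0x5000, yields an adjusted
 * loc of 0x6010 + 0x5000 - 0x6000 = 0x5010 in sdt_adjust_loc().
 */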
static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off)
{
	if (!base_off)
		return;

	if (tmp->bit32)
		tmp->addr.a32[SDT_NOTE_IDX_LOC] =
			tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off -
			tmp->addr.a32[SDT_NOTE_IDX_BASE];
	else
		tmp->addr.a64[SDT_NOTE_IDX_LOC] =
			tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off -
			tmp->addr.a64[SDT_NOTE_IDX_BASE];
}

static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr,
			      GElf_Addr base_off)
{
	if (!base_off)
		return;

	if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR])
		tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
	else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR])
		tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
}

/**
 * populate_sdt_note : Parse raw data and identify SDT note
 * @elf: elf of the opened file
 * @data: raw data of a section with description offset applied
 * @len: note description size
 * @sdt_notes: list to add the SDT note to
 *
 * Responsible for parsing the @data in section .note.stapsdt in @elf and,
 * if it is an SDT note, appending it to the @sdt_notes list.
 */
static int populate_sdt_note(Elf **elf, const char *data, size_t len,
			     struct list_head *sdt_notes)
{
	const char *provider, *name, *args;
	struct sdt_note *tmp = NULL;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr;
	int ret = -EINVAL;

	union {
		Elf64_Addr a64[NR_ADDR];
		Elf32_Addr a32[NR_ADDR];
	} buf;

	Elf_Data dst = {
		.d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
		.d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
		.d_off = 0, .d_align = 0
	};
	Elf_Data src = {
		.d_buf = (void *) data, .d_type = ELF_T_ADDR,
		.d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
		.d_align = 0
	};

	tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
	if (!tmp) {
		ret = -ENOMEM;
		goto out_err;
	}

	INIT_LIST_HEAD(&tmp->note_list);

	if (len < dst.d_size + 3)
		goto out_free_note;

	/* Translation from file representation to memory representation */
	if (gelf_xlatetom(*elf, &dst, &src,
			  elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
		pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
		goto out_free_note;
	}

	/* Populate the fields of sdt_note */
	provider = data + dst.d_size;

	name = (const char *)memchr(provider, '\0', data + len - provider);
	if (name++ == NULL)
		goto out_free_note;

	tmp->provider = strdup(provider);
	if (!tmp->provider) {
		ret = -ENOMEM;
		goto out_free_note;
	}
	tmp->name = strdup(name);
	if (!tmp->name) {
		ret = -ENOMEM;
		goto out_free_prov;
	}
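
	/*
	 * Whatever follows the name's terminating NUL, if anything, is the
	 * optional argument string.
	 */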
	args = memchr(name, '\0', data + len - name);

	/*
	 * There is no argument if:
	 * - We reached the end of the note;
	 * - There is not enough room to hold a potential string;
	 * - The argument string is empty or just contains ':'.
	 */
	if (args == NULL || data + len - args < 2 ||
	    args[1] == ':' || args[1] == '\0')
		tmp->args = NULL;
	else {
		tmp->args = strdup(++args);
		if (!tmp->args) {
			ret = -ENOMEM;
			goto out_free_name;
		}
	}

	if (gelf_getclass(*elf) == ELFCLASS32) {
		memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
		tmp->bit32 = true;
	} else {
		memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
		tmp->bit32 = false;
	}

	if (!gelf_getehdr(*elf, &ehdr)) {
		pr_debug("%s : cannot get elf header.\n", __func__);
		ret = -EBADF;
		goto out_free_args;
	}

	/*
	 * Adjust the prelink effect:
	 * Find the .stapsdt.base section.  This section will help us handle
	 * prelinking (if present).  Compare the retrieved file offset of the
	 * base section with the base address in the description of the SDT
	 * note.  If it's different, adjust the note location accordingly.
	 */
	if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
		sdt_adjust_loc(tmp, shdr.sh_offset);

	/* Adjust reference counter offset */
	if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
		sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);

	list_add_tail(&tmp->note_list, sdt_notes);
	return 0;

out_free_args:
	zfree(&tmp->args);
out_free_name:
	zfree(&tmp->name);
out_free_prov:
	zfree(&tmp->provider);
out_free_note:
	free(tmp);
out_err:
	return ret;
}
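
/*
 * For reference, the note description parsed by populate_sdt_note() above is
 * laid out as:
 *
 *   <loc addr> <base addr> <refctr (semaphore) addr> provider\0 name\0 args\0
 *
 * with the addresses being 32-bit or 64-bit according to the ELF class.
 */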

/**
 * construct_sdt_notes_list : constructs a list of SDT notes
 * @elf : elf to look into
 * @sdt_notes : empty list_head
 *
 * Scans the sections in @elf for the section .note.stapsdt and then calls
 * populate_sdt_note() to find the SDT events and populate the @sdt_notes
 * list.
 */
static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
{
	GElf_Ehdr ehdr;
	Elf_Scn *scn = NULL;
	Elf_Data *data;
	GElf_Shdr shdr;
	size_t shstrndx, next;
	GElf_Nhdr nhdr;
	size_t name_off, desc_off, offset;
	int ret = 0;

	if (gelf_getehdr(elf, &ehdr) == NULL) {
		ret = -EBADF;
		goto out_ret;
	}
	if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
		ret = -EBADF;
		goto out_ret;
	}

	/* Look for the required section */
	scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
	if (!scn) {
		ret = -ENOENT;
		goto out_ret;
	}

	if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
		ret = -ENOENT;
		goto out_ret;
	}

	data = elf_getdata(scn, NULL);

	/* Get the SDT notes */
	for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
					      &desc_off)) > 0; offset = next) {
		if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
		    !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
			    sizeof(SDT_NOTE_NAME))) {
			/* Check the type of the note */
			if (nhdr.n_type != SDT_NOTE_TYPE)
				goto out_ret;

			ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
						nhdr.n_descsz, sdt_notes);
			if (ret < 0)
				goto out_ret;
		}
	}
	if (list_empty(sdt_notes))
		ret = -ENOENT;

out_ret:
	return ret;
}

/**
 * get_sdt_note_list : Wrapper to construct a list of sdt notes
 * @head : empty list_head
 * @target : file to find SDT notes from
 *
 * This opens the file, initializes the ELF and then calls
 * construct_sdt_notes_list().
 */
int get_sdt_note_list(struct list_head *head, const char *target)
{
	Elf *elf;
	int fd, ret;

	fd = open(target, O_RDONLY);
	if (fd < 0)
		return -EBADF;

	elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
	if (!elf) {
		ret = -EBADF;
		goto out_close;
	}
	ret = construct_sdt_notes_list(elf, head);
	elf_end(elf);
out_close:
	close(fd);
	return ret;
}

/**
 * cleanup_sdt_note_list : free the sdt notes' list
 * @sdt_notes: sdt notes' list
 *
 * Free up the SDT notes in @sdt_notes.
 * Returns the number of SDT notes freed.
 */
int cleanup_sdt_note_list(struct list_head *sdt_notes)
{
	struct sdt_note *tmp, *pos;
	int nr_free = 0;

	list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
		list_del_init(&pos->note_list);
		zfree(&pos->args);
		zfree(&pos->name);
		zfree(&pos->provider);
		free(pos);
		nr_free++;
	}
	return nr_free;
}

/**
 * sdt_notes__get_count: Counts the number of sdt events
 * @start: list_head to sdt_notes list
 *
 * Returns the number of SDT notes in a list
 */
int sdt_notes__get_count(struct list_head *start)
{
	struct sdt_note *sdt_ptr;
	int count = 0;

	list_for_each_entry(sdt_ptr, start, note_list)
		count++;
	return count;
}
#endif

void symbol__elf_init(void)
{
	elf_version(EV_CURRENT);
}