1 // SPDX-License-Identifier: GPL-2.0
2 #include <fcntl.h>
3 #include <stdio.h>
4 #include <errno.h>
5 #include <stdlib.h>
6 #include <string.h>
7 #include <unistd.h>
8 #include <inttypes.h>
9
10 #include "dso.h"
11 #include "map.h"
12 #include "maps.h"
13 #include "symbol.h"
14 #include "symsrc.h"
15 #include "demangle-cxx.h"
16 #include "demangle-ocaml.h"
17 #include "demangle-java.h"
18 #include "demangle-rust.h"
19 #include "machine.h"
20 #include "vdso.h"
21 #include "debug.h"
22 #include "util/copyfile.h"
23 #include <linux/ctype.h>
24 #include <linux/kernel.h>
25 #include <linux/zalloc.h>
26 #include <symbol/kallsyms.h>
27 #include <internal/lib.h>
28
29 #ifdef HAVE_LIBBFD_SUPPORT
30 #define PACKAGE 'perf'
31 #include <bfd.h>
32 #endif
33
34 #if defined(HAVE_LIBBFD_SUPPORT) || defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
35 #ifndef DMGL_PARAMS
36 #define DMGL_PARAMS (1 << 0) /* Include function args */
37 #define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
38 #endif
39 #endif
40
41 #ifndef EM_AARCH64
42 #define EM_AARCH64 183 /* ARM 64 bit */
43 #endif
44
45 #ifndef EM_LOONGARCH
46 #define EM_LOONGARCH 258
47 #endif
48
49 #ifndef ELF32_ST_VISIBILITY
50 #define ELF32_ST_VISIBILITY(o) ((o) & 0x03)
51 #endif
52
53 /* For ELF64 the definitions are the same. */
54 #ifndef ELF64_ST_VISIBILITY
55 #define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o)
56 #endif
57
58 /* How to extract information held in the st_other field. */
59 #ifndef GELF_ST_VISIBILITY
60 #define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val)
61 #endif
62
63 typedef Elf64_Nhdr GElf_Nhdr;
64
65
66 #ifndef HAVE_ELF_GETPHDRNUM_SUPPORT
elf_getphdrnum(Elf * elf,size_t * dst)67 static int elf_getphdrnum(Elf *elf, size_t *dst)
68 {
69 GElf_Ehdr gehdr;
70 GElf_Ehdr *ehdr;
71
72 ehdr = gelf_getehdr(elf, &gehdr);
73 if (!ehdr)
74 return -1;
75
76 *dst = ehdr->e_phnum;
77
78 return 0;
79 }
80 #endif
81
82 #ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
elf_getshdrstrndx(Elf * elf __maybe_unused,size_t * dst __maybe_unused)83 static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
84 {
85 pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
86 return -1;
87 }
88 #endif
89
90 #ifndef NT_GNU_BUILD_ID
91 #define NT_GNU_BUILD_ID 3
92 #endif
93
94 /**
95 * elf_symtab__for_each_symbol - iterate thru all the symbols
96 *
97 * @syms: struct elf_symtab instance to iterate
98 * @idx: uint32_t idx
99 * @sym: GElf_Sym iterator
100 */
101 #define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \
102 for (idx = 0, gelf_getsym(syms, idx, &sym);\
103 idx < nr_syms; \
104 idx++, gelf_getsym(syms, idx, &sym))
105
elf_sym__type(const GElf_Sym * sym)106 static inline uint8_t elf_sym__type(const GElf_Sym *sym)
107 {
108 return GELF_ST_TYPE(sym->st_info);
109 }
110
elf_sym__visibility(const GElf_Sym * sym)111 static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
112 {
113 return GELF_ST_VISIBILITY(sym->st_other);
114 }
115
116 #ifndef STT_GNU_IFUNC
117 #define STT_GNU_IFUNC 10
118 #endif
119
elf_sym__is_function(const GElf_Sym * sym)120 static inline int elf_sym__is_function(const GElf_Sym *sym)
121 {
122 return (elf_sym__type(sym) == STT_FUNC ||
123 elf_sym__type(sym) == STT_GNU_IFUNC) &&
124 sym->st_name != 0 &&
125 sym->st_shndx != SHN_UNDEF;
126 }
127
elf_sym__is_object(const GElf_Sym * sym)128 static inline bool elf_sym__is_object(const GElf_Sym *sym)
129 {
130 return elf_sym__type(sym) == STT_OBJECT &&
131 sym->st_name != 0 &&
132 sym->st_shndx != SHN_UNDEF;
133 }
134
elf_sym__is_label(const GElf_Sym * sym)135 static inline int elf_sym__is_label(const GElf_Sym *sym)
136 {
137 return elf_sym__type(sym) == STT_NOTYPE &&
138 sym->st_name != 0 &&
139 sym->st_shndx != SHN_UNDEF &&
140 sym->st_shndx != SHN_ABS &&
141 elf_sym__visibility(sym) != STV_HIDDEN &&
142 elf_sym__visibility(sym) != STV_INTERNAL;
143 }
144
elf_sym__filter(GElf_Sym * sym)145 static bool elf_sym__filter(GElf_Sym *sym)
146 {
147 return elf_sym__is_function(sym) || elf_sym__is_object(sym);
148 }
149
elf_sym__name(const GElf_Sym * sym,const Elf_Data * symstrs)150 static inline const char *elf_sym__name(const GElf_Sym *sym,
151 const Elf_Data *symstrs)
152 {
153 return symstrs->d_buf + sym->st_name;
154 }
155
elf_sec__name(const GElf_Shdr * shdr,const Elf_Data * secstrs)156 static inline const char *elf_sec__name(const GElf_Shdr *shdr,
157 const Elf_Data *secstrs)
158 {
159 return secstrs->d_buf + shdr->sh_name;
160 }
161
elf_sec__is_text(const GElf_Shdr * shdr,const Elf_Data * secstrs)162 static inline int elf_sec__is_text(const GElf_Shdr *shdr,
163 const Elf_Data *secstrs)
164 {
165 return strstr(elf_sec__name(shdr, secstrs), "text") != NULL;
166 }
167
elf_sec__is_data(const GElf_Shdr * shdr,const Elf_Data * secstrs)168 static inline bool elf_sec__is_data(const GElf_Shdr *shdr,
169 const Elf_Data *secstrs)
170 {
171 return strstr(elf_sec__name(shdr, secstrs), "data") != NULL;
172 }
173
elf_sec__filter(GElf_Shdr * shdr,Elf_Data * secstrs)174 static bool elf_sec__filter(GElf_Shdr *shdr, Elf_Data *secstrs)
175 {
176 return elf_sec__is_text(shdr, secstrs) ||
177 elf_sec__is_data(shdr, secstrs);
178 }
179
elf_addr_to_index(Elf * elf,GElf_Addr addr)180 static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
181 {
182 Elf_Scn *sec = NULL;
183 GElf_Shdr shdr;
184 size_t cnt = 1;
185
186 while ((sec = elf_nextscn(elf, sec)) != NULL) {
187 gelf_getshdr(sec, &shdr);
188
189 if ((addr >= shdr.sh_addr) &&
190 (addr < (shdr.sh_addr + shdr.sh_size)))
191 return cnt;
192
193 ++cnt;
194 }
195
196 return -1;
197 }
198
elf_section_by_name(Elf * elf,GElf_Ehdr * ep,GElf_Shdr * shp,const char * name,size_t * idx)199 Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
200 GElf_Shdr *shp, const char *name, size_t *idx)
201 {
202 Elf_Scn *sec = NULL;
203 size_t cnt = 1;
204
205 /* ELF is corrupted/truncated, avoid calling elf_strptr. */
206 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL))
207 return NULL;
208
209 while ((sec = elf_nextscn(elf, sec)) != NULL) {
210 char *str;
211
212 gelf_getshdr(sec, shp);
213 str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
214 if (str && !strcmp(name, str)) {
215 if (idx)
216 *idx = cnt;
217 return sec;
218 }
219 ++cnt;
220 }
221
222 return NULL;
223 }
224
filename__has_section(const char * filename,const char * sec)225 bool filename__has_section(const char *filename, const char *sec)
226 {
227 int fd;
228 Elf *elf;
229 GElf_Ehdr ehdr;
230 GElf_Shdr shdr;
231 bool found = false;
232
233 fd = open(filename, O_RDONLY);
234 if (fd < 0)
235 return false;
236
237 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
238 if (elf == NULL)
239 goto out;
240
241 if (gelf_getehdr(elf, &ehdr) == NULL)
242 goto elf_out;
243
244 found = !!elf_section_by_name(elf, &ehdr, &shdr, sec, NULL);
245
246 elf_out:
247 elf_end(elf);
248 out:
249 close(fd);
250 return found;
251 }
252
elf_read_program_header(Elf * elf,u64 vaddr,GElf_Phdr * phdr)253 static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
254 {
255 size_t i, phdrnum;
256 u64 sz;
257
258 if (elf_getphdrnum(elf, &phdrnum))
259 return -1;
260
261 for (i = 0; i < phdrnum; i++) {
262 if (gelf_getphdr(elf, i, phdr) == NULL)
263 return -1;
264
265 if (phdr->p_type != PT_LOAD)
266 continue;
267
268 sz = max(phdr->p_memsz, phdr->p_filesz);
269 if (!sz)
270 continue;
271
272 if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
273 return 0;
274 }
275
276 /* Not found any valid program header */
277 return -1;
278 }
279
want_demangle(bool is_kernel_sym)280 static bool want_demangle(bool is_kernel_sym)
281 {
282 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
283 }
284
285 /*
286 * Demangle C++ function signature, typically replaced by demangle-cxx.cpp
287 * version.
288 */
cxx_demangle_sym(const char * str __maybe_unused,bool params __maybe_unused,bool modifiers __maybe_unused)289 __weak char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
290 bool modifiers __maybe_unused)
291 {
292 #ifdef HAVE_LIBBFD_SUPPORT
293 int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
294
295 return bfd_demangle(NULL, str, flags);
296 #elif defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
297 int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
298
299 return cplus_demangle(str, flags);
300 #else
301 return NULL;
302 #endif
303 }
304
demangle_sym(struct dso * dso,int kmodule,const char * elf_name)305 static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
306 {
307 char *demangled = NULL;
308
309 /*
310 * We need to figure out if the object was created from C++ sources
311 * DWARF DW_compile_unit has this, but we don't always have access
312 * to it...
313 */
314 if (!want_demangle(dso->kernel || kmodule))
315 return demangled;
316
317 demangled = cxx_demangle_sym(elf_name, verbose > 0, verbose > 0);
318 if (demangled == NULL) {
319 demangled = ocaml_demangle_sym(elf_name);
320 if (demangled == NULL) {
321 demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
322 }
323 }
324 else if (rust_is_mangled(demangled))
325 /*
326 * Input to Rust demangling is the BFD-demangled
327 * name which it Rust-demangles in place.
328 */
329 rust_demangle_sym(demangled);
330
331 return demangled;
332 }
333
334 struct rel_info {
335 u32 nr_entries;
336 u32 *sorted;
337 bool is_rela;
338 Elf_Data *reldata;
339 GElf_Rela rela;
340 GElf_Rel rel;
341 };
342
get_rel_symidx(struct rel_info * ri,u32 idx)343 static u32 get_rel_symidx(struct rel_info *ri, u32 idx)
344 {
345 idx = ri->sorted ? ri->sorted[idx] : idx;
346 if (ri->is_rela) {
347 gelf_getrela(ri->reldata, idx, &ri->rela);
348 return GELF_R_SYM(ri->rela.r_info);
349 }
350 gelf_getrel(ri->reldata, idx, &ri->rel);
351 return GELF_R_SYM(ri->rel.r_info);
352 }
353
get_rel_offset(struct rel_info * ri,u32 x)354 static u64 get_rel_offset(struct rel_info *ri, u32 x)
355 {
356 if (ri->is_rela) {
357 GElf_Rela rela;
358
359 gelf_getrela(ri->reldata, x, &rela);
360 return rela.r_offset;
361 } else {
362 GElf_Rel rel;
363
364 gelf_getrel(ri->reldata, x, &rel);
365 return rel.r_offset;
366 }
367 }
368
rel_cmp(const void * a,const void * b,void * r)369 static int rel_cmp(const void *a, const void *b, void *r)
370 {
371 struct rel_info *ri = r;
372 u64 a_offset = get_rel_offset(ri, *(const u32 *)a);
373 u64 b_offset = get_rel_offset(ri, *(const u32 *)b);
374
375 return a_offset < b_offset ? -1 : (a_offset > b_offset ? 1 : 0);
376 }
377
sort_rel(struct rel_info * ri)378 static int sort_rel(struct rel_info *ri)
379 {
380 size_t sz = sizeof(ri->sorted[0]);
381 u32 i;
382
383 ri->sorted = calloc(ri->nr_entries, sz);
384 if (!ri->sorted)
385 return -1;
386 for (i = 0; i < ri->nr_entries; i++)
387 ri->sorted[i] = i;
388 qsort_r(ri->sorted, ri->nr_entries, sz, rel_cmp, ri);
389 return 0;
390 }
391
392 /*
393 * For x86_64, the GNU linker is putting IFUNC information in the relocation
394 * addend.
395 */
addend_may_be_ifunc(GElf_Ehdr * ehdr,struct rel_info * ri)396 static bool addend_may_be_ifunc(GElf_Ehdr *ehdr, struct rel_info *ri)
397 {
398 return ehdr->e_machine == EM_X86_64 && ri->is_rela &&
399 GELF_R_TYPE(ri->rela.r_info) == R_X86_64_IRELATIVE;
400 }
401
get_ifunc_name(Elf * elf,struct dso * dso,GElf_Ehdr * ehdr,struct rel_info * ri,char * buf,size_t buf_sz)402 static bool get_ifunc_name(Elf *elf, struct dso *dso, GElf_Ehdr *ehdr,
403 struct rel_info *ri, char *buf, size_t buf_sz)
404 {
405 u64 addr = ri->rela.r_addend;
406 struct symbol *sym;
407 GElf_Phdr phdr;
408
409 if (!addend_may_be_ifunc(ehdr, ri))
410 return false;
411
412 if (elf_read_program_header(elf, addr, &phdr))
413 return false;
414
415 addr -= phdr.p_vaddr - phdr.p_offset;
416
417 sym = dso__find_symbol_nocache(dso, addr);
418
419 /* Expecting the address to be an IFUNC or IFUNC alias */
420 if (!sym || sym->start != addr || (sym->type != STT_GNU_IFUNC && !sym->ifunc_alias))
421 return false;
422
423 snprintf(buf, buf_sz, "%s@plt", sym->name);
424
425 return true;
426 }
427
exit_rel(struct rel_info * ri)428 static void exit_rel(struct rel_info *ri)
429 {
430 zfree(&ri->sorted);
431 }
432
get_plt_sizes(struct dso * dso,GElf_Ehdr * ehdr,GElf_Shdr * shdr_plt,u64 * plt_header_size,u64 * plt_entry_size)433 static bool get_plt_sizes(struct dso *dso, GElf_Ehdr *ehdr, GElf_Shdr *shdr_plt,
434 u64 *plt_header_size, u64 *plt_entry_size)
435 {
436 switch (ehdr->e_machine) {
437 case EM_ARM:
438 *plt_header_size = 20;
439 *plt_entry_size = 12;
440 return true;
441 case EM_AARCH64:
442 *plt_header_size = 32;
443 *plt_entry_size = 16;
444 return true;
445 case EM_LOONGARCH:
446 *plt_header_size = 32;
447 *plt_entry_size = 16;
448 return true;
449 case EM_SPARC:
450 *plt_header_size = 48;
451 *plt_entry_size = 12;
452 return true;
453 case EM_SPARCV9:
454 *plt_header_size = 128;
455 *plt_entry_size = 32;
456 return true;
457 case EM_386:
458 case EM_X86_64:
459 *plt_entry_size = shdr_plt->sh_entsize;
460 /* Size is 8 or 16, if not, assume alignment indicates size */
461 if (*plt_entry_size != 8 && *plt_entry_size != 16)
462 *plt_entry_size = shdr_plt->sh_addralign == 8 ? 8 : 16;
463 *plt_header_size = *plt_entry_size;
464 break;
465 default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/xtensa need to be checked */
466 *plt_header_size = shdr_plt->sh_entsize;
467 *plt_entry_size = shdr_plt->sh_entsize;
468 break;
469 }
470 if (*plt_entry_size)
471 return true;
472 pr_debug("Missing PLT entry size for %s\n", dso->long_name);
473 return false;
474 }
475
machine_is_x86(GElf_Half e_machine)476 static bool machine_is_x86(GElf_Half e_machine)
477 {
478 return e_machine == EM_386 || e_machine == EM_X86_64;
479 }
480
481 struct rela_dyn {
482 GElf_Addr offset;
483 u32 sym_idx;
484 };
485
486 struct rela_dyn_info {
487 struct dso *dso;
488 Elf_Data *plt_got_data;
489 u32 nr_entries;
490 struct rela_dyn *sorted;
491 Elf_Data *dynsym_data;
492 Elf_Data *dynstr_data;
493 Elf_Data *rela_dyn_data;
494 };
495
exit_rela_dyn(struct rela_dyn_info * di)496 static void exit_rela_dyn(struct rela_dyn_info *di)
497 {
498 zfree(&di->sorted);
499 }
500
cmp_offset(const void * a,const void * b)501 static int cmp_offset(const void *a, const void *b)
502 {
503 const struct rela_dyn *va = a;
504 const struct rela_dyn *vb = b;
505
506 return va->offset < vb->offset ? -1 : (va->offset > vb->offset ? 1 : 0);
507 }
508
sort_rela_dyn(struct rela_dyn_info * di)509 static int sort_rela_dyn(struct rela_dyn_info *di)
510 {
511 u32 i, n;
512
513 di->sorted = calloc(di->nr_entries, sizeof(di->sorted[0]));
514 if (!di->sorted)
515 return -1;
516
517 /* Get data for sorting: the offset and symbol index */
518 for (i = 0, n = 0; i < di->nr_entries; i++) {
519 GElf_Rela rela;
520 u32 sym_idx;
521
522 gelf_getrela(di->rela_dyn_data, i, &rela);
523 sym_idx = GELF_R_SYM(rela.r_info);
524 if (sym_idx) {
525 di->sorted[n].sym_idx = sym_idx;
526 di->sorted[n].offset = rela.r_offset;
527 n += 1;
528 }
529 }
530
531 /* Sort by offset */
532 di->nr_entries = n;
533 qsort(di->sorted, n, sizeof(di->sorted[0]), cmp_offset);
534
535 return 0;
536 }
537
get_rela_dyn_info(Elf * elf,GElf_Ehdr * ehdr,struct rela_dyn_info * di,Elf_Scn * scn)538 static void get_rela_dyn_info(Elf *elf, GElf_Ehdr *ehdr, struct rela_dyn_info *di, Elf_Scn *scn)
539 {
540 GElf_Shdr rela_dyn_shdr;
541 GElf_Shdr shdr;
542
543 di->plt_got_data = elf_getdata(scn, NULL);
544
545 scn = elf_section_by_name(elf, ehdr, &rela_dyn_shdr, ".rela.dyn", NULL);
546 if (!scn || !rela_dyn_shdr.sh_link || !rela_dyn_shdr.sh_entsize)
547 return;
548
549 di->nr_entries = rela_dyn_shdr.sh_size / rela_dyn_shdr.sh_entsize;
550 di->rela_dyn_data = elf_getdata(scn, NULL);
551
552 scn = elf_getscn(elf, rela_dyn_shdr.sh_link);
553 if (!scn || !gelf_getshdr(scn, &shdr) || !shdr.sh_link)
554 return;
555
556 di->dynsym_data = elf_getdata(scn, NULL);
557 di->dynstr_data = elf_getdata(elf_getscn(elf, shdr.sh_link), NULL);
558
559 if (!di->plt_got_data || !di->dynstr_data || !di->dynsym_data || !di->rela_dyn_data)
560 return;
561
562 /* Sort into offset order */
563 sort_rela_dyn(di);
564 }
565
566 /* Get instruction displacement from a plt entry for x86_64 */
get_x86_64_plt_disp(const u8 * p)567 static u32 get_x86_64_plt_disp(const u8 *p)
568 {
569 u8 endbr64[] = {0xf3, 0x0f, 0x1e, 0xfa};
570 int n = 0;
571
572 /* Skip endbr64 */
573 if (!memcmp(p, endbr64, sizeof(endbr64)))
574 n += sizeof(endbr64);
575 /* Skip bnd prefix */
576 if (p[n] == 0xf2)
577 n += 1;
578 /* jmp with 4-byte displacement */
579 if (p[n] == 0xff && p[n + 1] == 0x25) {
580 u32 disp;
581
582 n += 2;
583 /* Also add offset from start of entry to end of instruction */
584 memcpy(&disp, p + n, sizeof(disp));
585 return n + 4 + le32toh(disp);
586 }
587 return 0;
588 }
589
get_plt_got_name(GElf_Shdr * shdr,size_t i,struct rela_dyn_info * di,char * buf,size_t buf_sz)590 static bool get_plt_got_name(GElf_Shdr *shdr, size_t i,
591 struct rela_dyn_info *di,
592 char *buf, size_t buf_sz)
593 {
594 struct rela_dyn vi, *vr;
595 const char *sym_name;
596 char *demangled;
597 GElf_Sym sym;
598 bool result;
599 u32 disp;
600
601 if (!di->sorted)
602 return false;
603
604 disp = get_x86_64_plt_disp(di->plt_got_data->d_buf + i);
605 if (!disp)
606 return false;
607
608 /* Compute target offset of the .plt.got entry */
609 vi.offset = shdr->sh_offset + di->plt_got_data->d_off + i + disp;
610
611 /* Find that offset in .rela.dyn (sorted by offset) */
612 vr = bsearch(&vi, di->sorted, di->nr_entries, sizeof(di->sorted[0]), cmp_offset);
613 if (!vr)
614 return false;
615
616 /* Get the associated symbol */
617 gelf_getsym(di->dynsym_data, vr->sym_idx, &sym);
618 sym_name = elf_sym__name(&sym, di->dynstr_data);
619 demangled = demangle_sym(di->dso, 0, sym_name);
620 if (demangled != NULL)
621 sym_name = demangled;
622
623 snprintf(buf, buf_sz, "%s@plt", sym_name);
624
625 result = *sym_name;
626
627 free(demangled);
628
629 return result;
630 }
631
dso__synthesize_plt_got_symbols(struct dso * dso,Elf * elf,GElf_Ehdr * ehdr,char * buf,size_t buf_sz)632 static int dso__synthesize_plt_got_symbols(struct dso *dso, Elf *elf,
633 GElf_Ehdr *ehdr,
634 char *buf, size_t buf_sz)
635 {
636 struct rela_dyn_info di = { .dso = dso };
637 struct symbol *sym;
638 GElf_Shdr shdr;
639 Elf_Scn *scn;
640 int err = -1;
641 size_t i;
642
643 scn = elf_section_by_name(elf, ehdr, &shdr, ".plt.got", NULL);
644 if (!scn || !shdr.sh_entsize)
645 return 0;
646
647 if (ehdr->e_machine == EM_X86_64)
648 get_rela_dyn_info(elf, ehdr, &di, scn);
649
650 for (i = 0; i < shdr.sh_size; i += shdr.sh_entsize) {
651 if (!get_plt_got_name(&shdr, i, &di, buf, buf_sz))
652 snprintf(buf, buf_sz, "offset_%#" PRIx64 "@plt", (u64)shdr.sh_offset + i);
653 sym = symbol__new(shdr.sh_offset + i, shdr.sh_entsize, STB_GLOBAL, STT_FUNC, buf);
654 if (!sym)
655 goto out;
656 symbols__insert(&dso->symbols, sym);
657 }
658 err = 0;
659 out:
660 exit_rela_dyn(&di);
661 return err;
662 }
663
664 /*
665 * We need to check if we have a .dynsym, so that we can handle the
666 * .plt, synthesizing its symbols, that aren't on the symtabs (be it
667 * .dynsym or .symtab).
668 * And always look at the original dso, not at debuginfo packages, that
669 * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS).
670 */
dso__synthesize_plt_symbols(struct dso * dso,struct symsrc * ss)671 int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
672 {
673 uint32_t idx;
674 GElf_Sym sym;
675 u64 plt_offset, plt_header_size, plt_entry_size;
676 GElf_Shdr shdr_plt, plt_sec_shdr;
677 struct symbol *f, *plt_sym;
678 GElf_Shdr shdr_rel_plt, shdr_dynsym;
679 Elf_Data *syms, *symstrs;
680 Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym;
681 GElf_Ehdr ehdr;
682 char sympltname[1024];
683 Elf *elf;
684 int nr = 0, err = -1;
685 struct rel_info ri = { .is_rela = false };
686 bool lazy_plt;
687
688 elf = ss->elf;
689 ehdr = ss->ehdr;
690
691 if (!elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL))
692 return 0;
693
694 /*
695 * A symbol from a previous section (e.g. .init) can have been expanded
696 * by symbols__fixup_end() to overlap .plt. Truncate it before adding
697 * a symbol for .plt header.
698 */
699 f = dso__find_symbol_nocache(dso, shdr_plt.sh_offset);
700 if (f && f->start < shdr_plt.sh_offset && f->end > shdr_plt.sh_offset)
701 f->end = shdr_plt.sh_offset;
702
703 if (!get_plt_sizes(dso, &ehdr, &shdr_plt, &plt_header_size, &plt_entry_size))
704 return 0;
705
706 /* Add a symbol for .plt header */
707 plt_sym = symbol__new(shdr_plt.sh_offset, plt_header_size, STB_GLOBAL, STT_FUNC, ".plt");
708 if (!plt_sym)
709 goto out_elf_end;
710 symbols__insert(&dso->symbols, plt_sym);
711
712 /* Only x86 has .plt.got */
713 if (machine_is_x86(ehdr.e_machine) &&
714 dso__synthesize_plt_got_symbols(dso, elf, &ehdr, sympltname, sizeof(sympltname)))
715 goto out_elf_end;
716
717 /* Only x86 has .plt.sec */
718 if (machine_is_x86(ehdr.e_machine) &&
719 elf_section_by_name(elf, &ehdr, &plt_sec_shdr, ".plt.sec", NULL)) {
720 if (!get_plt_sizes(dso, &ehdr, &plt_sec_shdr, &plt_header_size, &plt_entry_size))
721 return 0;
722 /* Extend .plt symbol to entire .plt */
723 plt_sym->end = plt_sym->start + shdr_plt.sh_size;
724 /* Use .plt.sec offset */
725 plt_offset = plt_sec_shdr.sh_offset;
726 lazy_plt = false;
727 } else {
728 plt_offset = shdr_plt.sh_offset;
729 lazy_plt = true;
730 }
731
732 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
733 ".rela.plt", NULL);
734 if (scn_plt_rel == NULL) {
735 scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt,
736 ".rel.plt", NULL);
737 if (scn_plt_rel == NULL)
738 return 0;
739 }
740
741 if (shdr_rel_plt.sh_type != SHT_RELA &&
742 shdr_rel_plt.sh_type != SHT_REL)
743 return 0;
744
745 if (!shdr_rel_plt.sh_link)
746 return 0;
747
748 if (shdr_rel_plt.sh_link == ss->dynsym_idx) {
749 scn_dynsym = ss->dynsym;
750 shdr_dynsym = ss->dynshdr;
751 } else if (shdr_rel_plt.sh_link == ss->symtab_idx) {
752 /*
753 * A static executable can have a .plt due to IFUNCs, in which
754 * case .symtab is used not .dynsym.
755 */
756 scn_dynsym = ss->symtab;
757 shdr_dynsym = ss->symshdr;
758 } else {
759 goto out_elf_end;
760 }
761
762 if (!scn_dynsym)
763 return 0;
764
765 /*
766 * Fetch the relocation section to find the idxes to the GOT
767 * and the symbols in the .dynsym they refer to.
768 */
769 ri.reldata = elf_getdata(scn_plt_rel, NULL);
770 if (!ri.reldata)
771 goto out_elf_end;
772
773 syms = elf_getdata(scn_dynsym, NULL);
774 if (syms == NULL)
775 goto out_elf_end;
776
777 scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link);
778 if (scn_symstrs == NULL)
779 goto out_elf_end;
780
781 symstrs = elf_getdata(scn_symstrs, NULL);
782 if (symstrs == NULL)
783 goto out_elf_end;
784
785 if (symstrs->d_size == 0)
786 goto out_elf_end;
787
788 ri.nr_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize;
789
790 ri.is_rela = shdr_rel_plt.sh_type == SHT_RELA;
791
792 if (lazy_plt) {
793 /*
794 * Assume a .plt with the same number of entries as the number
795 * of relocation entries is not lazy and does not have a header.
796 */
797 if (ri.nr_entries * plt_entry_size == shdr_plt.sh_size)
798 dso__delete_symbol(dso, plt_sym);
799 else
800 plt_offset += plt_header_size;
801 }
802
803 /*
804 * x86 doesn't insert IFUNC relocations in .plt order, so sort to get
805 * back in order.
806 */
807 if (machine_is_x86(ehdr.e_machine) && sort_rel(&ri))
808 goto out_elf_end;
809
810 for (idx = 0; idx < ri.nr_entries; idx++) {
811 const char *elf_name = NULL;
812 char *demangled = NULL;
813
814 gelf_getsym(syms, get_rel_symidx(&ri, idx), &sym);
815
816 elf_name = elf_sym__name(&sym, symstrs);
817 demangled = demangle_sym(dso, 0, elf_name);
818 if (demangled)
819 elf_name = demangled;
820 if (*elf_name)
821 snprintf(sympltname, sizeof(sympltname), "%s@plt", elf_name);
822 else if (!get_ifunc_name(elf, dso, &ehdr, &ri, sympltname, sizeof(sympltname)))
823 snprintf(sympltname, sizeof(sympltname),
824 "offset_%#" PRIx64 "@plt", plt_offset);
825 free(demangled);
826
827 f = symbol__new(plt_offset, plt_entry_size, STB_GLOBAL, STT_FUNC, sympltname);
828 if (!f)
829 goto out_elf_end;
830
831 plt_offset += plt_entry_size;
832 symbols__insert(&dso->symbols, f);
833 ++nr;
834 }
835
836 err = 0;
837 out_elf_end:
838 exit_rel(&ri);
839 if (err == 0)
840 return nr;
841 pr_debug("%s: problems reading %s PLT info.\n",
842 __func__, dso->long_name);
843 return 0;
844 }
845
dso__demangle_sym(struct dso * dso,int kmodule,const char * elf_name)846 char *dso__demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
847 {
848 return demangle_sym(dso, kmodule, elf_name);
849 }
850
851 /*
852 * Align offset to 4 bytes as needed for note name and descriptor data.
853 */
854 #define NOTE_ALIGN(n) (((n) + 3) & -4U)
855
elf_read_build_id(Elf * elf,void * bf,size_t size)856 static int elf_read_build_id(Elf *elf, void *bf, size_t size)
857 {
858 int err = -1;
859 GElf_Ehdr ehdr;
860 GElf_Shdr shdr;
861 Elf_Data *data;
862 Elf_Scn *sec;
863 Elf_Kind ek;
864 void *ptr;
865
866 if (size < BUILD_ID_SIZE)
867 goto out;
868
869 ek = elf_kind(elf);
870 if (ek != ELF_K_ELF)
871 goto out;
872
873 if (gelf_getehdr(elf, &ehdr) == NULL) {
874 pr_err("%s: cannot get elf header.\n", __func__);
875 goto out;
876 }
877
878 /*
879 * Check following sections for notes:
880 * '.note.gnu.build-id'
881 * '.notes'
882 * '.note' (VDSO specific)
883 */
884 do {
885 sec = elf_section_by_name(elf, &ehdr, &shdr,
886 ".note.gnu.build-id", NULL);
887 if (sec)
888 break;
889
890 sec = elf_section_by_name(elf, &ehdr, &shdr,
891 ".notes", NULL);
892 if (sec)
893 break;
894
895 sec = elf_section_by_name(elf, &ehdr, &shdr,
896 ".note", NULL);
897 if (sec)
898 break;
899
900 return err;
901
902 } while (0);
903
904 data = elf_getdata(sec, NULL);
905 if (data == NULL)
906 goto out;
907
908 ptr = data->d_buf;
909 while (ptr < (data->d_buf + data->d_size)) {
910 GElf_Nhdr *nhdr = ptr;
911 size_t namesz = NOTE_ALIGN(nhdr->n_namesz),
912 descsz = NOTE_ALIGN(nhdr->n_descsz);
913 const char *name;
914
915 ptr += sizeof(*nhdr);
916 name = ptr;
917 ptr += namesz;
918 if (nhdr->n_type == NT_GNU_BUILD_ID &&
919 nhdr->n_namesz == sizeof("GNU")) {
920 if (memcmp(name, "GNU", sizeof("GNU")) == 0) {
921 size_t sz = min(size, descsz);
922 memcpy(bf, ptr, sz);
923 memset(bf + sz, 0, size - sz);
924 err = sz;
925 break;
926 }
927 }
928 ptr += descsz;
929 }
930
931 out:
932 return err;
933 }
934
935 #ifdef HAVE_LIBBFD_BUILDID_SUPPORT
936
read_build_id(const char * filename,struct build_id * bid)937 static int read_build_id(const char *filename, struct build_id *bid)
938 {
939 size_t size = sizeof(bid->data);
940 int err = -1;
941 bfd *abfd;
942
943 abfd = bfd_openr(filename, NULL);
944 if (!abfd)
945 return -1;
946
947 if (!bfd_check_format(abfd, bfd_object)) {
948 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
949 goto out_close;
950 }
951
952 if (!abfd->build_id || abfd->build_id->size > size)
953 goto out_close;
954
955 memcpy(bid->data, abfd->build_id->data, abfd->build_id->size);
956 memset(bid->data + abfd->build_id->size, 0, size - abfd->build_id->size);
957 err = bid->size = abfd->build_id->size;
958
959 out_close:
960 bfd_close(abfd);
961 return err;
962 }
963
964 #else // HAVE_LIBBFD_BUILDID_SUPPORT
965
read_build_id(const char * filename,struct build_id * bid)966 static int read_build_id(const char *filename, struct build_id *bid)
967 {
968 size_t size = sizeof(bid->data);
969 int fd, err = -1;
970 Elf *elf;
971
972 if (size < BUILD_ID_SIZE)
973 goto out;
974
975 fd = open(filename, O_RDONLY);
976 if (fd < 0)
977 goto out;
978
979 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
980 if (elf == NULL) {
981 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
982 goto out_close;
983 }
984
985 err = elf_read_build_id(elf, bid->data, size);
986 if (err > 0)
987 bid->size = err;
988
989 elf_end(elf);
990 out_close:
991 close(fd);
992 out:
993 return err;
994 }
995
996 #endif // HAVE_LIBBFD_BUILDID_SUPPORT
997
filename__read_build_id(const char * filename,struct build_id * bid)998 int filename__read_build_id(const char *filename, struct build_id *bid)
999 {
1000 struct kmod_path m = { .name = NULL, };
1001 char path[PATH_MAX];
1002 int err;
1003
1004 if (!filename)
1005 return -EFAULT;
1006
1007 err = kmod_path__parse(&m, filename);
1008 if (err)
1009 return -1;
1010
1011 if (m.comp) {
1012 int error = 0, fd;
1013
1014 fd = filename__decompress(filename, path, sizeof(path), m.comp, &error);
1015 if (fd < 0) {
1016 pr_debug("Failed to decompress (error %d) %s\n",
1017 error, filename);
1018 return -1;
1019 }
1020 close(fd);
1021 filename = path;
1022 }
1023
1024 err = read_build_id(filename, bid);
1025
1026 if (m.comp)
1027 unlink(filename);
1028 return err;
1029 }
1030
sysfs__read_build_id(const char * filename,struct build_id * bid)1031 int sysfs__read_build_id(const char *filename, struct build_id *bid)
1032 {
1033 size_t size = sizeof(bid->data);
1034 int fd, err = -1;
1035
1036 fd = open(filename, O_RDONLY);
1037 if (fd < 0)
1038 goto out;
1039
1040 while (1) {
1041 char bf[BUFSIZ];
1042 GElf_Nhdr nhdr;
1043 size_t namesz, descsz;
1044
1045 if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr))
1046 break;
1047
1048 namesz = NOTE_ALIGN(nhdr.n_namesz);
1049 descsz = NOTE_ALIGN(nhdr.n_descsz);
1050 if (nhdr.n_type == NT_GNU_BUILD_ID &&
1051 nhdr.n_namesz == sizeof("GNU")) {
1052 if (read(fd, bf, namesz) != (ssize_t)namesz)
1053 break;
1054 if (memcmp(bf, "GNU", sizeof("GNU")) == 0) {
1055 size_t sz = min(descsz, size);
1056 if (read(fd, bid->data, sz) == (ssize_t)sz) {
1057 memset(bid->data + sz, 0, size - sz);
1058 bid->size = sz;
1059 err = 0;
1060 break;
1061 }
1062 } else if (read(fd, bf, descsz) != (ssize_t)descsz)
1063 break;
1064 } else {
1065 int n = namesz + descsz;
1066
1067 if (n > (int)sizeof(bf)) {
1068 n = sizeof(bf);
1069 pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
1070 __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
1071 }
1072 if (read(fd, bf, n) != n)
1073 break;
1074 }
1075 }
1076 close(fd);
1077 out:
1078 return err;
1079 }
1080
1081 #ifdef HAVE_LIBBFD_SUPPORT
1082
filename__read_debuglink(const char * filename,char * debuglink,size_t size)1083 int filename__read_debuglink(const char *filename, char *debuglink,
1084 size_t size)
1085 {
1086 int err = -1;
1087 asection *section;
1088 bfd *abfd;
1089
1090 abfd = bfd_openr(filename, NULL);
1091 if (!abfd)
1092 return -1;
1093
1094 if (!bfd_check_format(abfd, bfd_object)) {
1095 pr_debug2("%s: cannot read %s bfd file.\n", __func__, filename);
1096 goto out_close;
1097 }
1098
1099 section = bfd_get_section_by_name(abfd, ".gnu_debuglink");
1100 if (!section)
1101 goto out_close;
1102
1103 if (section->size > size)
1104 goto out_close;
1105
1106 if (!bfd_get_section_contents(abfd, section, debuglink, 0,
1107 section->size))
1108 goto out_close;
1109
1110 err = 0;
1111
1112 out_close:
1113 bfd_close(abfd);
1114 return err;
1115 }
1116
1117 #else
1118
filename__read_debuglink(const char * filename,char * debuglink,size_t size)1119 int filename__read_debuglink(const char *filename, char *debuglink,
1120 size_t size)
1121 {
1122 int fd, err = -1;
1123 Elf *elf;
1124 GElf_Ehdr ehdr;
1125 GElf_Shdr shdr;
1126 Elf_Data *data;
1127 Elf_Scn *sec;
1128 Elf_Kind ek;
1129
1130 fd = open(filename, O_RDONLY);
1131 if (fd < 0)
1132 goto out;
1133
1134 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1135 if (elf == NULL) {
1136 pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
1137 goto out_close;
1138 }
1139
1140 ek = elf_kind(elf);
1141 if (ek != ELF_K_ELF)
1142 goto out_elf_end;
1143
1144 if (gelf_getehdr(elf, &ehdr) == NULL) {
1145 pr_err("%s: cannot get elf header.\n", __func__);
1146 goto out_elf_end;
1147 }
1148
1149 sec = elf_section_by_name(elf, &ehdr, &shdr,
1150 ".gnu_debuglink", NULL);
1151 if (sec == NULL)
1152 goto out_elf_end;
1153
1154 data = elf_getdata(sec, NULL);
1155 if (data == NULL)
1156 goto out_elf_end;
1157
1158 /* the start of this section is a zero-terminated string */
1159 strncpy(debuglink, data->d_buf, size);
1160
1161 err = 0;
1162
1163 out_elf_end:
1164 elf_end(elf);
1165 out_close:
1166 close(fd);
1167 out:
1168 return err;
1169 }
1170
1171 #endif
1172
dso__swap_init(struct dso * dso,unsigned char eidata)1173 static int dso__swap_init(struct dso *dso, unsigned char eidata)
1174 {
1175 static unsigned int const endian = 1;
1176
1177 dso->needs_swap = DSO_SWAP__NO;
1178
1179 switch (eidata) {
1180 case ELFDATA2LSB:
1181 /* We are big endian, DSO is little endian. */
1182 if (*(unsigned char const *)&endian != 1)
1183 dso->needs_swap = DSO_SWAP__YES;
1184 break;
1185
1186 case ELFDATA2MSB:
1187 /* We are little endian, DSO is big endian. */
1188 if (*(unsigned char const *)&endian != 0)
1189 dso->needs_swap = DSO_SWAP__YES;
1190 break;
1191
1192 default:
1193 pr_err("unrecognized DSO data encoding %d\n", eidata);
1194 return -EINVAL;
1195 }
1196
1197 return 0;
1198 }
1199
symsrc__possibly_runtime(struct symsrc * ss)1200 bool symsrc__possibly_runtime(struct symsrc *ss)
1201 {
1202 return ss->dynsym || ss->opdsec;
1203 }
1204
symsrc__has_symtab(struct symsrc * ss)1205 bool symsrc__has_symtab(struct symsrc *ss)
1206 {
1207 return ss->symtab != NULL;
1208 }
1209
symsrc__destroy(struct symsrc * ss)1210 void symsrc__destroy(struct symsrc *ss)
1211 {
1212 zfree(&ss->name);
1213 elf_end(ss->elf);
1214 close(ss->fd);
1215 }
1216
elf__needs_adjust_symbols(GElf_Ehdr ehdr)1217 bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
1218 {
1219 /*
1220 * Usually vmlinux is an ELF file with type ET_EXEC for most
1221 * architectures; except Arm64 kernel is linked with option
1222 * '-share', so need to check type ET_DYN.
1223 */
1224 return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL ||
1225 ehdr.e_type == ET_DYN;
1226 }
1227
symsrc__init(struct symsrc * ss,struct dso * dso,const char * name,enum dso_binary_type type)1228 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
1229 enum dso_binary_type type)
1230 {
1231 GElf_Ehdr ehdr;
1232 Elf *elf;
1233 int fd;
1234
1235 if (dso__needs_decompress(dso)) {
1236 fd = dso__decompress_kmodule_fd(dso, name);
1237 if (fd < 0)
1238 return -1;
1239
1240 type = dso->symtab_type;
1241 } else {
1242 fd = open(name, O_RDONLY);
1243 if (fd < 0) {
1244 dso->load_errno = errno;
1245 return -1;
1246 }
1247 }
1248
1249 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1250 if (elf == NULL) {
1251 pr_debug("%s: cannot read %s ELF file.\n", __func__, name);
1252 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
1253 goto out_close;
1254 }
1255
1256 if (gelf_getehdr(elf, &ehdr) == NULL) {
1257 dso->load_errno = DSO_LOAD_ERRNO__INVALID_ELF;
1258 pr_debug("%s: cannot get elf header.\n", __func__);
1259 goto out_elf_end;
1260 }
1261
1262 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) {
1263 dso->load_errno = DSO_LOAD_ERRNO__INTERNAL_ERROR;
1264 goto out_elf_end;
1265 }
1266
1267 /* Always reject images with a mismatched build-id: */
1268 if (dso->has_build_id && !symbol_conf.ignore_vmlinux_buildid) {
1269 u8 build_id[BUILD_ID_SIZE];
1270 struct build_id bid;
1271 int size;
1272
1273 size = elf_read_build_id(elf, build_id, BUILD_ID_SIZE);
1274 if (size <= 0) {
1275 dso->load_errno = DSO_LOAD_ERRNO__CANNOT_READ_BUILDID;
1276 goto out_elf_end;
1277 }
1278
1279 build_id__init(&bid, build_id, size);
1280 if (!dso__build_id_equal(dso, &bid)) {
1281 pr_debug("%s: build id mismatch for %s.\n", __func__, name);
1282 dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
1283 goto out_elf_end;
1284 }
1285 }
1286
1287 ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1288
1289 ss->symtab_idx = 0;
1290 ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
1291 &ss->symtab_idx);
1292 if (ss->symshdr.sh_type != SHT_SYMTAB)
1293 ss->symtab = NULL;
1294
1295 ss->dynsym_idx = 0;
1296 ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym",
1297 &ss->dynsym_idx);
1298 if (ss->dynshdr.sh_type != SHT_DYNSYM)
1299 ss->dynsym = NULL;
1300
1301 ss->opdidx = 0;
1302 ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd",
1303 &ss->opdidx);
1304 if (ss->opdshdr.sh_type != SHT_PROGBITS)
1305 ss->opdsec = NULL;
1306
1307 if (dso->kernel == DSO_SPACE__USER)
1308 ss->adjust_symbols = true;
1309 else
1310 ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
1311
1312 ss->name = strdup(name);
1313 if (!ss->name) {
1314 dso->load_errno = errno;
1315 goto out_elf_end;
1316 }
1317
1318 ss->elf = elf;
1319 ss->fd = fd;
1320 ss->ehdr = ehdr;
1321 ss->type = type;
1322
1323 return 0;
1324
1325 out_elf_end:
1326 elf_end(elf);
1327 out_close:
1328 close(fd);
1329 return -1;
1330 }
1331
1332 /**
1333 * ref_reloc_sym_not_found - has kernel relocation symbol been found.
1334 * @kmap: kernel maps and relocation reference symbol
1335 *
1336 * This function returns %true if we are dealing with the kernel maps and the
1337 * relocation reference symbol has not yet been found. Otherwise %false is
1338 * returned.
1339 */
ref_reloc_sym_not_found(struct kmap * kmap)1340 static bool ref_reloc_sym_not_found(struct kmap *kmap)
1341 {
1342 return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
1343 !kmap->ref_reloc_sym->unrelocated_addr;
1344 }
1345
1346 /**
1347 * ref_reloc - kernel relocation offset.
1348 * @kmap: kernel maps and relocation reference symbol
1349 *
1350 * This function returns the offset of kernel addresses as determined by using
1351 * the relocation reference symbol i.e. if the kernel has not been relocated
1352 * then the return value is zero.
1353 */
ref_reloc(struct kmap * kmap)1354 static u64 ref_reloc(struct kmap *kmap)
1355 {
1356 if (kmap && kmap->ref_reloc_sym &&
1357 kmap->ref_reloc_sym->unrelocated_addr)
1358 return kmap->ref_reloc_sym->addr -
1359 kmap->ref_reloc_sym->unrelocated_addr;
1360 return 0;
1361 }
1362
arch__sym_update(struct symbol * s __maybe_unused,GElf_Sym * sym __maybe_unused)1363 void __weak arch__sym_update(struct symbol *s __maybe_unused,
1364 GElf_Sym *sym __maybe_unused) { }
1365
dso__process_kernel_symbol(struct dso * dso,struct map * map,GElf_Sym * sym,GElf_Shdr * shdr,struct maps * kmaps,struct kmap * kmap,struct dso ** curr_dsop,struct map ** curr_mapp,const char * section_name,bool adjust_kernel_syms,bool kmodule,bool * remap_kernel)1366 static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
1367 GElf_Sym *sym, GElf_Shdr *shdr,
1368 struct maps *kmaps, struct kmap *kmap,
1369 struct dso **curr_dsop, struct map **curr_mapp,
1370 const char *section_name,
1371 bool adjust_kernel_syms, bool kmodule, bool *remap_kernel)
1372 {
1373 struct dso *curr_dso = *curr_dsop;
1374 struct map *curr_map;
1375 char dso_name[PATH_MAX];
1376
1377 /* Adjust symbol to map to file offset */
1378 if (adjust_kernel_syms)
1379 sym->st_value -= shdr->sh_addr - shdr->sh_offset;
1380
1381 if (strcmp(section_name, (curr_dso->short_name + dso->short_name_len)) == 0)
1382 return 0;
1383
1384 if (strcmp(section_name, ".text") == 0) {
1385 /*
1386 * The initial kernel mapping is based on
1387 * kallsyms and identity maps. Overwrite it to
1388 * map to the kernel dso.
1389 */
1390 if (*remap_kernel && dso->kernel && !kmodule) {
1391 *remap_kernel = false;
1392 map__set_start(map, shdr->sh_addr + ref_reloc(kmap));
1393 map__set_end(map, map__start(map) + shdr->sh_size);
1394 map__set_pgoff(map, shdr->sh_offset);
1395 map__set_map_ip(map, map__dso_map_ip);
1396 map__set_unmap_ip(map, map__dso_unmap_ip);
1397 /* Ensure maps are correctly ordered */
1398 if (kmaps) {
1399 int err;
1400 struct map *tmp = map__get(map);
1401
1402 maps__remove(kmaps, map);
1403 err = maps__insert(kmaps, map);
1404 map__put(tmp);
1405 if (err)
1406 return err;
1407 }
1408 }
1409
1410 /*
1411 * The initial module mapping is based on
1412 * /proc/modules mapped to offset zero.
1413 * Overwrite it to map to the module dso.
1414 */
1415 if (*remap_kernel && kmodule) {
1416 *remap_kernel = false;
1417 map__set_pgoff(map, shdr->sh_offset);
1418 }
1419
1420 *curr_mapp = map;
1421 *curr_dsop = dso;
1422 return 0;
1423 }
1424
1425 if (!kmap)
1426 return 0;
1427
1428 snprintf(dso_name, sizeof(dso_name), "%s%s", dso->short_name, section_name);
1429
1430 curr_map = maps__find_by_name(kmaps, dso_name);
1431 if (curr_map == NULL) {
1432 u64 start = sym->st_value;
1433
1434 if (kmodule)
1435 start += map__start(map) + shdr->sh_offset;
1436
1437 curr_dso = dso__new(dso_name);
1438 if (curr_dso == NULL)
1439 return -1;
1440 curr_dso->kernel = dso->kernel;
1441 curr_dso->long_name = dso->long_name;
1442 curr_dso->long_name_len = dso->long_name_len;
1443 curr_dso->binary_type = dso->binary_type;
1444 curr_dso->adjust_symbols = dso->adjust_symbols;
1445 curr_map = map__new2(start, curr_dso);
1446 dso__put(curr_dso);
1447 if (curr_map == NULL)
1448 return -1;
1449
1450 if (curr_dso->kernel)
1451 map__kmap(curr_map)->kmaps = kmaps;
1452
1453 if (adjust_kernel_syms) {
1454 map__set_start(curr_map, shdr->sh_addr + ref_reloc(kmap));
1455 map__set_end(curr_map, map__start(curr_map) + shdr->sh_size);
1456 map__set_pgoff(curr_map, shdr->sh_offset);
1457 } else {
1458 map__set_map_ip(curr_map, identity__map_ip);
1459 map__set_unmap_ip(curr_map, identity__map_ip);
1460 }
1461 curr_dso->symtab_type = dso->symtab_type;
1462 if (maps__insert(kmaps, curr_map))
1463 return -1;
1464 /*
1465 * Add it before we drop the reference to curr_map, i.e. while
1466 * we still are sure to have a reference to this DSO via
1467 * *curr_map->dso.
1468 */
1469 dsos__add(&maps__machine(kmaps)->dsos, curr_dso);
1470 /* kmaps already got it */
1471 map__put(curr_map);
1472 dso__set_loaded(curr_dso);
1473 *curr_mapp = curr_map;
1474 *curr_dsop = curr_dso;
1475 } else
1476 *curr_dsop = map__dso(curr_map);
1477
1478 return 0;
1479 }
1480
1481 static int
dso__load_sym_internal(struct dso * dso,struct map * map,struct symsrc * syms_ss,struct symsrc * runtime_ss,int kmodule,int dynsym)1482 dso__load_sym_internal(struct dso *dso, struct map *map, struct symsrc *syms_ss,
1483 struct symsrc *runtime_ss, int kmodule, int dynsym)
1484 {
1485 struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL;
1486 struct maps *kmaps = kmap ? map__kmaps(map) : NULL;
1487 struct map *curr_map = map;
1488 struct dso *curr_dso = dso;
1489 Elf_Data *symstrs, *secstrs, *secstrs_run, *secstrs_sym;
1490 uint32_t nr_syms;
1491 int err = -1;
1492 uint32_t idx;
1493 GElf_Ehdr ehdr;
1494 GElf_Shdr shdr;
1495 GElf_Shdr tshdr;
1496 Elf_Data *syms, *opddata = NULL;
1497 GElf_Sym sym;
1498 Elf_Scn *sec, *sec_strndx;
1499 Elf *elf;
1500 int nr = 0;
1501 bool remap_kernel = false, adjust_kernel_syms = false;
1502
1503 if (kmap && !kmaps)
1504 return -1;
1505
1506 elf = syms_ss->elf;
1507 ehdr = syms_ss->ehdr;
1508 if (dynsym) {
1509 sec = syms_ss->dynsym;
1510 shdr = syms_ss->dynshdr;
1511 } else {
1512 sec = syms_ss->symtab;
1513 shdr = syms_ss->symshdr;
1514 }
1515
1516 if (elf_section_by_name(runtime_ss->elf, &runtime_ss->ehdr, &tshdr,
1517 ".text", NULL))
1518 dso->text_offset = tshdr.sh_addr - tshdr.sh_offset;
1519
1520 if (runtime_ss->opdsec)
1521 opddata = elf_rawdata(runtime_ss->opdsec, NULL);
1522
1523 syms = elf_getdata(sec, NULL);
1524 if (syms == NULL)
1525 goto out_elf_end;
1526
1527 sec = elf_getscn(elf, shdr.sh_link);
1528 if (sec == NULL)
1529 goto out_elf_end;
1530
1531 symstrs = elf_getdata(sec, NULL);
1532 if (symstrs == NULL)
1533 goto out_elf_end;
1534
1535 sec_strndx = elf_getscn(runtime_ss->elf, runtime_ss->ehdr.e_shstrndx);
1536 if (sec_strndx == NULL)
1537 goto out_elf_end;
1538
1539 secstrs_run = elf_getdata(sec_strndx, NULL);
1540 if (secstrs_run == NULL)
1541 goto out_elf_end;
1542
1543 sec_strndx = elf_getscn(elf, ehdr.e_shstrndx);
1544 if (sec_strndx == NULL)
1545 goto out_elf_end;
1546
1547 secstrs_sym = elf_getdata(sec_strndx, NULL);
1548 if (secstrs_sym == NULL)
1549 goto out_elf_end;
1550
1551 nr_syms = shdr.sh_size / shdr.sh_entsize;
1552
1553 memset(&sym, 0, sizeof(sym));
1554
1555 /*
1556 * The kernel relocation symbol is needed in advance in order to adjust
1557 * kernel maps correctly.
1558 */
1559 if (ref_reloc_sym_not_found(kmap)) {
1560 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1561 const char *elf_name = elf_sym__name(&sym, symstrs);
1562
1563 if (strcmp(elf_name, kmap->ref_reloc_sym->name))
1564 continue;
1565 kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
1566 map__set_reloc(map, kmap->ref_reloc_sym->addr - kmap->ref_reloc_sym->unrelocated_addr);
1567 break;
1568 }
1569 }
1570
1571 /*
1572 * Handle any relocation of vdso necessary because older kernels
1573 * attempted to prelink vdso to its virtual address.
1574 */
1575 if (dso__is_vdso(dso))
1576 map__set_reloc(map, map__start(map) - dso->text_offset);
1577
1578 dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
1579 /*
1580 * Initial kernel and module mappings do not map to the dso.
1581 * Flag the fixups.
1582 */
1583 if (dso->kernel) {
1584 remap_kernel = true;
1585 adjust_kernel_syms = dso->adjust_symbols;
1586 }
1587 elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
1588 struct symbol *f;
1589 const char *elf_name = elf_sym__name(&sym, symstrs);
1590 char *demangled = NULL;
1591 int is_label = elf_sym__is_label(&sym);
1592 const char *section_name;
1593 bool used_opd = false;
1594
1595 if (!is_label && !elf_sym__filter(&sym))
1596 continue;
1597
1598 /* Reject ARM ELF "mapping symbols": these aren't unique and
1599 * don't identify functions, so will confuse the profile
1600 * output: */
1601 if (ehdr.e_machine == EM_ARM || ehdr.e_machine == EM_AARCH64) {
1602 if (elf_name[0] == '$' && strchr("adtx", elf_name[1])
1603 && (elf_name[2] == '\0' || elf_name[2] == '.'))
1604 continue;
1605 }
1606
1607 if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) {
1608 u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr;
1609 u64 *opd = opddata->d_buf + offset;
1610 sym.st_value = DSO__SWAP(dso, u64, *opd);
1611 sym.st_shndx = elf_addr_to_index(runtime_ss->elf,
1612 sym.st_value);
1613 used_opd = true;
1614 }
1615
1616 /*
1617 * When loading symbols in a data mapping, ABS symbols (which
1618 * has a value of SHN_ABS in its st_shndx) failed at
1619 * elf_getscn(). And it marks the loading as a failure so
1620 * already loaded symbols cannot be fixed up.
1621 *
1622 * I'm not sure what should be done. Just ignore them for now.
1623 * - Namhyung Kim
1624 */
1625 if (sym.st_shndx == SHN_ABS)
1626 continue;
1627
1628 sec = elf_getscn(syms_ss->elf, sym.st_shndx);
1629 if (!sec)
1630 goto out_elf_end;
1631
1632 gelf_getshdr(sec, &shdr);
1633
1634 /*
1635 * If the attribute bit SHF_ALLOC is not set, the section
1636 * doesn't occupy memory during process execution.
1637 * E.g. ".gnu.warning.*" section is used by linker to generate
1638 * warnings when calling deprecated functions, the symbols in
1639 * the section aren't loaded to memory during process execution,
1640 * so skip them.
1641 */
1642 if (!(shdr.sh_flags & SHF_ALLOC))
1643 continue;
1644
1645 secstrs = secstrs_sym;
1646
1647 /*
1648 * We have to fallback to runtime when syms' section header has
1649 * NOBITS set. NOBITS results in file offset (sh_offset) not
1650 * being incremented. So sh_offset used below has different
1651 * values for syms (invalid) and runtime (valid).
1652 */
1653 if (shdr.sh_type == SHT_NOBITS) {
1654 sec = elf_getscn(runtime_ss->elf, sym.st_shndx);
1655 if (!sec)
1656 goto out_elf_end;
1657
1658 gelf_getshdr(sec, &shdr);
1659 secstrs = secstrs_run;
1660 }
1661
1662 if (is_label && !elf_sec__filter(&shdr, secstrs))
1663 continue;
1664
1665 section_name = elf_sec__name(&shdr, secstrs);
1666
1667 /* On ARM, symbols for thumb functions have 1 added to
1668 * the symbol address as a flag - remove it */
1669 if ((ehdr.e_machine == EM_ARM) &&
1670 (GELF_ST_TYPE(sym.st_info) == STT_FUNC) &&
1671 (sym.st_value & 1))
1672 --sym.st_value;
1673
1674 if (dso->kernel) {
1675 if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map,
1676 section_name, adjust_kernel_syms, kmodule, &remap_kernel))
1677 goto out_elf_end;
1678 } else if ((used_opd && runtime_ss->adjust_symbols) ||
1679 (!used_opd && syms_ss->adjust_symbols)) {
1680 GElf_Phdr phdr;
1681
1682 if (elf_read_program_header(runtime_ss->elf,
1683 (u64)sym.st_value, &phdr)) {
1684 pr_debug4("%s: failed to find program header for "
1685 "symbol: %s st_value: %#" PRIx64 "\n",
1686 __func__, elf_name, (u64)sym.st_value);
1687 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1688 "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n",
1689 __func__, (u64)sym.st_value, (u64)shdr.sh_addr,
1690 (u64)shdr.sh_offset);
1691 /*
1692 * Fail to find program header, let's rollback
1693 * to use shdr.sh_addr and shdr.sh_offset to
1694 * calibrate symbol's file address, though this
1695 * is not necessary for normal C ELF file, we
1696 * still need to handle java JIT symbols in this
1697 * case.
1698 */
1699 sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1700 } else {
1701 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1702 "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
1703 __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
1704 (u64)phdr.p_offset);
1705 sym.st_value -= phdr.p_vaddr - phdr.p_offset;
1706 }
1707 }
1708
1709 demangled = demangle_sym(dso, kmodule, elf_name);
1710 if (demangled != NULL)
1711 elf_name = demangled;
1712
1713 f = symbol__new(sym.st_value, sym.st_size,
1714 GELF_ST_BIND(sym.st_info),
1715 GELF_ST_TYPE(sym.st_info), elf_name);
1716 free(demangled);
1717 if (!f)
1718 goto out_elf_end;
1719
1720 arch__sym_update(f, &sym);
1721
1722 __symbols__insert(&curr_dso->symbols, f, dso->kernel);
1723 nr++;
1724 }
1725
1726 /*
1727 * For misannotated, zeroed, ASM function sizes.
1728 */
1729 if (nr > 0) {
1730 symbols__fixup_end(&dso->symbols, false);
1731 symbols__fixup_duplicate(&dso->symbols);
1732 if (kmap) {
1733 /*
1734 * We need to fixup this here too because we create new
1735 * maps here, for things like vsyscall sections.
1736 */
1737 maps__fixup_end(kmaps);
1738 }
1739 }
1740 err = nr;
1741 out_elf_end:
1742 return err;
1743 }
1744
dso__load_sym(struct dso * dso,struct map * map,struct symsrc * syms_ss,struct symsrc * runtime_ss,int kmodule)1745 int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
1746 struct symsrc *runtime_ss, int kmodule)
1747 {
1748 int nr = 0;
1749 int err = -1;
1750
1751 dso->symtab_type = syms_ss->type;
1752 dso->is_64_bit = syms_ss->is_64_bit;
1753 dso->rel = syms_ss->ehdr.e_type == ET_REL;
1754
1755 /*
1756 * Modules may already have symbols from kallsyms, but those symbols
1757 * have the wrong values for the dso maps, so remove them.
1758 */
1759 if (kmodule && syms_ss->symtab)
1760 symbols__delete(&dso->symbols);
1761
1762 if (!syms_ss->symtab) {
1763 /*
1764 * If the vmlinux is stripped, fail so we will fall back
1765 * to using kallsyms. The vmlinux runtime symbols aren't
1766 * of much use.
1767 */
1768 if (dso->kernel)
1769 return err;
1770 } else {
1771 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
1772 kmodule, 0);
1773 if (err < 0)
1774 return err;
1775 nr = err;
1776 }
1777
1778 if (syms_ss->dynsym) {
1779 err = dso__load_sym_internal(dso, map, syms_ss, runtime_ss,
1780 kmodule, 1);
1781 if (err < 0)
1782 return err;
1783 err += nr;
1784 }
1785
1786 return err;
1787 }
1788
elf_read_maps(Elf * elf,bool exe,mapfn_t mapfn,void * data)1789 static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
1790 {
1791 GElf_Phdr phdr;
1792 size_t i, phdrnum;
1793 int err;
1794 u64 sz;
1795
1796 if (elf_getphdrnum(elf, &phdrnum))
1797 return -1;
1798
1799 for (i = 0; i < phdrnum; i++) {
1800 if (gelf_getphdr(elf, i, &phdr) == NULL)
1801 return -1;
1802 if (phdr.p_type != PT_LOAD)
1803 continue;
1804 if (exe) {
1805 if (!(phdr.p_flags & PF_X))
1806 continue;
1807 } else {
1808 if (!(phdr.p_flags & PF_R))
1809 continue;
1810 }
1811 sz = min(phdr.p_memsz, phdr.p_filesz);
1812 if (!sz)
1813 continue;
1814 err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
1815 if (err)
1816 return err;
1817 }
1818 return 0;
1819 }
1820
file__read_maps(int fd,bool exe,mapfn_t mapfn,void * data,bool * is_64_bit)1821 int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
1822 bool *is_64_bit)
1823 {
1824 int err;
1825 Elf *elf;
1826
1827 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1828 if (elf == NULL)
1829 return -1;
1830
1831 if (is_64_bit)
1832 *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
1833
1834 err = elf_read_maps(elf, exe, mapfn, data);
1835
1836 elf_end(elf);
1837 return err;
1838 }
1839
dso__type_fd(int fd)1840 enum dso_type dso__type_fd(int fd)
1841 {
1842 enum dso_type dso_type = DSO__TYPE_UNKNOWN;
1843 GElf_Ehdr ehdr;
1844 Elf_Kind ek;
1845 Elf *elf;
1846
1847 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
1848 if (elf == NULL)
1849 goto out;
1850
1851 ek = elf_kind(elf);
1852 if (ek != ELF_K_ELF)
1853 goto out_end;
1854
1855 if (gelf_getclass(elf) == ELFCLASS64) {
1856 dso_type = DSO__TYPE_64BIT;
1857 goto out_end;
1858 }
1859
1860 if (gelf_getehdr(elf, &ehdr) == NULL)
1861 goto out_end;
1862
1863 if (ehdr.e_machine == EM_X86_64)
1864 dso_type = DSO__TYPE_X32BIT;
1865 else
1866 dso_type = DSO__TYPE_32BIT;
1867 out_end:
1868 elf_end(elf);
1869 out:
1870 return dso_type;
1871 }
1872
copy_bytes(int from,off_t from_offs,int to,off_t to_offs,u64 len)1873 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
1874 {
1875 ssize_t r;
1876 size_t n;
1877 int err = -1;
1878 char *buf = malloc(page_size);
1879
1880 if (buf == NULL)
1881 return -1;
1882
1883 if (lseek(to, to_offs, SEEK_SET) != to_offs)
1884 goto out;
1885
1886 if (lseek(from, from_offs, SEEK_SET) != from_offs)
1887 goto out;
1888
1889 while (len) {
1890 n = page_size;
1891 if (len < n)
1892 n = len;
1893 /* Use read because mmap won't work on proc files */
1894 r = read(from, buf, n);
1895 if (r < 0)
1896 goto out;
1897 if (!r)
1898 break;
1899 n = r;
1900 r = write(to, buf, n);
1901 if (r < 0)
1902 goto out;
1903 if ((size_t)r != n)
1904 goto out;
1905 len -= n;
1906 }
1907
1908 err = 0;
1909 out:
1910 free(buf);
1911 return err;
1912 }
1913
1914 struct kcore {
1915 int fd;
1916 int elfclass;
1917 Elf *elf;
1918 GElf_Ehdr ehdr;
1919 };
1920
kcore__open(struct kcore * kcore,const char * filename)1921 static int kcore__open(struct kcore *kcore, const char *filename)
1922 {
1923 GElf_Ehdr *ehdr;
1924
1925 kcore->fd = open(filename, O_RDONLY);
1926 if (kcore->fd == -1)
1927 return -1;
1928
1929 kcore->elf = elf_begin(kcore->fd, ELF_C_READ, NULL);
1930 if (!kcore->elf)
1931 goto out_close;
1932
1933 kcore->elfclass = gelf_getclass(kcore->elf);
1934 if (kcore->elfclass == ELFCLASSNONE)
1935 goto out_end;
1936
1937 ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
1938 if (!ehdr)
1939 goto out_end;
1940
1941 return 0;
1942
1943 out_end:
1944 elf_end(kcore->elf);
1945 out_close:
1946 close(kcore->fd);
1947 return -1;
1948 }
1949
kcore__init(struct kcore * kcore,char * filename,int elfclass,bool temp)1950 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
1951 bool temp)
1952 {
1953 kcore->elfclass = elfclass;
1954
1955 if (temp)
1956 kcore->fd = mkstemp(filename);
1957 else
1958 kcore->fd = open(filename, O_WRONLY | O_CREAT | O_EXCL, 0400);
1959 if (kcore->fd == -1)
1960 return -1;
1961
1962 kcore->elf = elf_begin(kcore->fd, ELF_C_WRITE, NULL);
1963 if (!kcore->elf)
1964 goto out_close;
1965
1966 if (!gelf_newehdr(kcore->elf, elfclass))
1967 goto out_end;
1968
1969 memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
1970
1971 return 0;
1972
1973 out_end:
1974 elf_end(kcore->elf);
1975 out_close:
1976 close(kcore->fd);
1977 unlink(filename);
1978 return -1;
1979 }
1980
kcore__close(struct kcore * kcore)1981 static void kcore__close(struct kcore *kcore)
1982 {
1983 elf_end(kcore->elf);
1984 close(kcore->fd);
1985 }
1986
kcore__copy_hdr(struct kcore * from,struct kcore * to,size_t count)1987 static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
1988 {
1989 GElf_Ehdr *ehdr = &to->ehdr;
1990 GElf_Ehdr *kehdr = &from->ehdr;
1991
1992 memcpy(ehdr->e_ident, kehdr->e_ident, EI_NIDENT);
1993 ehdr->e_type = kehdr->e_type;
1994 ehdr->e_machine = kehdr->e_machine;
1995 ehdr->e_version = kehdr->e_version;
1996 ehdr->e_entry = 0;
1997 ehdr->e_shoff = 0;
1998 ehdr->e_flags = kehdr->e_flags;
1999 ehdr->e_phnum = count;
2000 ehdr->e_shentsize = 0;
2001 ehdr->e_shnum = 0;
2002 ehdr->e_shstrndx = 0;
2003
2004 if (from->elfclass == ELFCLASS32) {
2005 ehdr->e_phoff = sizeof(Elf32_Ehdr);
2006 ehdr->e_ehsize = sizeof(Elf32_Ehdr);
2007 ehdr->e_phentsize = sizeof(Elf32_Phdr);
2008 } else {
2009 ehdr->e_phoff = sizeof(Elf64_Ehdr);
2010 ehdr->e_ehsize = sizeof(Elf64_Ehdr);
2011 ehdr->e_phentsize = sizeof(Elf64_Phdr);
2012 }
2013
2014 if (!gelf_update_ehdr(to->elf, ehdr))
2015 return -1;
2016
2017 if (!gelf_newphdr(to->elf, count))
2018 return -1;
2019
2020 return 0;
2021 }
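
/*
 * kcore__copy_hdr() keeps only the ELF header and program headers from the
 * source: the section header fields (e_shoff, e_shnum, e_shstrndx,
 * e_shentsize) are zeroed, and e_phoff is set so the program header table
 * immediately follows the ELF header for the given ELF class.
 */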
2022
2023 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
2024 u64 addr, u64 len)
2025 {
2026 GElf_Phdr phdr = {
2027 .p_type = PT_LOAD,
2028 .p_flags = PF_R | PF_W | PF_X,
2029 .p_offset = offset,
2030 .p_vaddr = addr,
2031 .p_paddr = 0,
2032 .p_filesz = len,
2033 .p_memsz = len,
2034 .p_align = page_size,
2035 };
2036
2037 if (!gelf_update_phdr(kcore->elf, idx, &phdr))
2038 return -1;
2039
2040 return 0;
2041 }
2042
2043 static off_t kcore__write(struct kcore *kcore)
2044 {
2045 return elf_update(kcore->elf, ELF_C_WRITE);
2046 }
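
/*
 * The kcore helpers above form a minimal write path for producing a pruned
 * copy of kcore: kcore__open() the source, kcore__init() the destination,
 * kcore__copy_hdr() and kcore__add_phdr() to describe the program headers,
 * kcore__write() to emit the ELF header and program header table, then
 * copy_bytes() for the segment data.  kcore_copy() and
 * kcore_extract__create() below follow this sequence.
 */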
2047
2048 struct phdr_data {
2049 off_t offset;
2050 off_t rel;
2051 u64 addr;
2052 u64 len;
2053 struct list_head node;
2054 struct phdr_data *remaps;
2055 };
2056
2057 struct sym_data {
2058 u64 addr;
2059 struct list_head node;
2060 };
2061
2062 struct kcore_copy_info {
2063 u64 stext;
2064 u64 etext;
2065 u64 first_symbol;
2066 u64 last_symbol;
2067 u64 first_module;
2068 u64 first_module_symbol;
2069 u64 last_module_symbol;
2070 size_t phnum;
2071 struct list_head phdrs;
2072 struct list_head syms;
2073 };
2074
2075 #define kcore_copy__for_each_phdr(k, p) \
2076 list_for_each_entry((p), &(k)->phdrs, node)
2077
2078 static struct phdr_data *phdr_data__new(u64 addr, u64 len, off_t offset)
2079 {
2080 struct phdr_data *p = zalloc(sizeof(*p));
2081
2082 if (p) {
2083 p->addr = addr;
2084 p->len = len;
2085 p->offset = offset;
2086 }
2087
2088 return p;
2089 }
2090
2091 static struct phdr_data *kcore_copy_info__addnew(struct kcore_copy_info *kci,
2092 u64 addr, u64 len,
2093 off_t offset)
2094 {
2095 struct phdr_data *p = phdr_data__new(addr, len, offset);
2096
2097 if (p)
2098 list_add_tail(&p->node, &kci->phdrs);
2099
2100 return p;
2101 }
2102
2103 static void kcore_copy__free_phdrs(struct kcore_copy_info *kci)
2104 {
2105 struct phdr_data *p, *tmp;
2106
2107 list_for_each_entry_safe(p, tmp, &kci->phdrs, node) {
2108 list_del_init(&p->node);
2109 free(p);
2110 }
2111 }
2112
2113 static struct sym_data *kcore_copy__new_sym(struct kcore_copy_info *kci,
2114 u64 addr)
2115 {
2116 struct sym_data *s = zalloc(sizeof(*s));
2117
2118 if (s) {
2119 s->addr = addr;
2120 list_add_tail(&s->node, &kci->syms);
2121 }
2122
2123 return s;
2124 }
2125
2126 static void kcore_copy__free_syms(struct kcore_copy_info *kci)
2127 {
2128 struct sym_data *s, *tmp;
2129
2130 list_for_each_entry_safe(s, tmp, &kci->syms, node) {
2131 list_del_init(&s->node);
2132 free(s);
2133 }
2134 }
2135
2136 static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
2137 u64 start)
2138 {
2139 struct kcore_copy_info *kci = arg;
2140
2141 if (!kallsyms__is_function(type))
2142 return 0;
2143
2144 if (strchr(name, '[')) {
2145 if (!kci->first_module_symbol || start < kci->first_module_symbol)
2146 kci->first_module_symbol = start;
2147 if (start > kci->last_module_symbol)
2148 kci->last_module_symbol = start;
2149 return 0;
2150 }
2151
2152 if (!kci->first_symbol || start < kci->first_symbol)
2153 kci->first_symbol = start;
2154
2155 if (!kci->last_symbol || start > kci->last_symbol)
2156 kci->last_symbol = start;
2157
2158 if (!strcmp(name, "_stext")) {
2159 kci->stext = start;
2160 return 0;
2161 }
2162
2163 if (!strcmp(name, "_etext")) {
2164 kci->etext = start;
2165 return 0;
2166 }
2167
2168 if (is_entry_trampoline(name) && !kcore_copy__new_sym(kci, start))
2169 return -1;
2170
2171 return 0;
2172 }
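
/*
 * kallsyms lines for module symbols carry a trailing "[module]" tag, which
 * is what the strchr(name, '[') test above uses to separate the module
 * symbol range from the kernel proper.  Entry trampoline symbols are
 * collected individually since they may live outside both ranges.
 */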
2173
2174 static int kcore_copy__parse_kallsyms(struct kcore_copy_info *kci,
2175 const char *dir)
2176 {
2177 char kallsyms_filename[PATH_MAX];
2178
2179 scnprintf(kallsyms_filename, PATH_MAX, "%s/kallsyms", dir);
2180
2181 if (symbol__restricted_filename(kallsyms_filename, "/proc/kallsyms"))
2182 return -1;
2183
2184 if (kallsyms__parse(kallsyms_filename, kci,
2185 kcore_copy__process_kallsyms) < 0)
2186 return -1;
2187
2188 return 0;
2189 }
2190
2191 static int kcore_copy__process_modules(void *arg,
2192 const char *name __maybe_unused,
2193 u64 start, u64 size __maybe_unused)
2194 {
2195 struct kcore_copy_info *kci = arg;
2196
2197 if (!kci->first_module || start < kci->first_module)
2198 kci->first_module = start;
2199
2200 return 0;
2201 }
2202
2203 static int kcore_copy__parse_modules(struct kcore_copy_info *kci,
2204 const char *dir)
2205 {
2206 char modules_filename[PATH_MAX];
2207
2208 scnprintf(modules_filename, PATH_MAX, "%s/modules", dir);
2209
2210 if (symbol__restricted_filename(modules_filename, "/proc/modules"))
2211 return -1;
2212
2213 if (modules__parse(modules_filename, kci,
2214 kcore_copy__process_modules) < 0)
2215 return -1;
2216
2217 return 0;
2218 }
2219
2220 static int kcore_copy__map(struct kcore_copy_info *kci, u64 start, u64 end,
2221 u64 pgoff, u64 s, u64 e)
2222 {
2223 u64 len, offset;
2224
2225 if (s < start || s >= end)
2226 return 0;
2227
2228 offset = (s - start) + pgoff;
2229 len = e < end ? e - s : end - s;
2230
2231 return kcore_copy_info__addnew(kci, s, len, offset) ? 0 : -1;
2232 }
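
/*
 * Example: for a kcore program header covering [start, end) = [0xffff0000,
 * 0xffff8000) with pgoff 0x1000, a symbol range [s, e) = [0xffff2000,
 * 0xffffa000) gives offset = (s - start) + pgoff = 0x3000 and
 * len = end - s = 0x6000, i.e. the recorded range is clamped to the part
 * of the symbol range that the program header actually maps.
 */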
2233
2234 static int kcore_copy__read_map(u64 start, u64 len, u64 pgoff, void *data)
2235 {
2236 struct kcore_copy_info *kci = data;
2237 u64 end = start + len;
2238 struct sym_data *sdat;
2239
2240 if (kcore_copy__map(kci, start, end, pgoff, kci->stext, kci->etext))
2241 return -1;
2242
2243 if (kcore_copy__map(kci, start, end, pgoff, kci->first_module,
2244 kci->last_module_symbol))
2245 return -1;
2246
2247 list_for_each_entry(sdat, &kci->syms, node) {
2248 u64 s = round_down(sdat->addr, page_size);
2249
2250 if (kcore_copy__map(kci, start, end, pgoff, s, s + len))
2251 return -1;
2252 }
2253
2254 return 0;
2255 }
2256
2257 static int kcore_copy__read_maps(struct kcore_copy_info *kci, Elf *elf)
2258 {
2259 if (elf_read_maps(elf, true, kcore_copy__read_map, kci) < 0)
2260 return -1;
2261
2262 return 0;
2263 }
2264
2265 static void kcore_copy__find_remaps(struct kcore_copy_info *kci)
2266 {
2267 struct phdr_data *p, *k = NULL;
2268 u64 kend;
2269
2270 if (!kci->stext)
2271 return;
2272
2273 /* Find phdr that corresponds to the kernel map (contains stext) */
2274 kcore_copy__for_each_phdr(kci, p) {
2275 u64 pend = p->addr + p->len - 1;
2276
2277 if (p->addr <= kci->stext && pend >= kci->stext) {
2278 k = p;
2279 break;
2280 }
2281 }
2282
2283 if (!k)
2284 return;
2285
2286 kend = k->offset + k->len;
2287
2288 /* Find phdrs that remap the kernel */
2289 kcore_copy__for_each_phdr(kci, p) {
2290 u64 pend = p->offset + p->len;
2291
2292 if (p == k)
2293 continue;
2294
2295 if (p->offset >= k->offset && pend <= kend)
2296 p->remaps = k;
2297 }
2298 }
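
/*
 * A phdr marked as a remap covers file offsets that already fall inside the
 * kernel phdr (kcore can expose the same data at more than one virtual
 * address).  Such phdrs are not copied again: kcore_copy__layout() gives
 * them an offset relative to the kernel phdr's data, and kcore_copy()
 * skips them when copying segment contents.
 */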
2299
2300 static void kcore_copy__layout(struct kcore_copy_info *kci)
2301 {
2302 struct phdr_data *p;
2303 off_t rel = 0;
2304
2305 kcore_copy__find_remaps(kci);
2306
2307 kcore_copy__for_each_phdr(kci, p) {
2308 if (!p->remaps) {
2309 p->rel = rel;
2310 rel += p->len;
2311 }
2312 kci->phnum += 1;
2313 }
2314
2315 kcore_copy__for_each_phdr(kci, p) {
2316 struct phdr_data *k = p->remaps;
2317
2318 if (k)
2319 p->rel = p->offset - k->offset + k->rel;
2320 }
2321 }
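
/*
 * Example layout, with phdrs in the order K, M, R: a kernel phdr K
 * (len 0x200000), a modules phdr M (len 0x100000) and a phdr R that remaps
 * part of K at file offset K->offset + 0x1000.  The first pass assigns
 * K->rel = 0 and M->rel = 0x200000 (R is skipped), and the second pass
 * assigns R->rel = 0x1000, so R points into the data already copied for K.
 */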
2322
2323 static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
2324 Elf *elf)
2325 {
2326 if (kcore_copy__parse_kallsyms(kci, dir))
2327 return -1;
2328
2329 if (kcore_copy__parse_modules(kci, dir))
2330 return -1;
2331
2332 if (kci->stext)
2333 kci->stext = round_down(kci->stext, page_size);
2334 else
2335 kci->stext = round_down(kci->first_symbol, page_size);
2336
2337 if (kci->etext) {
2338 kci->etext = round_up(kci->etext, page_size);
2339 } else if (kci->last_symbol) {
2340 kci->etext = round_up(kci->last_symbol, page_size);
2341 kci->etext += page_size;
2342 }
2343
2344 if (kci->first_module_symbol &&
2345 (!kci->first_module || kci->first_module_symbol < kci->first_module))
2346 kci->first_module = kci->first_module_symbol;
2347
2348 kci->first_module = round_down(kci->first_module, page_size);
2349
2350 if (kci->last_module_symbol) {
2351 kci->last_module_symbol = round_up(kci->last_module_symbol,
2352 page_size);
2353 kci->last_module_symbol += page_size;
2354 }
2355
2356 if (!kci->stext || !kci->etext)
2357 return -1;
2358
2359 if (kci->first_module && !kci->last_module_symbol)
2360 return -1;
2361
2362 if (kcore_copy__read_maps(kci, elf))
2363 return -1;
2364
2365 kcore_copy__layout(kci);
2366
2367 return 0;
2368 }
2369
2370 static int kcore_copy__copy_file(const char *from_dir, const char *to_dir,
2371 const char *name)
2372 {
2373 char from_filename[PATH_MAX];
2374 char to_filename[PATH_MAX];
2375
2376 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
2377 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
2378
2379 return copyfile_mode(from_filename, to_filename, 0400);
2380 }
2381
2382 static int kcore_copy__unlink(const char *dir, const char *name)
2383 {
2384 char filename[PATH_MAX];
2385
2386 scnprintf(filename, PATH_MAX, "%s/%s", dir, name);
2387
2388 return unlink(filename);
2389 }
2390
2391 static int kcore_copy__compare_fds(int from, int to)
2392 {
2393 char *buf_from;
2394 char *buf_to;
2395 ssize_t ret;
2396 size_t len;
2397 int err = -1;
2398
2399 buf_from = malloc(page_size);
2400 buf_to = malloc(page_size);
2401 if (!buf_from || !buf_to)
2402 goto out;
2403
2404 while (1) {
2405 /* Use read because mmap won't work on proc files */
2406 ret = read(from, buf_from, page_size);
2407 if (ret < 0)
2408 goto out;
2409
2410 if (!ret)
2411 break;
2412
2413 len = ret;
2414
2415 if (readn(to, buf_to, len) != (int)len)
2416 goto out;
2417
2418 if (memcmp(buf_from, buf_to, len))
2419 goto out;
2420 }
2421
2422 err = 0;
2423 out:
2424 free(buf_to);
2425 free(buf_from);
2426 return err;
2427 }
2428
2429 static int kcore_copy__compare_files(const char *from_filename,
2430 const char *to_filename)
2431 {
2432 int from, to, err = -1;
2433
2434 from = open(from_filename, O_RDONLY);
2435 if (from < 0)
2436 return -1;
2437
2438 to = open(to_filename, O_RDONLY);
2439 if (to < 0)
2440 goto out_close_from;
2441
2442 err = kcore_copy__compare_fds(from, to);
2443
2444 close(to);
2445 out_close_from:
2446 close(from);
2447 return err;
2448 }
2449
2450 static int kcore_copy__compare_file(const char *from_dir, const char *to_dir,
2451 const char *name)
2452 {
2453 char from_filename[PATH_MAX];
2454 char to_filename[PATH_MAX];
2455
2456 scnprintf(from_filename, PATH_MAX, "%s/%s", from_dir, name);
2457 scnprintf(to_filename, PATH_MAX, "%s/%s", to_dir, name);
2458
2459 return kcore_copy__compare_files(from_filename, to_filename);
2460 }
2461
2462 /**
2463 * kcore_copy - copy kallsyms, modules and kcore from one directory to another.
2464 * @from_dir: from directory
2465 * @to_dir: to directory
2466 *
2467 * This function copies kallsyms, modules and kcore files from one directory to
2468 * another. kallsyms and modules are copied entirely. Only code segments are
2469 * copied from kcore. It is assumed that two segments suffice: one for the
2470 * kernel proper and one for all the modules. The code segments are determined
2471 * from kallsyms and modules files. The kernel map starts at _stext or the
2472 * lowest function symbol, and ends at _etext or the highest function symbol.
2473 * The module map starts at the lowest module address and ends at the highest
2474 * module symbol. Start addresses are rounded down to the nearest page. End
2475 * addresses are rounded up to the nearest page. An extra page is added to the
2476 * highest kernel symbol and highest module symbol to, hopefully, encompass that
2477 * symbol too. Because it contains only code sections, the resulting kcore is
2478 * unusual. One significant peculiarity is that the mapping (start -> pgoff)
2479 * is not the same for the kernel map and the modules map. That happens because
2480 * the data is copied adjacently whereas the original kcore has gaps. Finally,
2481  * the kallsyms file is compared with its copy to check that modules have not
2482  * been loaded or unloaded while the copies were taking place.
2483 *
2484 * Return: %0 on success, %-1 on failure.
2485 */
2486 int kcore_copy(const char *from_dir, const char *to_dir)
2487 {
2488 struct kcore kcore;
2489 struct kcore extract;
2490 int idx = 0, err = -1;
2491 off_t offset, sz;
2492 struct kcore_copy_info kci = { .stext = 0, };
2493 char kcore_filename[PATH_MAX];
2494 char extract_filename[PATH_MAX];
2495 struct phdr_data *p;
2496
2497 INIT_LIST_HEAD(&kci.phdrs);
2498 INIT_LIST_HEAD(&kci.syms);
2499
2500 if (kcore_copy__copy_file(from_dir, to_dir, "kallsyms"))
2501 return -1;
2502
2503 if (kcore_copy__copy_file(from_dir, to_dir, "modules"))
2504 goto out_unlink_kallsyms;
2505
2506 scnprintf(kcore_filename, PATH_MAX, "%s/kcore", from_dir);
2507 scnprintf(extract_filename, PATH_MAX, "%s/kcore", to_dir);
2508
2509 if (kcore__open(&kcore, kcore_filename))
2510 goto out_unlink_modules;
2511
2512 if (kcore_copy__calc_maps(&kci, from_dir, kcore.elf))
2513 goto out_kcore_close;
2514
2515 if (kcore__init(&extract, extract_filename, kcore.elfclass, false))
2516 goto out_kcore_close;
2517
2518 if (kcore__copy_hdr(&kcore, &extract, kci.phnum))
2519 goto out_extract_close;
2520
2521 offset = gelf_fsize(extract.elf, ELF_T_EHDR, 1, EV_CURRENT) +
2522 gelf_fsize(extract.elf, ELF_T_PHDR, kci.phnum, EV_CURRENT);
2523 offset = round_up(offset, page_size);
2524
2525 kcore_copy__for_each_phdr(&kci, p) {
2526 off_t offs = p->rel + offset;
2527
2528 if (kcore__add_phdr(&extract, idx++, offs, p->addr, p->len))
2529 goto out_extract_close;
2530 }
2531
2532 sz = kcore__write(&extract);
2533 if (sz < 0 || sz > offset)
2534 goto out_extract_close;
2535
2536 kcore_copy__for_each_phdr(&kci, p) {
2537 off_t offs = p->rel + offset;
2538
2539 if (p->remaps)
2540 continue;
2541 if (copy_bytes(kcore.fd, p->offset, extract.fd, offs, p->len))
2542 goto out_extract_close;
2543 }
2544
2545 if (kcore_copy__compare_file(from_dir, to_dir, "kallsyms"))
2546 goto out_extract_close;
2547
2548 err = 0;
2549
2550 out_extract_close:
2551 kcore__close(&extract);
2552 if (err)
2553 unlink(extract_filename);
2554 out_kcore_close:
2555 kcore__close(&kcore);
2556 out_unlink_modules:
2557 if (err)
2558 kcore_copy__unlink(to_dir, "modules");
2559 out_unlink_kallsyms:
2560 if (err)
2561 kcore_copy__unlink(to_dir, "kallsyms");
2562
2563 kcore_copy__free_phdrs(&kci);
2564 kcore_copy__free_syms(&kci);
2565
2566 return err;
2567 }
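
/*
 * Minimal usage sketch for kcore_copy().  The destination path below is
 * only an example; in perf the destination is normally a build-id cache
 * directory prepared by the caller.
 */
#if 0
static int example_copy_kcore(void)
{
	/* Copies kallsyms, modules and a pruned kcore from /proc */
	return kcore_copy("/proc", "/tmp/kcore_dir");
}
#endif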
2568
2569 int kcore_extract__create(struct kcore_extract *kce)
2570 {
2571 struct kcore kcore;
2572 struct kcore extract;
2573 size_t count = 1;
2574 int idx = 0, err = -1;
2575 off_t offset = page_size, sz;
2576
2577 if (kcore__open(&kcore, kce->kcore_filename))
2578 return -1;
2579
2580 strcpy(kce->extract_filename, PERF_KCORE_EXTRACT);
2581 if (kcore__init(&extract, kce->extract_filename, kcore.elfclass, true))
2582 goto out_kcore_close;
2583
2584 if (kcore__copy_hdr(&kcore, &extract, count))
2585 goto out_extract_close;
2586
2587 if (kcore__add_phdr(&extract, idx, offset, kce->addr, kce->len))
2588 goto out_extract_close;
2589
2590 sz = kcore__write(&extract);
2591 if (sz < 0 || sz > offset)
2592 goto out_extract_close;
2593
2594 if (copy_bytes(kcore.fd, kce->offs, extract.fd, offset, kce->len))
2595 goto out_extract_close;
2596
2597 err = 0;
2598
2599 out_extract_close:
2600 kcore__close(&extract);
2601 if (err)
2602 unlink(kce->extract_filename);
2603 out_kcore_close:
2604 kcore__close(&kcore);
2605
2606 return err;
2607 }
2608
2609 void kcore_extract__delete(struct kcore_extract *kce)
2610 {
2611 unlink(kce->extract_filename);
2612 }
2613
2614 #ifdef HAVE_GELF_GETNOTE_SUPPORT
2615
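/*
 * If the file has been prelinked, the address in the SDT note ('loc') was
 * computed against the base address recorded in the note.  Rebase it using
 * the current location of the .stapsdt.base section:
 *
 *	loc = loc + base_off - base
 *
 * where 'base' is the base address stored in the note and 'base_off' is
 * the .stapsdt.base section offset found in the file now.
 */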
2616 static void sdt_adjust_loc(struct sdt_note *tmp, GElf_Addr base_off)
2617 {
2618 if (!base_off)
2619 return;
2620
2621 if (tmp->bit32)
2622 tmp->addr.a32[SDT_NOTE_IDX_LOC] =
2623 tmp->addr.a32[SDT_NOTE_IDX_LOC] + base_off -
2624 tmp->addr.a32[SDT_NOTE_IDX_BASE];
2625 else
2626 tmp->addr.a64[SDT_NOTE_IDX_LOC] =
2627 tmp->addr.a64[SDT_NOTE_IDX_LOC] + base_off -
2628 tmp->addr.a64[SDT_NOTE_IDX_BASE];
2629 }
2630
2631 static void sdt_adjust_refctr(struct sdt_note *tmp, GElf_Addr base_addr,
2632 GElf_Addr base_off)
2633 {
2634 if (!base_off)
2635 return;
2636
2637 if (tmp->bit32 && tmp->addr.a32[SDT_NOTE_IDX_REFCTR])
2638 tmp->addr.a32[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2639 else if (tmp->addr.a64[SDT_NOTE_IDX_REFCTR])
2640 tmp->addr.a64[SDT_NOTE_IDX_REFCTR] -= (base_addr - base_off);
2641 }
2642
2643 /**
2644 * populate_sdt_note : Parse raw data and identify SDT note
2645 * @elf: elf of the opened file
2646 * @data: raw data of a section with description offset applied
2647 * @len: note description size
2648 * @type: type of the note
2649 * @sdt_notes: List to add the SDT note
2650 *
2651 * Responsible for parsing the @data in section .note.stapsdt in @elf and
2652  * if it's an SDT note, appends it to the @sdt_notes list.
2653 */
2654 static int populate_sdt_note(Elf **elf, const char *data, size_t len,
2655 struct list_head *sdt_notes)
2656 {
2657 const char *provider, *name, *args;
2658 struct sdt_note *tmp = NULL;
2659 GElf_Ehdr ehdr;
2660 GElf_Shdr shdr;
2661 int ret = -EINVAL;
2662
2663 union {
2664 Elf64_Addr a64[NR_ADDR];
2665 Elf32_Addr a32[NR_ADDR];
2666 } buf;
2667
2668 Elf_Data dst = {
2669 .d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
2670 .d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
2671 .d_off = 0, .d_align = 0
2672 };
2673 Elf_Data src = {
2674 .d_buf = (void *) data, .d_type = ELF_T_ADDR,
2675 .d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
2676 .d_align = 0
2677 };
2678
2679 tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
2680 if (!tmp) {
2681 ret = -ENOMEM;
2682 goto out_err;
2683 }
2684
2685 INIT_LIST_HEAD(&tmp->note_list);
2686
2687 if (len < dst.d_size + 3)
2688 goto out_free_note;
2689
2690 /* Translation from file representation to memory representation */
2691 if (gelf_xlatetom(*elf, &dst, &src,
2692 elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
2693 pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
2694 goto out_free_note;
2695 }
2696
2697 /* Populate the fields of sdt_note */
2698 provider = data + dst.d_size;
2699
2700 name = (const char *)memchr(provider, '\0', data + len - provider);
2701 if (name++ == NULL)
2702 goto out_free_note;
2703
2704 tmp->provider = strdup(provider);
2705 if (!tmp->provider) {
2706 ret = -ENOMEM;
2707 goto out_free_note;
2708 }
2709 tmp->name = strdup(name);
2710 if (!tmp->name) {
2711 ret = -ENOMEM;
2712 goto out_free_prov;
2713 }
2714
2715 args = memchr(name, '\0', data + len - name);
2716
2717 /*
2718 * There is no argument if:
2719 * - We reached the end of the note;
2720 * - There is not enough room to hold a potential string;
2721 * - The argument string is empty or just contains ':'.
2722 */
2723 if (args == NULL || data + len - args < 2 ||
2724 args[1] == ':' || args[1] == '\0')
2725 tmp->args = NULL;
2726 else {
2727 tmp->args = strdup(++args);
2728 if (!tmp->args) {
2729 ret = -ENOMEM;
2730 goto out_free_name;
2731 }
2732 }
2733
2734 if (gelf_getclass(*elf) == ELFCLASS32) {
2735 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
2736 tmp->bit32 = true;
2737 } else {
2738 memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
2739 tmp->bit32 = false;
2740 }
2741
2742 if (!gelf_getehdr(*elf, &ehdr)) {
2743 pr_debug("%s : cannot get elf header.\n", __func__);
2744 ret = -EBADF;
2745 goto out_free_args;
2746 }
2747
2748 /* Adjust for the prelink effect:
2749  * Find the .stapsdt.base section.
2750  * This section helps us handle prelinking (if present).
2751  * Compare the retrieved file offset of the base section with the
2752  * base address in the description of the SDT note. If they differ,
2753  * adjust the note location accordingly.
2754 */
2755 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL))
2756 sdt_adjust_loc(tmp, shdr.sh_offset);
2757
2758 /* Adjust reference counter offset */
2759 if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_PROBES_SCN, NULL))
2760 sdt_adjust_refctr(tmp, shdr.sh_addr, shdr.sh_offset);
2761
2762 list_add_tail(&tmp->note_list, sdt_notes);
2763 return 0;
2764
2765 out_free_args:
2766 zfree(&tmp->args);
2767 out_free_name:
2768 zfree(&tmp->name);
2769 out_free_prov:
2770 zfree(&tmp->provider);
2771 out_free_note:
2772 free(tmp);
2773 out_err:
2774 return ret;
2775 }
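
/*
 * The note description parsed above is laid out as:
 *
 *	<loc addr> <base addr> <semaphore addr> provider\0 name\0 args\0
 *
 * with the three addresses in the file's native word size and byte order
 * (hence the gelf_xlatetom() translation).  For example, glibc's "setjmp"
 * probe has provider "libc" and name "setjmp".
 */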
2776
2777 /**
2778 * construct_sdt_notes_list : constructs a list of SDT notes
2779 * @elf : elf to look into
2780 * @sdt_notes : empty list_head
2781 *
2782  * Scans the sections in 'elf' for the .note.stapsdt
2783  * section. It then calls populate_sdt_note() to find
2784  * the SDT events and populate 'sdt_notes'.
2785 */
2786 static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
2787 {
2788 GElf_Ehdr ehdr;
2789 Elf_Scn *scn = NULL;
2790 Elf_Data *data;
2791 GElf_Shdr shdr;
2792 size_t shstrndx, next;
2793 GElf_Nhdr nhdr;
2794 size_t name_off, desc_off, offset;
2795 int ret = 0;
2796
2797 if (gelf_getehdr(elf, &ehdr) == NULL) {
2798 ret = -EBADF;
2799 goto out_ret;
2800 }
2801 if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
2802 ret = -EBADF;
2803 goto out_ret;
2804 }
2805
2806 /* Look for the required section */
2807 scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
2808 if (!scn) {
2809 ret = -ENOENT;
2810 goto out_ret;
2811 }
2812
2813 if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
2814 ret = -ENOENT;
2815 goto out_ret;
2816 }
2817
2818 data = elf_getdata(scn, NULL);
2819
2820 /* Get the SDT notes */
2821 for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
2822 &desc_off)) > 0; offset = next) {
2823 if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
2824 !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
2825 sizeof(SDT_NOTE_NAME))) {
2826 /* Check the type of the note */
2827 if (nhdr.n_type != SDT_NOTE_TYPE)
2828 goto out_ret;
2829
2830 ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
2831 nhdr.n_descsz, sdt_notes);
2832 if (ret < 0)
2833 goto out_ret;
2834 }
2835 }
2836 if (list_empty(sdt_notes))
2837 ret = -ENOENT;
2838
2839 out_ret:
2840 return ret;
2841 }
2842
2843 /**
2844 * get_sdt_note_list : Wrapper to construct a list of sdt notes
2845 * @head : empty list_head
2846 * @target : file to find SDT notes from
2847 *
2848  * This opens the file, initializes the ELF descriptor
2849  * and then calls construct_sdt_notes_list().
2850 */
2851 int get_sdt_note_list(struct list_head *head, const char *target)
2852 {
2853 Elf *elf;
2854 int fd, ret;
2855
2856 fd = open(target, O_RDONLY);
2857 if (fd < 0)
2858 return -EBADF;
2859
2860 elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
2861 if (!elf) {
2862 ret = -EBADF;
2863 goto out_close;
2864 }
2865 ret = construct_sdt_notes_list(elf, head);
2866 elf_end(elf);
2867 out_close:
2868 close(fd);
2869 return ret;
2870 }
2871
2872 /**
2873 * cleanup_sdt_note_list : free the sdt notes' list
2874 * @sdt_notes: sdt notes' list
2875 *
2876 * Free up the SDT notes in @sdt_notes.
2877  * Returns the number of SDT notes freed.
2878 */
2879 int cleanup_sdt_note_list(struct list_head *sdt_notes)
2880 {
2881 struct sdt_note *tmp, *pos;
2882 int nr_free = 0;
2883
2884 list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
2885 list_del_init(&pos->note_list);
2886 zfree(&pos->args);
2887 zfree(&pos->name);
2888 zfree(&pos->provider);
2889 free(pos);
2890 nr_free++;
2891 }
2892 return nr_free;
2893 }
2894
2895 /**
2896  * sdt_notes__get_count: Counts the number of SDT events
2897 * @start: list_head to sdt_notes list
2898 *
2899 * Returns the number of SDT notes in a list
2900 */
2901 int sdt_notes__get_count(struct list_head *start)
2902 {
2903 struct sdt_note *sdt_ptr;
2904 int count = 0;
2905
2906 list_for_each_entry(sdt_ptr, start, note_list)
2907 count++;
2908 return count;
2909 }
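
/*
 * Minimal usage sketch for the SDT helpers above: list the notes found in
 * an object file, then free them.  The target path is only an example.
 */
#if 0
static int example_list_sdt_notes(void)
{
	struct sdt_note *note;
	LIST_HEAD(sdt_notes);
	int ret;

	ret = get_sdt_note_list(&sdt_notes, "/usr/lib64/libc.so.6");
	if (ret < 0)
		return ret;

	pr_debug("found %d SDT notes\n", sdt_notes__get_count(&sdt_notes));

	list_for_each_entry(note, &sdt_notes, note_list)
		pr_debug("%s:%s\n", note->provider, note->name);

	cleanup_sdt_note_list(&sdt_notes);
	return 0;
}
#endif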
2910 #endif
2911
2912 void symbol__elf_init(void)
2913 {
2914 elf_version(EV_CURRENT);
2915 }
2916