// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
 */

#include "debug.h"
#include "symbol.h"
#include "map.h"
#include "probe-event.h"
#include "probe-file.h"

#ifdef HAVE_LIBELF_SUPPORT
/*
 * Unlike the generic (weak) implementation, symbol addresses also need
 * adjusting for ET_DYN images (shared objects and PIE executables).
 */
bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
{
	return ehdr.e_type == ET_EXEC ||
	       ehdr.e_type == ET_REL ||
	       ehdr.e_type == ET_DYN;
}

#endif

int arch__choose_best_symbol(struct symbol *syma,
			     struct symbol *symb __maybe_unused)
{
	char *sym = syma->name;

#if !defined(_CALL_ELF) || _CALL_ELF != 2
	/* Skip over any initial dot */
	if (*sym == '.')
		sym++;
#endif

	/* Avoid "SyS" kernel syscall aliases */
	if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
		return SYMBOL_B;
	if (strlen(sym) >= 10 && !strncmp(sym, "compat_SyS", 10))
		return SYMBOL_B;

	return SYMBOL_A;
}

#if !defined(_CALL_ELF) || _CALL_ELF != 2
/* Allow matching against dot variants */
int arch__compare_symbol_names(const char *namea, const char *nameb)
{
	/* Skip over initial dot */
	if (*namea == '.')
		namea++;
	if (*nameb == '.')
		nameb++;

	return strcmp(namea, nameb);
}

int arch__compare_symbol_names_n(const char *namea, const char *nameb,
				 unsigned int n)
{
	/* Skip over initial dot */
	if (*namea == '.')
		namea++;
	if (*nameb == '.')
		nameb++;

	return strncmp(namea, nameb, n);
}

const char *arch__normalize_symbol_name(const char *name)
{
	/* Skip over initial dot */
	if (name && *name == '.')
		name++;
	return name;
}
#endif

#if defined(_CALL_ELF) && _CALL_ELF == 2

#ifdef HAVE_LIBELF_SUPPORT
/*
 * The ELFv2 ABI encodes the offset of a function's local entry point
 * (LEP) from its global entry point (GEP) in the symbol's st_other
 * field; stash it so arch__fix_tev_from_maps() can use it later.
 */
void arch__sym_update(struct symbol *s, GElf_Sym *sym)
{
	s->arch_sym = sym->st_other;
}
#endif

/* Offset of the LEP from the GEP assumed for kernel (kallsyms) symbols */
#define PPC64LE_LEP_OFFSET	8

void arch__fix_tev_from_maps(struct perf_probe_event *pev,
			     struct probe_trace_event *tev, struct map *map,
			     struct symbol *sym)
{
	int lep_offset;

	/*
	 * When probing at a function entry point, we normally always want the
	 * LEP since that catches calls to the function through both the GEP
	 * and the LEP. Hence, we would like to probe at an offset of 8 bytes
	 * if the user only specified the function entry.
	 *
	 * However, if the user specifies an offset, we fall back to using the
	 * GEP since all userspace applications (objdump/readelf) show function
	 * disassembly with offsets from the GEP.
	 */
	if (pev->point.offset || !map || !sym)
		return;

	/* For kretprobes, add an offset only if the kernel supports it */
	if (!pev->uprobes && pev->point.retprobe) {
#ifdef HAVE_LIBELF_SUPPORT
		if (!kretprobe_offset_is_supported())
#endif
			return;
	}

	lep_offset = PPC64_LOCAL_ENTRY_OFFSET(sym->arch_sym);

	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS)
		tev->point.offset += PPC64LE_LEP_OFFSET;
	else if (lep_offset) {
		if (pev->uprobes)
			tev->point.address += lep_offset;
		else
			tev->point.offset += lep_offset;
	}
}

#ifdef HAVE_LIBELF_SUPPORT
/*
 * Look up the symbol each resolved trace event points at and let
 * arch__fix_tev_from_maps() move the probe to the LEP where appropriate.
 */
void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
					   int ntevs)
{
	struct probe_trace_event *tev;
	struct map *map;
	struct symbol *sym = NULL;
	struct rb_node *tmp;
	int i = 0;

	map = get_target_map(pev->target, pev->nsi, pev->uprobes);
	if (!map || map__load(map) < 0)
		return;

	for (i = 0; i < ntevs; i++) {
		tev = &pev->tevs[i];
		map__for_each_symbol(map, sym, tmp) {
			if (map->unmap_ip(map, sym->start) == tev->point.address) {
				arch__fix_tev_from_maps(pev, tev, map, sym);
				break;
			}
		}
	}
}
#endif /* HAVE_LIBELF_SUPPORT */

#endif