1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <stdlib.h> 4 #include <bpf/bpf.h> 5 #include <bpf/btf.h> 6 #include <bpf/libbpf.h> 7 #include <linux/btf.h> 8 #include <linux/err.h> 9 #include <linux/string.h> 10 #include <internal/lib.h> 11 #include <symbol/kallsyms.h> 12 #include "bpf-event.h" 13 #include "debug.h" 14 #include "dso.h" 15 #include "symbol.h" 16 #include "machine.h" 17 #include "env.h" 18 #include "session.h" 19 #include "map.h" 20 #include "evlist.h" 21 #include "record.h" 22 #include "util/synthetic-events.h" 23 24 #define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr)) 25 26 static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len) 27 { 28 int ret = 0; 29 size_t i; 30 31 for (i = 0; i < len; i++) 32 ret += snprintf(buf + ret, size - ret, "%02x", data[i]); 33 return ret; 34 } 35 36 static int machine__process_bpf_event_load(struct machine *machine, 37 union perf_event *event, 38 struct perf_sample *sample __maybe_unused) 39 { 40 struct bpf_prog_info_linear *info_linear; 41 struct bpf_prog_info_node *info_node; 42 struct perf_env *env = machine->env; 43 int id = event->bpf.id; 44 unsigned int i; 45 46 /* perf-record, no need to handle bpf-event */ 47 if (env == NULL) 48 return 0; 49 50 info_node = perf_env__find_bpf_prog_info(env, id); 51 if (!info_node) 52 return 0; 53 info_linear = info_node->info_linear; 54 55 for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) { 56 u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms); 57 u64 addr = addrs[i]; 58 struct map *map = maps__find(&machine->kmaps, addr); 59 60 if (map) { 61 map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO; 62 map->dso->bpf_prog.id = id; 63 map->dso->bpf_prog.sub_id = i; 64 map->dso->bpf_prog.env = env; 65 } 66 } 67 return 0; 68 } 69 70 int machine__process_bpf(struct machine *machine, union perf_event *event, 71 struct perf_sample *sample) 72 { 73 if (dump_trace) 74 perf_event__fprintf_bpf(event, stdout); 75 76 switch (event->bpf.type) { 77 case PERF_BPF_EVENT_PROG_LOAD: 78 return machine__process_bpf_event_load(machine, event, sample); 79 80 case PERF_BPF_EVENT_PROG_UNLOAD: 81 /* 82 * Do not free bpf_prog_info and btf of the program here, 83 * as annotation still need them. They will be freed at 84 * the end of the session. 85 */ 86 break; 87 default: 88 pr_debug("unexpected bpf event type of %d\n", event->bpf.type); 89 break; 90 } 91 return 0; 92 } 93 94 static int perf_env__fetch_btf(struct perf_env *env, 95 u32 btf_id, 96 struct btf *btf) 97 { 98 struct btf_node *node; 99 u32 data_size; 100 const void *data; 101 102 data = btf__get_raw_data(btf, &data_size); 103 104 node = malloc(data_size + sizeof(struct btf_node)); 105 if (!node) 106 return -1; 107 108 node->id = btf_id; 109 node->data_size = data_size; 110 memcpy(node->data, data, data_size); 111 112 perf_env__insert_btf(env, node); 113 return 0; 114 } 115 116 static int synthesize_bpf_prog_name(char *buf, int size, 117 struct bpf_prog_info *info, 118 struct btf *btf, 119 u32 sub_id) 120 { 121 u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags); 122 void *func_infos = (void *)(uintptr_t)(info->func_info); 123 u32 sub_prog_cnt = info->nr_jited_ksyms; 124 const struct bpf_func_info *finfo; 125 const char *short_name = NULL; 126 const struct btf_type *t; 127 int name_len; 128 129 name_len = snprintf(buf, size, "bpf_prog_"); 130 name_len += snprintf_hex(buf + name_len, size - name_len, 131 prog_tags[sub_id], BPF_TAG_SIZE); 132 if (btf) { 133 finfo = func_infos + sub_id * info->func_info_rec_size; 134 t = btf__type_by_id(btf, finfo->type_id); 135 short_name = btf__name_by_offset(btf, t->name_off); 136 } else if (sub_id == 0 && sub_prog_cnt == 1) { 137 /* no subprog */ 138 if (info->name[0]) 139 short_name = info->name; 140 } else 141 short_name = "F"; 142 if (short_name) 143 name_len += snprintf(buf + name_len, size - name_len, 144 "_%s", short_name); 145 return name_len; 146 } 147 148 /* 149 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf 150 * program. One PERF_RECORD_BPF_EVENT is generated for the program. And 151 * one PERF_RECORD_KSYMBOL is generated for each sub program. 152 * 153 * Returns: 154 * 0 for success; 155 * -1 for failures; 156 * -2 for lack of kernel support. 157 */ 158 static int perf_event__synthesize_one_bpf_prog(struct perf_session *session, 159 perf_event__handler_t process, 160 struct machine *machine, 161 int fd, 162 union perf_event *event, 163 struct record_opts *opts) 164 { 165 struct perf_record_ksymbol *ksymbol_event = &event->ksymbol; 166 struct perf_record_bpf_event *bpf_event = &event->bpf; 167 struct bpf_prog_info_linear *info_linear; 168 struct perf_tool *tool = session->tool; 169 struct bpf_prog_info_node *info_node; 170 struct bpf_prog_info *info; 171 struct btf *btf = NULL; 172 struct perf_env *env; 173 u32 sub_prog_cnt, i; 174 int err = 0; 175 u64 arrays; 176 177 /* 178 * for perf-record and perf-report use header.env; 179 * otherwise, use global perf_env. 180 */ 181 env = session->data ? &session->header.env : &perf_env; 182 183 arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS; 184 arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS; 185 arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO; 186 arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS; 187 arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS; 188 arrays |= 1UL << BPF_PROG_INFO_LINE_INFO; 189 arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO; 190 191 info_linear = bpf_program__get_prog_info_linear(fd, arrays); 192 if (IS_ERR_OR_NULL(info_linear)) { 193 info_linear = NULL; 194 pr_debug("%s: failed to get BPF program info. aborting\n", __func__); 195 return -1; 196 } 197 198 if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) { 199 pr_debug("%s: the kernel is too old, aborting\n", __func__); 200 return -2; 201 } 202 203 info = &info_linear->info; 204 205 /* number of ksyms, func_lengths, and tags should match */ 206 sub_prog_cnt = info->nr_jited_ksyms; 207 if (sub_prog_cnt != info->nr_prog_tags || 208 sub_prog_cnt != info->nr_jited_func_lens) 209 return -1; 210 211 /* check BTF func info support */ 212 if (info->btf_id && info->nr_func_info && info->func_info_rec_size) { 213 /* btf func info number should be same as sub_prog_cnt */ 214 if (sub_prog_cnt != info->nr_func_info) { 215 pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__); 216 err = -1; 217 goto out; 218 } 219 if (btf__get_from_id(info->btf_id, &btf)) { 220 pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id); 221 err = -1; 222 btf = NULL; 223 goto out; 224 } 225 perf_env__fetch_btf(env, info->btf_id, btf); 226 } 227 228 /* Synthesize PERF_RECORD_KSYMBOL */ 229 for (i = 0; i < sub_prog_cnt; i++) { 230 __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens); 231 __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms); 232 int name_len; 233 234 *ksymbol_event = (struct perf_record_ksymbol) { 235 .header = { 236 .type = PERF_RECORD_KSYMBOL, 237 .size = offsetof(struct perf_record_ksymbol, name), 238 }, 239 .addr = prog_addrs[i], 240 .len = prog_lens[i], 241 .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF, 242 .flags = 0, 243 }; 244 245 name_len = synthesize_bpf_prog_name(ksymbol_event->name, 246 KSYM_NAME_LEN, info, btf, i); 247 ksymbol_event->header.size += PERF_ALIGN(name_len + 1, 248 sizeof(u64)); 249 250 memset((void *)event + event->header.size, 0, machine->id_hdr_size); 251 event->header.size += machine->id_hdr_size; 252 err = perf_tool__process_synth_event(tool, event, 253 machine, process); 254 } 255 256 if (!opts->no_bpf_event) { 257 /* Synthesize PERF_RECORD_BPF_EVENT */ 258 *bpf_event = (struct perf_record_bpf_event) { 259 .header = { 260 .type = PERF_RECORD_BPF_EVENT, 261 .size = sizeof(struct perf_record_bpf_event), 262 }, 263 .type = PERF_BPF_EVENT_PROG_LOAD, 264 .flags = 0, 265 .id = info->id, 266 }; 267 memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE); 268 memset((void *)event + event->header.size, 0, machine->id_hdr_size); 269 event->header.size += machine->id_hdr_size; 270 271 /* save bpf_prog_info to env */ 272 info_node = malloc(sizeof(struct bpf_prog_info_node)); 273 if (!info_node) { 274 err = -1; 275 goto out; 276 } 277 278 info_node->info_linear = info_linear; 279 perf_env__insert_bpf_prog_info(env, info_node); 280 info_linear = NULL; 281 282 /* 283 * process after saving bpf_prog_info to env, so that 284 * required information is ready for look up 285 */ 286 err = perf_tool__process_synth_event(tool, event, 287 machine, process); 288 } 289 290 out: 291 free(info_linear); 292 free(btf); 293 return err ? -1 : 0; 294 } 295 296 struct kallsyms_parse { 297 union perf_event *event; 298 perf_event__handler_t process; 299 struct machine *machine; 300 struct perf_tool *tool; 301 }; 302 303 static int 304 process_bpf_image(char *name, u64 addr, struct kallsyms_parse *data) 305 { 306 struct machine *machine = data->machine; 307 union perf_event *event = data->event; 308 struct perf_record_ksymbol *ksymbol; 309 int len; 310 311 ksymbol = &event->ksymbol; 312 313 *ksymbol = (struct perf_record_ksymbol) { 314 .header = { 315 .type = PERF_RECORD_KSYMBOL, 316 .size = offsetof(struct perf_record_ksymbol, name), 317 }, 318 .addr = addr, 319 .len = page_size, 320 .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF, 321 .flags = 0, 322 }; 323 324 len = scnprintf(ksymbol->name, KSYM_NAME_LEN, "%s", name); 325 ksymbol->header.size += PERF_ALIGN(len + 1, sizeof(u64)); 326 memset((void *) event + event->header.size, 0, machine->id_hdr_size); 327 event->header.size += machine->id_hdr_size; 328 329 return perf_tool__process_synth_event(data->tool, event, machine, 330 data->process); 331 } 332 333 static int 334 kallsyms_process_symbol(void *data, const char *_name, 335 char type __maybe_unused, u64 start) 336 { 337 char disp[KSYM_NAME_LEN]; 338 char *module, *name; 339 unsigned long id; 340 int err = 0; 341 342 module = strchr(_name, '\t'); 343 if (!module) 344 return 0; 345 346 /* We are going after [bpf] module ... */ 347 if (strcmp(module + 1, "[bpf]")) 348 return 0; 349 350 name = memdup(_name, (module - _name) + 1); 351 if (!name) 352 return -ENOMEM; 353 354 name[module - _name] = 0; 355 356 /* .. and only for trampolines and dispatchers */ 357 if ((sscanf(name, "bpf_trampoline_%lu", &id) == 1) || 358 (sscanf(name, "bpf_dispatcher_%s", disp) == 1)) 359 err = process_bpf_image(name, start, data); 360 361 free(name); 362 return err; 363 } 364 365 int perf_event__synthesize_bpf_events(struct perf_session *session, 366 perf_event__handler_t process, 367 struct machine *machine, 368 struct record_opts *opts) 369 { 370 const char *kallsyms_filename = "/proc/kallsyms"; 371 struct kallsyms_parse arg; 372 union perf_event *event; 373 __u32 id = 0; 374 int err; 375 int fd; 376 377 event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size); 378 if (!event) 379 return -1; 380 381 /* Synthesize all the bpf programs in system. */ 382 while (true) { 383 err = bpf_prog_get_next_id(id, &id); 384 if (err) { 385 if (errno == ENOENT) { 386 err = 0; 387 break; 388 } 389 pr_debug("%s: can't get next program: %s%s\n", 390 __func__, strerror(errno), 391 errno == EINVAL ? " -- kernel too old?" : ""); 392 /* don't report error on old kernel or EPERM */ 393 err = (errno == EINVAL || errno == EPERM) ? 0 : -1; 394 break; 395 } 396 fd = bpf_prog_get_fd_by_id(id); 397 if (fd < 0) { 398 pr_debug("%s: failed to get fd for prog_id %u\n", 399 __func__, id); 400 continue; 401 } 402 403 err = perf_event__synthesize_one_bpf_prog(session, process, 404 machine, fd, 405 event, opts); 406 close(fd); 407 if (err) { 408 /* do not return error for old kernel */ 409 if (err == -2) 410 err = 0; 411 break; 412 } 413 } 414 415 /* Synthesize all the bpf images - trampolines/dispatchers. */ 416 if (symbol_conf.kallsyms_name != NULL) 417 kallsyms_filename = symbol_conf.kallsyms_name; 418 419 arg = (struct kallsyms_parse) { 420 .event = event, 421 .process = process, 422 .machine = machine, 423 .tool = session->tool, 424 }; 425 426 if (kallsyms__parse(kallsyms_filename, &arg, kallsyms_process_symbol)) { 427 pr_err("%s: failed to synthesize bpf images: %s\n", 428 __func__, strerror(errno)); 429 } 430 431 free(event); 432 return err; 433 } 434 435 static void perf_env__add_bpf_info(struct perf_env *env, u32 id) 436 { 437 struct bpf_prog_info_linear *info_linear; 438 struct bpf_prog_info_node *info_node; 439 struct btf *btf = NULL; 440 u64 arrays; 441 u32 btf_id; 442 int fd; 443 444 fd = bpf_prog_get_fd_by_id(id); 445 if (fd < 0) 446 return; 447 448 arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS; 449 arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS; 450 arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO; 451 arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS; 452 arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS; 453 arrays |= 1UL << BPF_PROG_INFO_LINE_INFO; 454 arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO; 455 456 info_linear = bpf_program__get_prog_info_linear(fd, arrays); 457 if (IS_ERR_OR_NULL(info_linear)) { 458 pr_debug("%s: failed to get BPF program info. aborting\n", __func__); 459 goto out; 460 } 461 462 btf_id = info_linear->info.btf_id; 463 464 info_node = malloc(sizeof(struct bpf_prog_info_node)); 465 if (info_node) { 466 info_node->info_linear = info_linear; 467 perf_env__insert_bpf_prog_info(env, info_node); 468 } else 469 free(info_linear); 470 471 if (btf_id == 0) 472 goto out; 473 474 if (btf__get_from_id(btf_id, &btf)) { 475 pr_debug("%s: failed to get BTF of id %u, aborting\n", 476 __func__, btf_id); 477 goto out; 478 } 479 perf_env__fetch_btf(env, btf_id, btf); 480 481 out: 482 free(btf); 483 close(fd); 484 } 485 486 static int bpf_event__sb_cb(union perf_event *event, void *data) 487 { 488 struct perf_env *env = data; 489 490 if (event->header.type != PERF_RECORD_BPF_EVENT) 491 return -1; 492 493 switch (event->bpf.type) { 494 case PERF_BPF_EVENT_PROG_LOAD: 495 perf_env__add_bpf_info(env, event->bpf.id); 496 497 case PERF_BPF_EVENT_PROG_UNLOAD: 498 /* 499 * Do not free bpf_prog_info and btf of the program here, 500 * as annotation still need them. They will be freed at 501 * the end of the session. 502 */ 503 break; 504 default: 505 pr_debug("unexpected bpf event type of %d\n", event->bpf.type); 506 break; 507 } 508 509 return 0; 510 } 511 512 int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env) 513 { 514 struct perf_event_attr attr = { 515 .type = PERF_TYPE_SOFTWARE, 516 .config = PERF_COUNT_SW_DUMMY, 517 .sample_id_all = 1, 518 .watermark = 1, 519 .bpf_event = 1, 520 .size = sizeof(attr), /* to capture ABI version */ 521 }; 522 523 /* 524 * Older gcc versions don't support designated initializers, like above, 525 * for unnamed union members, such as the following: 526 */ 527 attr.wakeup_watermark = 1; 528 529 return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env); 530 } 531 532 void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, 533 struct perf_env *env, 534 FILE *fp) 535 { 536 __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens); 537 __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms); 538 char name[KSYM_NAME_LEN]; 539 struct btf *btf = NULL; 540 u32 sub_prog_cnt, i; 541 542 sub_prog_cnt = info->nr_jited_ksyms; 543 if (sub_prog_cnt != info->nr_prog_tags || 544 sub_prog_cnt != info->nr_jited_func_lens) 545 return; 546 547 if (info->btf_id) { 548 struct btf_node *node; 549 550 node = perf_env__find_btf(env, info->btf_id); 551 if (node) 552 btf = btf__new((__u8 *)(node->data), 553 node->data_size); 554 } 555 556 if (sub_prog_cnt == 1) { 557 synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, 0); 558 fprintf(fp, "# bpf_prog_info %u: %s addr 0x%llx size %u\n", 559 info->id, name, prog_addrs[0], prog_lens[0]); 560 return; 561 } 562 563 fprintf(fp, "# bpf_prog_info %u:\n", info->id); 564 for (i = 0; i < sub_prog_cnt; i++) { 565 synthesize_bpf_prog_name(name, KSYM_NAME_LEN, info, btf, i); 566 567 fprintf(fp, "# \tsub_prog %u: %s addr 0x%llx size %u\n", 568 i, name, prog_addrs[i], prog_lens[i]); 569 } 570 } 571