// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2020 Facebook */

#include <errno.h>
#include <linux/err.h>
#include <linux/netfilter.h>
#include <linux/netfilter_arp.h>
#include <linux/perf_event.h>
#include <net/if.h>
#include <stdio.h>
#include <unistd.h>

#include <bpf/bpf.h>
#include <bpf/hashmap.h>

#include "json_writer.h"
#include "main.h"
#include "xlated_dumper.h"

/* Buffer size for the textual "<cache>-<op>-<result>" perf HW-cache name. */
#define PERF_HW_CACHE_LEN 128

/* Pinned-path lookup table (link id -> bpffs paths), built when -f is given. */
static struct hashmap *link_table;
/* Kernel symbol table; lazily loaded once for kprobe_multi dumps. */
static struct dump_data dd;

/* Human-readable names for perf_event_attr.type values. */
static const char *perf_type_name[PERF_TYPE_MAX] = {
	[PERF_TYPE_HARDWARE]			= "hardware",
	[PERF_TYPE_SOFTWARE]			= "software",
	[PERF_TYPE_TRACEPOINT]			= "tracepoint",
	[PERF_TYPE_HW_CACHE]			= "hw-cache",
	[PERF_TYPE_RAW]				= "raw",
	[PERF_TYPE_BREAKPOINT]			= "breakpoint",
};

/* Names for PERF_TYPE_HARDWARE event configs. */
const char *event_symbols_hw[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= "cpu-cycles",
	[PERF_COUNT_HW_INSTRUCTIONS]		= "instructions",
	[PERF_COUNT_HW_CACHE_REFERENCES]	= "cache-references",
	[PERF_COUNT_HW_CACHE_MISSES]		= "cache-misses",
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= "branch-instructions",
	[PERF_COUNT_HW_BRANCH_MISSES]		= "branch-misses",
	[PERF_COUNT_HW_BUS_CYCLES]		= "bus-cycles",
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= "stalled-cycles-frontend",
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= "stalled-cycles-backend",
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= "ref-cycles",
};

/* Names for PERF_TYPE_SOFTWARE event configs. */
const char *event_symbols_sw[PERF_COUNT_SW_MAX] = {
	[PERF_COUNT_SW_CPU_CLOCK]		= "cpu-clock",
	[PERF_COUNT_SW_TASK_CLOCK]		= "task-clock",
	[PERF_COUNT_SW_PAGE_FAULTS]		= "page-faults",
	[PERF_COUNT_SW_CONTEXT_SWITCHES]	= "context-switches",
	[PERF_COUNT_SW_CPU_MIGRATIONS]		= "cpu-migrations",
	[PERF_COUNT_SW_PAGE_FAULTS_MIN]		= "minor-faults",
	[PERF_COUNT_SW_PAGE_FAULTS_MAJ]		= "major-faults",
	[PERF_COUNT_SW_ALIGNMENT_FAULTS]	= "alignment-faults",
	[PERF_COUNT_SW_EMULATION_FAULTS]	= "emulation-faults",
	[PERF_COUNT_SW_DUMMY]			= "dummy",
	[PERF_COUNT_SW_BPF_OUTPUT]		= "bpf-output",
	[PERF_COUNT_SW_CGROUP_SWITCHES]		= "cgroup-switches",
};

/* Names for the cache / op / result bytes of a PERF_TYPE_HW_CACHE config. */
const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] = {
	[PERF_COUNT_HW_CACHE_L1D]		= "L1-dcache",
	[PERF_COUNT_HW_CACHE_L1I]		= "L1-icache",
	[PERF_COUNT_HW_CACHE_LL]		= "LLC",
	[PERF_COUNT_HW_CACHE_DTLB]		= "dTLB",
	[PERF_COUNT_HW_CACHE_ITLB]		= "iTLB",
	[PERF_COUNT_HW_CACHE_BPU]		= "branch",
	[PERF_COUNT_HW_CACHE_NODE]		= "node",
};

const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] = {
	[PERF_COUNT_HW_CACHE_OP_READ]		= "load",
	[PERF_COUNT_HW_CACHE_OP_WRITE]		= "store",
	[PERF_COUNT_HW_CACHE_OP_PREFETCH]	= "prefetch",
};

const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[PERF_COUNT_HW_CACHE_RESULT_ACCESS]	= "refs",
	[PERF_COUNT_HW_CACHE_RESULT_MISS]	= "misses",
};

/* Look up id in array, yielding NULL when out of range or unnamed.
 * NOTE(review): "(id) >= 0" is always true for the unsigned ids callers
 * pass; harmless, but may trigger -Wtype-limits.
 */
#define perf_event_name(array, id) ({			\
	const char *event_str = NULL;			\
							\
	if ((id) >= 0 && (id) < ARRAY_SIZE(array))	\
		event_str = array[id];			\
	event_str;					\
})

/* Parse "id ID" or "pinned PATH" from argv and return an open link fd,
 * or a negative value on error. Consumes the parsed arguments via
 * NEXT_ARGP().
 */
static int link_parse_fd(int *argc, char ***argv)
{
	int fd;

	if (is_prefix(**argv, "id")) {
		unsigned int id;
		char *endptr;

		NEXT_ARGP();

		id = strtoul(**argv, &endptr, 0);
		if (*endptr) {
			p_err("can't parse %s as ID", **argv);
			return -1;
		}
		NEXT_ARGP();

		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0)
			p_err("failed to get link with ID %d: %s", id, strerror(errno));
		return fd;
	} else if (is_prefix(**argv, "pinned")) {
		char *path;

		NEXT_ARGP();

		path = **argv;
		NEXT_ARGP();

		return open_obj_pinned_any(path, BPF_OBJ_LINK);
	}

	p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
	return -1;
}

/* Emit the fields shared by all link types (id, type, prog id). */
static void
show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
{
	const char *link_type_str;

	jsonw_uint_field(wtr, "id", info->id);
	link_type_str =
libbpf_bpf_link_type_str(info->type); 134 if (link_type_str) 135 jsonw_string_field(wtr, "type", link_type_str); 136 else 137 jsonw_uint_field(wtr, "type", info->type); 138 139 jsonw_uint_field(json_wtr, "prog_id", info->prog_id); 140 } 141 142 static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr) 143 { 144 const char *attach_type_str; 145 146 attach_type_str = libbpf_bpf_attach_type_str(attach_type); 147 if (attach_type_str) 148 jsonw_string_field(wtr, "attach_type", attach_type_str); 149 else 150 jsonw_uint_field(wtr, "attach_type", attach_type); 151 } 152 153 static bool is_iter_map_target(const char *target_name) 154 { 155 return strcmp(target_name, "bpf_map_elem") == 0 || 156 strcmp(target_name, "bpf_sk_storage_map") == 0; 157 } 158 159 static bool is_iter_cgroup_target(const char *target_name) 160 { 161 return strcmp(target_name, "cgroup") == 0; 162 } 163 164 static const char *cgroup_order_string(__u32 order) 165 { 166 switch (order) { 167 case BPF_CGROUP_ITER_ORDER_UNSPEC: 168 return "order_unspec"; 169 case BPF_CGROUP_ITER_SELF_ONLY: 170 return "self_only"; 171 case BPF_CGROUP_ITER_DESCENDANTS_PRE: 172 return "descendants_pre"; 173 case BPF_CGROUP_ITER_DESCENDANTS_POST: 174 return "descendants_post"; 175 case BPF_CGROUP_ITER_ANCESTORS_UP: 176 return "ancestors_up"; 177 default: /* won't happen */ 178 return "unknown"; 179 } 180 } 181 182 static bool is_iter_task_target(const char *target_name) 183 { 184 return strcmp(target_name, "task") == 0 || 185 strcmp(target_name, "task_file") == 0 || 186 strcmp(target_name, "task_vma") == 0; 187 } 188 189 static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr) 190 { 191 const char *target_name = u64_to_ptr(info->iter.target_name); 192 193 jsonw_string_field(wtr, "target_name", target_name); 194 195 if (is_iter_map_target(target_name)) 196 jsonw_uint_field(wtr, "map_id", info->iter.map.map_id); 197 else if (is_iter_task_target(target_name)) { 198 if (info->iter.task.tid) 199 
jsonw_uint_field(wtr, "tid", info->iter.task.tid); 200 else if (info->iter.task.pid) 201 jsonw_uint_field(wtr, "pid", info->iter.task.pid); 202 } 203 204 if (is_iter_cgroup_target(target_name)) { 205 jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id); 206 jsonw_string_field(wtr, "order", 207 cgroup_order_string(info->iter.cgroup.order)); 208 } 209 } 210 211 void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr) 212 { 213 jsonw_uint_field(json_wtr, "pf", 214 info->netfilter.pf); 215 jsonw_uint_field(json_wtr, "hook", 216 info->netfilter.hooknum); 217 jsonw_int_field(json_wtr, "prio", 218 info->netfilter.priority); 219 jsonw_uint_field(json_wtr, "flags", 220 info->netfilter.flags); 221 } 222 223 static int get_prog_info(int prog_id, struct bpf_prog_info *info) 224 { 225 __u32 len = sizeof(*info); 226 int err, prog_fd; 227 228 prog_fd = bpf_prog_get_fd_by_id(prog_id); 229 if (prog_fd < 0) 230 return prog_fd; 231 232 memset(info, 0, sizeof(*info)); 233 err = bpf_prog_get_info_by_fd(prog_fd, info, &len); 234 if (err) 235 p_err("can't get prog info: %s", strerror(errno)); 236 close(prog_fd); 237 return err; 238 } 239 240 static int cmp_u64(const void *A, const void *B) 241 { 242 const __u64 *a = A, *b = B; 243 244 return *a - *b; 245 } 246 247 static void 248 show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr) 249 { 250 __u32 i, j = 0; 251 __u64 *addrs; 252 253 jsonw_bool_field(json_wtr, "retprobe", 254 info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN); 255 jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count); 256 jsonw_name(json_wtr, "funcs"); 257 jsonw_start_array(json_wtr); 258 addrs = u64_to_ptr(info->kprobe_multi.addrs); 259 qsort(addrs, info->kprobe_multi.count, sizeof(addrs[0]), cmp_u64); 260 261 /* Load it once for all. 
*/ 262 if (!dd.sym_count) 263 kernel_syms_load(&dd); 264 for (i = 0; i < dd.sym_count; i++) { 265 if (dd.sym_mapping[i].address != addrs[j]) 266 continue; 267 jsonw_start_object(json_wtr); 268 jsonw_uint_field(json_wtr, "addr", dd.sym_mapping[i].address); 269 jsonw_string_field(json_wtr, "func", dd.sym_mapping[i].name); 270 /* Print null if it is vmlinux */ 271 if (dd.sym_mapping[i].module[0] == '\0') { 272 jsonw_name(json_wtr, "module"); 273 jsonw_null(json_wtr); 274 } else { 275 jsonw_string_field(json_wtr, "module", dd.sym_mapping[i].module); 276 } 277 jsonw_end_object(json_wtr); 278 if (j++ == info->kprobe_multi.count) 279 break; 280 } 281 jsonw_end_array(json_wtr); 282 } 283 284 static void 285 show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr) 286 { 287 jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_KRETPROBE); 288 jsonw_uint_field(wtr, "addr", info->perf_event.kprobe.addr); 289 jsonw_string_field(wtr, "func", 290 u64_to_ptr(info->perf_event.kprobe.func_name)); 291 jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset); 292 } 293 294 static void 295 show_perf_event_uprobe_json(struct bpf_link_info *info, json_writer_t *wtr) 296 { 297 jsonw_bool_field(wtr, "retprobe", info->perf_event.type == BPF_PERF_EVENT_URETPROBE); 298 jsonw_string_field(wtr, "file", 299 u64_to_ptr(info->perf_event.uprobe.file_name)); 300 jsonw_uint_field(wtr, "offset", info->perf_event.uprobe.offset); 301 } 302 303 static void 304 show_perf_event_tracepoint_json(struct bpf_link_info *info, json_writer_t *wtr) 305 { 306 jsonw_string_field(wtr, "tracepoint", 307 u64_to_ptr(info->perf_event.tracepoint.tp_name)); 308 } 309 310 static char *perf_config_hw_cache_str(__u64 config) 311 { 312 const char *hw_cache, *result, *op; 313 char *str = malloc(PERF_HW_CACHE_LEN); 314 315 if (!str) { 316 p_err("mem alloc failed"); 317 return NULL; 318 } 319 320 hw_cache = perf_event_name(evsel__hw_cache, config & 0xff); 321 if (hw_cache) 322 
snprintf(str, PERF_HW_CACHE_LEN, "%s-", hw_cache); 323 else 324 snprintf(str, PERF_HW_CACHE_LEN, "%lld-", config & 0xff); 325 326 op = perf_event_name(evsel__hw_cache_op, (config >> 8) & 0xff); 327 if (op) 328 snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str), 329 "%s-", op); 330 else 331 snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str), 332 "%lld-", (config >> 8) & 0xff); 333 334 result = perf_event_name(evsel__hw_cache_result, config >> 16); 335 if (result) 336 snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str), 337 "%s", result); 338 else 339 snprintf(str + strlen(str), PERF_HW_CACHE_LEN - strlen(str), 340 "%lld", config >> 16); 341 return str; 342 } 343 344 static const char *perf_config_str(__u32 type, __u64 config) 345 { 346 const char *perf_config; 347 348 switch (type) { 349 case PERF_TYPE_HARDWARE: 350 perf_config = perf_event_name(event_symbols_hw, config); 351 break; 352 case PERF_TYPE_SOFTWARE: 353 perf_config = perf_event_name(event_symbols_sw, config); 354 break; 355 case PERF_TYPE_HW_CACHE: 356 perf_config = perf_config_hw_cache_str(config); 357 break; 358 default: 359 perf_config = NULL; 360 break; 361 } 362 return perf_config; 363 } 364 365 static void 366 show_perf_event_event_json(struct bpf_link_info *info, json_writer_t *wtr) 367 { 368 __u64 config = info->perf_event.event.config; 369 __u32 type = info->perf_event.event.type; 370 const char *perf_type, *perf_config; 371 372 perf_type = perf_event_name(perf_type_name, type); 373 if (perf_type) 374 jsonw_string_field(wtr, "event_type", perf_type); 375 else 376 jsonw_uint_field(wtr, "event_type", type); 377 378 perf_config = perf_config_str(type, config); 379 if (perf_config) 380 jsonw_string_field(wtr, "event_config", perf_config); 381 else 382 jsonw_uint_field(wtr, "event_config", config); 383 384 if (type == PERF_TYPE_HW_CACHE && perf_config) 385 free((void *)perf_config); 386 } 387 388 static int show_link_close_json(int fd, struct bpf_link_info *info) 389 { 390 
struct bpf_prog_info prog_info; 391 const char *prog_type_str; 392 int err; 393 394 jsonw_start_object(json_wtr); 395 396 show_link_header_json(info, json_wtr); 397 398 switch (info->type) { 399 case BPF_LINK_TYPE_RAW_TRACEPOINT: 400 jsonw_string_field(json_wtr, "tp_name", 401 u64_to_ptr(info->raw_tracepoint.tp_name)); 402 break; 403 case BPF_LINK_TYPE_TRACING: 404 err = get_prog_info(info->prog_id, &prog_info); 405 if (err) 406 return err; 407 408 prog_type_str = libbpf_bpf_prog_type_str(prog_info.type); 409 /* libbpf will return NULL for variants unknown to it. */ 410 if (prog_type_str) 411 jsonw_string_field(json_wtr, "prog_type", prog_type_str); 412 else 413 jsonw_uint_field(json_wtr, "prog_type", prog_info.type); 414 415 show_link_attach_type_json(info->tracing.attach_type, 416 json_wtr); 417 jsonw_uint_field(json_wtr, "target_obj_id", info->tracing.target_obj_id); 418 jsonw_uint_field(json_wtr, "target_btf_id", info->tracing.target_btf_id); 419 break; 420 case BPF_LINK_TYPE_CGROUP: 421 jsonw_lluint_field(json_wtr, "cgroup_id", 422 info->cgroup.cgroup_id); 423 show_link_attach_type_json(info->cgroup.attach_type, json_wtr); 424 break; 425 case BPF_LINK_TYPE_ITER: 426 show_iter_json(info, json_wtr); 427 break; 428 case BPF_LINK_TYPE_NETNS: 429 jsonw_uint_field(json_wtr, "netns_ino", 430 info->netns.netns_ino); 431 show_link_attach_type_json(info->netns.attach_type, json_wtr); 432 break; 433 case BPF_LINK_TYPE_NETFILTER: 434 netfilter_dump_json(info, json_wtr); 435 break; 436 case BPF_LINK_TYPE_STRUCT_OPS: 437 jsonw_uint_field(json_wtr, "map_id", 438 info->struct_ops.map_id); 439 break; 440 case BPF_LINK_TYPE_KPROBE_MULTI: 441 show_kprobe_multi_json(info, json_wtr); 442 break; 443 case BPF_LINK_TYPE_PERF_EVENT: 444 switch (info->perf_event.type) { 445 case BPF_PERF_EVENT_EVENT: 446 show_perf_event_event_json(info, json_wtr); 447 break; 448 case BPF_PERF_EVENT_TRACEPOINT: 449 show_perf_event_tracepoint_json(info, json_wtr); 450 break; 451 case 
		     BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_json(info, json_wtr);
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_json(info, json_wtr);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	/* List any bpffs paths this link is pinned under (-f option). */
	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		jsonw_name(json_wtr, "pinned");
		jsonw_start_array(json_wtr);
		hashmap__for_each_key_entry(link_table, entry, info->id)
			jsonw_string(json_wtr, entry->pvalue);
		jsonw_end_array(json_wtr);
	}

	emit_obj_refs_json(refs_table, info->id, json_wtr);

	jsonw_end_object(json_wtr);

	return 0;
}

/* Print the "<id>: <type> prog <prog_id>" prefix common to all link types. */
static void show_link_header_plain(struct bpf_link_info *info)
{
	const char *link_type_str;

	printf("%u: ", info->id);
	link_type_str = libbpf_bpf_link_type_str(info->type);
	if (link_type_str)
		printf("%s ", link_type_str);
	else
		printf("type %u ", info->type);

	/* struct_ops links reference a map rather than a program. */
	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
		printf("map %u ", info->struct_ops.map_id);
	else
		printf("prog %u ", info->prog_id);
}

static void show_link_attach_type_plain(__u32 attach_type)
{
	const char *attach_type_str;

	attach_type_str = libbpf_bpf_attach_type_str(attach_type);
	if (attach_type_str)
		printf("attach_type %s ", attach_type_str);
	else
		printf("attach_type %u ", attach_type);
}

/* Plain-text twin of show_iter_json(). */
static void show_iter_plain(struct bpf_link_info *info)
{
	const char *target_name = u64_to_ptr(info->iter.target_name);

	printf("target_name %s ", target_name);

	if (is_iter_map_target(target_name))
		printf("map_id %u ", info->iter.map.map_id);
	else if (is_iter_task_target(target_name)) {
		if (info->iter.task.tid)
			printf("tid %u ", info->iter.task.tid);
		else if (info->iter.task.pid)
			printf("pid %u ", info->iter.task.pid);
	}

	if
(is_iter_cgroup_target(target_name)) { 528 printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id); 529 printf("order %s ", 530 cgroup_order_string(info->iter.cgroup.order)); 531 } 532 } 533 534 static const char * const pf2name[] = { 535 [NFPROTO_INET] = "inet", 536 [NFPROTO_IPV4] = "ip", 537 [NFPROTO_ARP] = "arp", 538 [NFPROTO_NETDEV] = "netdev", 539 [NFPROTO_BRIDGE] = "bridge", 540 [NFPROTO_IPV6] = "ip6", 541 }; 542 543 static const char * const inethook2name[] = { 544 [NF_INET_PRE_ROUTING] = "prerouting", 545 [NF_INET_LOCAL_IN] = "input", 546 [NF_INET_FORWARD] = "forward", 547 [NF_INET_LOCAL_OUT] = "output", 548 [NF_INET_POST_ROUTING] = "postrouting", 549 }; 550 551 static const char * const arphook2name[] = { 552 [NF_ARP_IN] = "input", 553 [NF_ARP_OUT] = "output", 554 }; 555 556 void netfilter_dump_plain(const struct bpf_link_info *info) 557 { 558 const char *hookname = NULL, *pfname = NULL; 559 unsigned int hook = info->netfilter.hooknum; 560 unsigned int pf = info->netfilter.pf; 561 562 if (pf < ARRAY_SIZE(pf2name)) 563 pfname = pf2name[pf]; 564 565 switch (pf) { 566 case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */ 567 case NFPROTO_IPV4: 568 case NFPROTO_IPV6: 569 case NFPROTO_INET: 570 if (hook < ARRAY_SIZE(inethook2name)) 571 hookname = inethook2name[hook]; 572 break; 573 case NFPROTO_ARP: 574 if (hook < ARRAY_SIZE(arphook2name)) 575 hookname = arphook2name[hook]; 576 default: 577 break; 578 } 579 580 if (pfname) 581 printf("\n\t%s", pfname); 582 else 583 printf("\n\tpf: %d", pf); 584 585 if (hookname) 586 printf(" %s", hookname); 587 else 588 printf(", hook %u,", hook); 589 590 printf(" prio %d", info->netfilter.priority); 591 592 if (info->netfilter.flags) 593 printf(" flags 0x%x", info->netfilter.flags); 594 } 595 596 static void show_kprobe_multi_plain(struct bpf_link_info *info) 597 { 598 __u32 i, j = 0; 599 __u64 *addrs; 600 601 if (!info->kprobe_multi.count) 602 return; 603 604 if (info->kprobe_multi.flags & 
BPF_F_KPROBE_MULTI_RETURN) 605 printf("\n\tkretprobe.multi "); 606 else 607 printf("\n\tkprobe.multi "); 608 printf("func_cnt %u ", info->kprobe_multi.count); 609 addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs); 610 qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64); 611 612 /* Load it once for all. */ 613 if (!dd.sym_count) 614 kernel_syms_load(&dd); 615 if (!dd.sym_count) 616 return; 617 618 printf("\n\t%-16s %s", "addr", "func [module]"); 619 for (i = 0; i < dd.sym_count; i++) { 620 if (dd.sym_mapping[i].address != addrs[j]) 621 continue; 622 printf("\n\t%016lx %s", 623 dd.sym_mapping[i].address, dd.sym_mapping[i].name); 624 if (dd.sym_mapping[i].module[0] != '\0') 625 printf(" [%s] ", dd.sym_mapping[i].module); 626 else 627 printf(" "); 628 629 if (j++ == info->kprobe_multi.count) 630 break; 631 } 632 } 633 634 static void show_perf_event_kprobe_plain(struct bpf_link_info *info) 635 { 636 const char *buf; 637 638 buf = u64_to_ptr(info->perf_event.kprobe.func_name); 639 if (buf[0] == '\0' && !info->perf_event.kprobe.addr) 640 return; 641 642 if (info->perf_event.type == BPF_PERF_EVENT_KRETPROBE) 643 printf("\n\tkretprobe "); 644 else 645 printf("\n\tkprobe "); 646 if (info->perf_event.kprobe.addr) 647 printf("%llx ", info->perf_event.kprobe.addr); 648 printf("%s", buf); 649 if (info->perf_event.kprobe.offset) 650 printf("+%#x", info->perf_event.kprobe.offset); 651 printf(" "); 652 } 653 654 static void show_perf_event_uprobe_plain(struct bpf_link_info *info) 655 { 656 const char *buf; 657 658 buf = u64_to_ptr(info->perf_event.uprobe.file_name); 659 if (buf[0] == '\0') 660 return; 661 662 if (info->perf_event.type == BPF_PERF_EVENT_URETPROBE) 663 printf("\n\turetprobe "); 664 else 665 printf("\n\tuprobe "); 666 printf("%s+%#x ", buf, info->perf_event.uprobe.offset); 667 } 668 669 static void show_perf_event_tracepoint_plain(struct bpf_link_info *info) 670 { 671 const char *buf; 672 673 buf = u64_to_ptr(info->perf_event.tracepoint.tp_name); 674 
if (buf[0] == '\0') 675 return; 676 677 printf("\n\ttracepoint %s ", buf); 678 } 679 680 static void show_perf_event_event_plain(struct bpf_link_info *info) 681 { 682 __u64 config = info->perf_event.event.config; 683 __u32 type = info->perf_event.event.type; 684 const char *perf_type, *perf_config; 685 686 printf("\n\tevent "); 687 perf_type = perf_event_name(perf_type_name, type); 688 if (perf_type) 689 printf("%s:", perf_type); 690 else 691 printf("%u :", type); 692 693 perf_config = perf_config_str(type, config); 694 if (perf_config) 695 printf("%s ", perf_config); 696 else 697 printf("%llu ", config); 698 699 if (type == PERF_TYPE_HW_CACHE && perf_config) 700 free((void *)perf_config); 701 } 702 703 static int show_link_close_plain(int fd, struct bpf_link_info *info) 704 { 705 struct bpf_prog_info prog_info; 706 const char *prog_type_str; 707 int err; 708 709 show_link_header_plain(info); 710 711 switch (info->type) { 712 case BPF_LINK_TYPE_RAW_TRACEPOINT: 713 printf("\n\ttp '%s' ", 714 (const char *)u64_to_ptr(info->raw_tracepoint.tp_name)); 715 break; 716 case BPF_LINK_TYPE_TRACING: 717 err = get_prog_info(info->prog_id, &prog_info); 718 if (err) 719 return err; 720 721 prog_type_str = libbpf_bpf_prog_type_str(prog_info.type); 722 /* libbpf will return NULL for variants unknown to it. 
		 */
		if (prog_type_str)
			printf("\n\tprog_type %s ", prog_type_str);
		else
			printf("\n\tprog_type %u ", prog_info.type);

		show_link_attach_type_plain(info->tracing.attach_type);
		if (info->tracing.target_obj_id || info->tracing.target_btf_id)
			printf("\n\ttarget_obj_id %u target_btf_id %u ",
			       info->tracing.target_obj_id,
			       info->tracing.target_btf_id);
		break;
	case BPF_LINK_TYPE_CGROUP:
		printf("\n\tcgroup_id %zu ", (size_t)info->cgroup.cgroup_id);
		show_link_attach_type_plain(info->cgroup.attach_type);
		break;
	case BPF_LINK_TYPE_ITER:
		show_iter_plain(info);
		break;
	case BPF_LINK_TYPE_NETNS:
		printf("\n\tnetns_ino %u ", info->netns.netns_ino);
		show_link_attach_type_plain(info->netns.attach_type);
		break;
	case BPF_LINK_TYPE_NETFILTER:
		netfilter_dump_plain(info);
		break;
	case BPF_LINK_TYPE_KPROBE_MULTI:
		show_kprobe_multi_plain(info);
		break;
	case BPF_LINK_TYPE_PERF_EVENT:
		switch (info->perf_event.type) {
		case BPF_PERF_EVENT_EVENT:
			show_perf_event_event_plain(info);
			break;
		case BPF_PERF_EVENT_TRACEPOINT:
			show_perf_event_tracepoint_plain(info);
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			show_perf_event_kprobe_plain(info);
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			show_perf_event_uprobe_plain(info);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	/* Pinned paths collected for -f, then referencing pids. */
	if (!hashmap__empty(link_table)) {
		struct hashmap_entry *entry;

		hashmap__for_each_key_entry(link_table, entry, info->id)
			printf("\n\tpinned %s", (char *)entry->pvalue);
	}
	emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");

	printf("\n");

	return 0;
}

/* Query the kernel for link info -- retrying with local buffers for the
 * variable-length fields -- dump it, and close fd.
 */
static int do_show_link(int fd)
{
	struct bpf_link_info info;
	__u32 len = sizeof(info);
	__u64 *addrs = NULL;
	char buf[PATH_MAX];
	int count;
	int err;

	memset(&info, 0, sizeof(info));
	buf[0] = '\0';
again:
	err = bpf_link_get_info_by_fd(fd, &info, &len);
	if (err) {
		p_err("can't get link info: %s",
		      strerror(errno));
		close(fd);
		return err;
	}
	/* The first query reports only sizes/ids for variable-length fields;
	 * point them at local buffers and query once more to fetch the data.
	 */
	if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
	    !info.raw_tracepoint.tp_name) {
		info.raw_tracepoint.tp_name = ptr_to_u64(&buf);
		info.raw_tracepoint.tp_name_len = sizeof(buf);
		goto again;
	}
	if (info.type == BPF_LINK_TYPE_ITER &&
	    !info.iter.target_name) {
		info.iter.target_name = ptr_to_u64(&buf);
		info.iter.target_name_len = sizeof(buf);
		goto again;
	}
	if (info.type == BPF_LINK_TYPE_KPROBE_MULTI &&
	    !info.kprobe_multi.addrs) {
		count = info.kprobe_multi.count;
		if (count) {
			addrs = calloc(count, sizeof(__u64));
			if (!addrs) {
				p_err("mem alloc failed");
				close(fd);
				return -ENOMEM;
			}
			info.kprobe_multi.addrs = ptr_to_u64(addrs);
			goto again;
		}
	}
	if (info.type == BPF_LINK_TYPE_PERF_EVENT) {
		switch (info.perf_event.type) {
		case BPF_PERF_EVENT_TRACEPOINT:
			if (!info.perf_event.tracepoint.tp_name) {
				info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
				info.perf_event.tracepoint.name_len = sizeof(buf);
				goto again;
			}
			break;
		case BPF_PERF_EVENT_KPROBE:
		case BPF_PERF_EVENT_KRETPROBE:
			if (!info.perf_event.kprobe.func_name) {
				info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
				info.perf_event.kprobe.name_len = sizeof(buf);
				goto again;
			}
			break;
		case BPF_PERF_EVENT_UPROBE:
		case BPF_PERF_EVENT_URETPROBE:
			if (!info.perf_event.uprobe.file_name) {
				info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
				info.perf_event.uprobe.name_len = sizeof(buf);
				goto again;
			}
			break;
		default:
			break;
		}
	}

	if (json_output)
		show_link_close_json(fd, &info);
	else
		show_link_close_plain(fd, &info);

	if (addrs)
		free(addrs);
	close(fd);
	return
0; 872 } 873 874 static int do_show(int argc, char **argv) 875 { 876 __u32 id = 0; 877 int err, fd; 878 879 if (show_pinned) { 880 link_table = hashmap__new(hash_fn_for_key_as_id, 881 equal_fn_for_key_as_id, NULL); 882 if (IS_ERR(link_table)) { 883 p_err("failed to create hashmap for pinned paths"); 884 return -1; 885 } 886 build_pinned_obj_table(link_table, BPF_OBJ_LINK); 887 } 888 build_obj_refs_table(&refs_table, BPF_OBJ_LINK); 889 890 if (argc == 2) { 891 fd = link_parse_fd(&argc, &argv); 892 if (fd < 0) 893 return fd; 894 do_show_link(fd); 895 goto out; 896 } 897 898 if (argc) 899 return BAD_ARG(); 900 901 if (json_output) 902 jsonw_start_array(json_wtr); 903 while (true) { 904 err = bpf_link_get_next_id(id, &id); 905 if (err) { 906 if (errno == ENOENT) 907 break; 908 p_err("can't get next link: %s%s", strerror(errno), 909 errno == EINVAL ? " -- kernel too old?" : ""); 910 break; 911 } 912 913 fd = bpf_link_get_fd_by_id(id); 914 if (fd < 0) { 915 if (errno == ENOENT) 916 continue; 917 p_err("can't get link by id (%u): %s", 918 id, strerror(errno)); 919 break; 920 } 921 922 err = do_show_link(fd); 923 if (err) 924 break; 925 } 926 if (json_output) 927 jsonw_end_array(json_wtr); 928 929 delete_obj_refs_table(refs_table); 930 931 if (show_pinned) 932 delete_pinned_obj_table(link_table); 933 934 out: 935 if (dd.sym_count) 936 kernel_syms_destroy(&dd); 937 return errno == ENOENT ? 
0 : -1; 938 } 939 940 static int do_pin(int argc, char **argv) 941 { 942 int err; 943 944 err = do_pin_any(argc, argv, link_parse_fd); 945 if (!err && json_output) 946 jsonw_null(json_wtr); 947 return err; 948 } 949 950 static int do_detach(int argc, char **argv) 951 { 952 int err, fd; 953 954 if (argc != 2) { 955 p_err("link specifier is invalid or missing\n"); 956 return 1; 957 } 958 959 fd = link_parse_fd(&argc, &argv); 960 if (fd < 0) 961 return 1; 962 963 err = bpf_link_detach(fd); 964 if (err) 965 err = -errno; 966 close(fd); 967 if (err) { 968 p_err("failed link detach: %s", strerror(-err)); 969 return 1; 970 } 971 972 if (json_output) 973 jsonw_null(json_wtr); 974 975 return 0; 976 } 977 978 static int do_help(int argc, char **argv) 979 { 980 if (json_output) { 981 jsonw_null(json_wtr); 982 return 0; 983 } 984 985 fprintf(stderr, 986 "Usage: %1$s %2$s { show | list } [LINK]\n" 987 " %1$s %2$s pin LINK FILE\n" 988 " %1$s %2$s detach LINK\n" 989 " %1$s %2$s help\n" 990 "\n" 991 " " HELP_SPEC_LINK "\n" 992 " " HELP_SPEC_OPTIONS " |\n" 993 " {-f|--bpffs} | {-n|--nomount} }\n" 994 "", 995 bin_name, argv[-2]); 996 997 return 0; 998 } 999 1000 static const struct cmd cmds[] = { 1001 { "show", do_show }, 1002 { "list", do_show }, 1003 { "help", do_help }, 1004 { "pin", do_pin }, 1005 { "detach", do_detach }, 1006 { 0 } 1007 }; 1008 1009 int do_link(int argc, char **argv) 1010 { 1011 return cmd_select(cmds, argc, argv, do_help); 1012 } 1013