// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2019 Facebook */

#include <assert.h>
#include <limits.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/time.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>
#include <perf/bpf_perf.h>

#include "bpf_counter.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
#include "evlist.h"
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"

#include "bpf_skel/bpf_prog_profiler.skel.h"
#include "bpf_skel/bperf_u.h"
#include "bpf_skel/bperf_leader.skel.h"
#include "bpf_skel/bperf_follower.skel.h"

#define ATTR_MAP_SIZE 16

static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

static struct bpf_counter *bpf_counter_alloc(void)
{
	struct bpf_counter *counter;

	counter = zalloc(sizeof(*counter));
	if (counter)
		INIT_LIST_HEAD(&counter->list);
	return counter;
}

static int bpf_program_profiler__destroy(struct evsel *evsel)
{
	struct bpf_counter *counter, *tmp;

	list_for_each_entry_safe(counter, tmp,
				 &evsel->bpf_counter_list, list) {
		list_del_init(&counter->list);
		bpf_prog_profiler_bpf__destroy(counter->skel);
		free(counter);
	}
	assert(list_empty(&evsel->bpf_counter_list));

	return 0;
}

static char *bpf_target_prog_name(int tgt_fd)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_func_info *func_info;
	const struct btf_type *t;
	char *name = NULL;
	struct btf *btf;

	info_linear = bpf_program__get_prog_info_linear(
		tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
	if (IS_ERR_OR_NULL(info_linear)) {
		pr_debug("failed to get info_linear for prog FD %d\n", tgt_fd);
		return NULL;
	}

	if (info_linear->info.btf_id == 0 ||
	    btf__get_from_id(info_linear->info.btf_id, &btf)) {
		pr_debug("prog FD %d doesn't have valid btf\n", tgt_fd);
		goto out;
	}

	func_info = u64_to_ptr(info_linear->info.func_info);
	t = btf__type_by_id(btf, func_info[0].type_id);
	if (!t) {
		pr_debug("btf %d doesn't have type %d\n",
			 info_linear->info.btf_id, func_info[0].type_id);
		goto out;
	}
	name = strdup(btf__name_by_offset(btf, t->name_off));
out:
	free(info_linear);
	return name;
}
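
/*
 * Illustration only: the profiler skeleton used below is expected to
 * declare its measurement programs against a placeholder attach target,
 * roughly along the lines of (see bpf_skel/bpf_prog_profiler.bpf.c for
 * the real thing; names here are an assumption):
 *
 *	SEC("fentry/XXX")
 *	int BPF_PROG(fentry_XXX)
 *	{
 *		// read the event into fentry_readings
 *	}
 *
 *	SEC("fexit/XXX")
 *	int BPF_PROG(fexit_XXX)
 *	{
 *		// read again and add the delta to accum_readings
 *	}
 *
 * The function name returned by bpf_target_prog_name() replaces the
 * placeholder via bpf_program__set_attach_target() before the skeleton
 * is loaded.
 */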

static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	struct bpf_program *prog;
	char *prog_name;
	int prog_fd;
	int err;

	prog_fd = bpf_prog_get_fd_by_id(prog_id);
	if (prog_fd < 0) {
		pr_err("Failed to open fd for bpf prog %u\n", prog_id);
		return -1;
	}
	counter = bpf_counter_alloc();
	if (!counter) {
		close(prog_fd);
		return -1;
	}

	skel = bpf_prog_profiler_bpf__open();
	if (!skel) {
		pr_err("Failed to open bpf skeleton\n");
		goto err_out;
	}

	skel->rodata->num_cpu = evsel__nr_cpus(evsel);

	bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
	bpf_map__resize(skel->maps.fentry_readings, 1);
	bpf_map__resize(skel->maps.accum_readings, 1);

	prog_name = bpf_target_prog_name(prog_fd);
	if (!prog_name) {
		pr_err("Failed to get program name for bpf prog %u. Does it have BTF?\n", prog_id);
		goto err_out;
	}

	bpf_object__for_each_program(prog, skel->obj) {
		err = bpf_program__set_attach_target(prog, prog_fd, prog_name);
		if (err) {
			pr_err("bpf_program__set_attach_target failed.\n"
			       "Does bpf prog %u have BTF?\n", prog_id);
			goto err_out;
		}
	}
	set_max_rlimit();
	err = bpf_prog_profiler_bpf__load(skel);
	if (err) {
		pr_err("bpf_prog_profiler_bpf__load failed\n");
		goto err_out;
	}

	assert(skel != NULL);
	counter->skel = skel;
	list_add(&counter->list, &evsel->bpf_counter_list);
	close(prog_fd);
	return 0;
err_out:
	bpf_prog_profiler_bpf__destroy(skel);
	free(counter);
	close(prog_fd);
	return -1;
}

static int bpf_program_profiler__load(struct evsel *evsel, struct target *target)
{
	char *bpf_str, *bpf_str_, *tok, *saveptr = NULL, *p;
	u32 prog_id;
	int ret;

	bpf_str_ = bpf_str = strdup(target->bpf_str);
	if (!bpf_str)
		return -1;

	while ((tok = strtok_r(bpf_str, ",", &saveptr)) != NULL) {
		prog_id = strtoul(tok, &p, 10);
		if (prog_id == 0 || prog_id == UINT_MAX ||
		    (*p != '\0' && *p != ',')) {
			pr_err("Failed to parse bpf prog ids %s\n",
			       target->bpf_str);
			free(bpf_str_);
			return -1;
		}

		ret = bpf_program_profiler_load_one(evsel, prog_id);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			free(bpf_str_);
			return -1;
		}
		bpf_str = NULL;
	}
	free(bpf_str_);
	return 0;
}
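
/*
 * Example only: target->bpf_str carries the comma separated, decimal BPF
 * program IDs given on the perf stat command line (e.g. with the
 * -b/--bpf-prog option), something like:
 *
 *	# perf stat -b 254,256 -e cycles,instructions -I 1000
 *
 * which calls bpf_program_profiler_load_one() once for prog 254 and once
 * for prog 256. The IDs above are made up; use "bpftool prog show" to
 * list real ones on a running system.
 */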

static int bpf_program_profiler__enable(struct evsel *evsel)
{
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		ret = bpf_prog_profiler_bpf__attach(counter->skel);
		if (ret) {
			bpf_program_profiler__destroy(evsel);
			return ret;
		}
	}
	return 0;
}

static int bpf_program_profiler__disable(struct evsel *evsel)
{
	struct bpf_counter *counter;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		assert(counter->skel != NULL);
		bpf_prog_profiler_bpf__detach(counter->skel);
	}
	return 0;
}

static int bpf_program_profiler__read(struct evsel *evsel)
{
	// perf_cpu_map uses /sys/devices/system/cpu/online
	int num_cpu = evsel__nr_cpus(evsel);
	// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
	// Sometimes possible > online, like on a Ryzen 3900X that has 24
	// threads but its possible showed 0-31 -acme
	int num_cpu_bpf = libbpf_num_possible_cpus();
	struct bpf_perf_event_value values[num_cpu_bpf];
	struct bpf_counter *counter;
	int reading_map_fd;
	__u32 key = 0;
	int err, cpu;

	if (list_empty(&evsel->bpf_counter_list))
		return -EAGAIN;

	for (cpu = 0; cpu < num_cpu; cpu++) {
		perf_counts(evsel->counts, cpu, 0)->val = 0;
		perf_counts(evsel->counts, cpu, 0)->ena = 0;
		perf_counts(evsel->counts, cpu, 0)->run = 0;
	}
	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		struct bpf_prog_profiler_bpf *skel = counter->skel;

		assert(skel != NULL);
		reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

		err = bpf_map_lookup_elem(reading_map_fd, &key, values);
		if (err) {
			pr_err("failed to read value\n");
			return err;
		}

		for (cpu = 0; cpu < num_cpu; cpu++) {
			perf_counts(evsel->counts, cpu, 0)->val += values[cpu].counter;
			perf_counts(evsel->counts, cpu, 0)->ena += values[cpu].enabled;
			perf_counts(evsel->counts, cpu, 0)->run += values[cpu].running;
		}
	}
	return 0;
}

static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
					    int fd)
{
	struct bpf_prog_profiler_bpf *skel;
	struct bpf_counter *counter;
	int ret;

	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
		skel = counter->skel;
		assert(skel != NULL);

		ret = bpf_map_update_elem(bpf_map__fd(skel->maps.events),
					  &cpu, &fd, BPF_ANY);
		if (ret)
			return ret;
	}
	return 0;
}

struct bpf_counter_ops bpf_program_profiler_ops = {
	.load = bpf_program_profiler__load,
	.enable = bpf_program_profiler__enable,
	.disable = bpf_program_profiler__disable,
	.read = bpf_program_profiler__read,
	.destroy = bpf_program_profiler__destroy,
	.install_pe = bpf_program_profiler__install_pe,
};
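
/*
 * bperf sessions rendezvous through a pinned BPF hash map that maps a
 * perf_event_attr (the key) to the IDs of the shared leader link and
 * diff_readings map. The value layout assumed by the code below is the
 * one declared in <perf/bpf_perf.h>, roughly:
 *
 *	struct perf_event_attr_map_entry {
 *		__u32 link_id;
 *		__u32 diff_map_id;
 *	};
 *
 * bperf_attr_map_compatible() only checks that the pinned map's key and
 * value sizes match these structs. The default pin path is
 * $SYSFS/fs/bpf/<BPF_PERF_DEFAULT_ATTR_MAP_PATH>, and it can be
 * overridden through target->attr_map.
 */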

static bool bperf_attr_map_compatible(int attr_map_fd)
{
	struct bpf_map_info map_info = {0};
	__u32 map_info_len = sizeof(map_info);
	int err;

	err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);

	if (err)
		return false;
	return (map_info.key_size == sizeof(struct perf_event_attr)) &&
		(map_info.value_size == sizeof(struct perf_event_attr_map_entry));
}

static int bperf_lock_attr_map(struct target *target)
{
	char path[PATH_MAX];
	int map_fd, err;

	if (target->attr_map) {
		scnprintf(path, PATH_MAX, "%s", target->attr_map);
	} else {
		scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
			  BPF_PERF_DEFAULT_ATTR_MAP_PATH);
	}

	if (access(path, F_OK)) {
		map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
					sizeof(struct perf_event_attr),
					sizeof(struct perf_event_attr_map_entry),
					ATTR_MAP_SIZE, 0);
		if (map_fd < 0)
			return -1;

		err = bpf_obj_pin(map_fd, path);
		if (err) {
			/* someone pinned the map in parallel? */
			close(map_fd);
			map_fd = bpf_obj_get(path);
			if (map_fd < 0)
				return -1;
		}
	} else {
		map_fd = bpf_obj_get(path);
		if (map_fd < 0)
			return -1;
	}

	if (!bperf_attr_map_compatible(map_fd)) {
		close(map_fd);
		return -1;
	}
	err = flock(map_fd, LOCK_EX);
	if (err) {
		close(map_fd);
		return -1;
	}
	return map_fd;
}
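
/*
 * bperf_check_target() below maps the stat target onto one of the filter
 * types shared with the follower BPF program. The enum lives in
 * bpf_skel/bperf_u.h; at the time of writing it looks roughly like:
 *
 *	enum bperf_filter_type {
 *		BPERF_FILTER_GLOBAL = 1,
 *		BPERF_FILTER_CPU,
 *		BPERF_FILTER_PID,
 *		BPERF_FILTER_TGID,
 *	};
 *
 * filter_entry_cnt is the number of entries needed in the follower's
 * filter and accum_readings maps: 1 for system wide, one per CPU for a
 * cpu list, and one per thread for pid/tid targets.
 */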

static int bperf_check_target(struct evsel *evsel,
			      struct target *target,
			      enum bperf_filter_type *filter_type,
			      __u32 *filter_entry_cnt)
{
	if (evsel->leader->core.nr_members > 1) {
		pr_err("bpf managed perf events do not yet support groups.\n");
		return -1;
	}

	/* determine filter type based on target */
	if (target->system_wide) {
		*filter_type = BPERF_FILTER_GLOBAL;
		*filter_entry_cnt = 1;
	} else if (target->cpu_list) {
		*filter_type = BPERF_FILTER_CPU;
		*filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
	} else if (target->tid) {
		*filter_type = BPERF_FILTER_PID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else if (target->pid || evsel->evlist->workload.pid != -1) {
		*filter_type = BPERF_FILTER_TGID;
		*filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
	} else {
		pr_err("bpf managed perf events do not yet support these targets.\n");
		return -1;
	}

	return 0;
}

static struct perf_cpu_map *all_cpu_map;

static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
				       struct perf_event_attr_map_entry *entry)
{
	struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
	int link_fd, diff_map_fd, err;
	struct bpf_link *link = NULL;

	if (!skel) {
		pr_err("Failed to open leader skeleton\n");
		return -1;
	}

	bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
	err = bperf_leader_bpf__load(skel);
	if (err) {
		pr_err("Failed to load leader skeleton\n");
		goto out;
	}

	link = bpf_program__attach(skel->progs.on_switch);
	if (IS_ERR(link)) {
		pr_err("Failed to attach leader program\n");
		err = PTR_ERR(link);
		goto out;
	}

	link_fd = bpf_link__fd(link);
	diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
	entry->link_id = bpf_link_get_id(link_fd);
	entry->diff_map_id = bpf_map_get_id(diff_map_fd);
	err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
	assert(err == 0);

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
	assert(evsel->bperf_leader_link_fd >= 0);

	/*
	 * save leader_skel for install_pe, which is called within
	 * following evsel__open_per_cpu call
	 */
	evsel->leader_skel = skel;
	evsel__open_per_cpu(evsel, all_cpu_map, -1);

out:
	bperf_leader_bpf__destroy(skel);
	bpf_link__destroy(link);
	return err;
}

static int bperf__load(struct evsel *evsel, struct target *target)
{
	struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
	int attr_map_fd, diff_map_fd = -1, err;
	enum bperf_filter_type filter_type;
	__u32 filter_entry_cnt, i;

	if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
		return -1;

	if (!all_cpu_map) {
		all_cpu_map = perf_cpu_map__new(NULL);
		if (!all_cpu_map)
			return -1;
	}

	evsel->bperf_leader_prog_fd = -1;
	evsel->bperf_leader_link_fd = -1;

	/*
	 * Step 1: hold a fd on the leader program and the bpf_link. If
	 * the program is already gone (no valid link), reload it.
	 * Use flock() to ensure exclusive access to the perf_event_attr
	 * map.
	 */
	attr_map_fd = bperf_lock_attr_map(target);
	if (attr_map_fd < 0) {
		pr_err("Failed to lock perf_event_attr map\n");
		return -1;
	}

	err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
	if (err) {
		err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
		if (err)
			goto out;
	}

	evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
	if (evsel->bperf_leader_link_fd < 0 &&
	    bperf_reload_leader_program(evsel, attr_map_fd, &entry)) {
		err = -1;
		goto out;
	}
	/*
	 * The bpf_link holds reference to the leader program, and the
	 * leader program holds reference to the maps. Therefore, if
	 * link_id is valid, diff_map_id should also be valid.
	 */
	evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
		bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
	assert(evsel->bperf_leader_prog_fd >= 0);

	diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
	assert(diff_map_fd >= 0);

	/*
	 * bperf uses BPF_PROG_TEST_RUN to get accurate reading. Check
	 * whether the kernel supports it.
	 */
	err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
	if (err) {
		pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
		       "Therefore, --use-bpf might show inaccurate readings\n");
		goto out;
	}

	/* Step 2: load the follower skeleton */
	evsel->follower_skel = bperf_follower_bpf__open();
	if (!evsel->follower_skel) {
		err = -1;
		pr_err("Failed to open follower skeleton\n");
		goto out;
	}

	/* attach fexit program to the leader program */
	bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
				       evsel->bperf_leader_prog_fd, "on_switch");

	/* connect to leader diff_reading map */
	bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);

	/* set up reading map */
	bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
				 filter_entry_cnt);
	/* set up follower filter based on target */
	bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
				 filter_entry_cnt);
	err = bperf_follower_bpf__load(evsel->follower_skel);
	if (err) {
		pr_err("Failed to load follower skeleton\n");
		bperf_follower_bpf__destroy(evsel->follower_skel);
		evsel->follower_skel = NULL;
		goto out;
	}

	for (i = 0; i < filter_entry_cnt; i++) {
		int filter_map_fd;
		__u32 key;

		if (filter_type == BPERF_FILTER_PID ||
		    filter_type == BPERF_FILTER_TGID)
			key = evsel->core.threads->map[i].pid;
		else if (filter_type == BPERF_FILTER_CPU)
			key = evsel->core.cpus->map[i];
		else
			break;

		filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
		bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
	}

	evsel->follower_skel->bss->type = filter_type;

	err = bperf_follower_bpf__attach(evsel->follower_skel);

out:
	if (err && evsel->bperf_leader_link_fd >= 0)
		close(evsel->bperf_leader_link_fd);
	if (err && evsel->bperf_leader_prog_fd >= 0)
		close(evsel->bperf_leader_prog_fd);
	if (diff_map_fd >= 0)
		close(diff_map_fd);

	flock(attr_map_fd, LOCK_UN);
	close(attr_map_fd);

	return err;
}

static int bperf__install_pe(struct evsel *evsel, int cpu, int fd)
{
	struct bperf_leader_bpf *skel = evsel->leader_skel;

	return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
				   &cpu, &fd, BPF_ANY);
}

/*
 * trigger the leader prog on each cpu, so the accum_readings map could
 * get the latest readings.
 */
static int bperf_sync_counters(struct evsel *evsel)
{
	int num_cpu, i, cpu;

	num_cpu = all_cpu_map->nr;
	for (i = 0; i < num_cpu; i++) {
		cpu = all_cpu_map->map[i];
		bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
	}
	return 0;
}
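
/*
 * Note: bperf_trigger_reading() (a helper declared in bpf_counter.h) is
 * expected to be a thin wrapper around BPF_PROG_TEST_RUN that forces the
 * leader prog to run once on a specific CPU, roughly:
 *
 *	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
 *			    .flags = BPF_F_TEST_RUN_ON_CPU,
 *			    .cpu = cpu);
 *	bpf_prog_test_run_opts(prog_fd, &opts);
 *
 * so that diff_readings/accum_readings for that CPU are up to date right
 * before user space reads them.
 */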

static int bperf__enable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 1;
	return 0;
}

static int bperf__disable(struct evsel *evsel)
{
	evsel->follower_skel->bss->enabled = 0;
	return 0;
}

static int bperf__read(struct evsel *evsel)
{
	struct bperf_follower_bpf *skel = evsel->follower_skel;
	__u32 num_cpu_bpf = cpu__max_cpu();
	struct bpf_perf_event_value values[num_cpu_bpf];
	int reading_map_fd, err = 0;
	__u32 i, j, num_cpu;

	bperf_sync_counters(evsel);
	reading_map_fd = bpf_map__fd(skel->maps.accum_readings);

	for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
		__u32 cpu;

		err = bpf_map_lookup_elem(reading_map_fd, &i, values);
		if (err)
			goto out;
		switch (evsel->follower_skel->bss->type) {
		case BPERF_FILTER_GLOBAL:
			assert(i == 0);

			num_cpu = all_cpu_map->nr;
			for (j = 0; j < num_cpu; j++) {
				cpu = all_cpu_map->map[j];
				perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
				perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
				perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
			}
			break;
		case BPERF_FILTER_CPU:
			cpu = evsel->core.cpus->map[i];
			perf_counts(evsel->counts, i, 0)->val = values[cpu].counter;
			perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled;
			perf_counts(evsel->counts, i, 0)->run = values[cpu].running;
			break;
		case BPERF_FILTER_PID:
		case BPERF_FILTER_TGID:
			perf_counts(evsel->counts, 0, i)->val = 0;
			perf_counts(evsel->counts, 0, i)->ena = 0;
			perf_counts(evsel->counts, 0, i)->run = 0;

			for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
				perf_counts(evsel->counts, 0, i)->val += values[cpu].counter;
				perf_counts(evsel->counts, 0, i)->ena += values[cpu].enabled;
				perf_counts(evsel->counts, 0, i)->run += values[cpu].running;
			}
			break;
		default:
			break;
		}
	}
out:
	return err;
}

static int bperf__destroy(struct evsel *evsel)
{
	bperf_follower_bpf__destroy(evsel->follower_skel);
	close(evsel->bperf_leader_prog_fd);
	close(evsel->bperf_leader_link_fd);
	return 0;
}

/*
 * bperf: share hardware PMCs with BPF
 *
 * perf uses performance monitoring counters (PMC) to monitor system
 * performance. The PMCs are limited hardware resources. For example,
 * Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
 *
 * Modern data center systems use these PMCs in many different ways:
 * system level monitoring, (maybe nested) container level monitoring, per
 * process monitoring, profiling (in sample mode), etc. In some cases,
 * there are more active perf_events than available hardware PMCs. To allow
 * all perf_events to have a chance to run, it is necessary to do expensive
 * time multiplexing of events.
 *
 * On the other hand, many monitoring tools count the common metrics
 * (cycles, instructions). It is a waste to have multiple tools create
 * multiple perf_events of "cycles" and occupy multiple PMCs.
 *
 * bperf tries to reduce such waste by allowing multiple perf_events of
 * "cycles" or "instructions" (at different scopes) to share PMUs. Instead
 * of having each perf-stat session read its own perf_events, bperf uses
 * BPF programs to read the perf_events and aggregate readings to BPF maps.
 * Then, each perf-stat session reads the values from these BPF maps.
 *
 *                                         ||
 *            shared progs and maps <- || -> per session progs and maps
 *                                         ||
 *   ---------------                      ||
 *   | perf_events |                      ||
 *   ---------------        fexit         ||      -----------------
 *          |              --------||----> | follower prog |
 *       --------------- /         || ---  -----------------
 * cs -> | leader prog |/          ||/       |           |
 *   --> ---------------  /||   --------------   ------------------
 *  /       |       |     / ||  | filter map |   | accum_readings |
 * /   ------------  ------------ ||  --------------   ------------------
 * |   | prev map |  | diff map | ||                     |
 * |   ------------  ------------ ||                     |
 *  \                             ||                     |
 * = \ =================================================== | ============
 *    \                                                   /  user space
 *     \                                                 /
 *      \                                               /
 *  BPF_PROG_TEST_RUN                    BPF_MAP_LOOKUP_ELEM
 *        \                                           /
 *         \                                         /
 *          \------  perf-stat ---------------------/
 *
 * The figure above shows the architecture of bperf. Note that the figure
 * is divided into 3 regions: shared progs and maps (top left), per session
 * progs and maps (top right), and user space (bottom).
 *
 * The leader prog is triggered on each context switch (cs). The leader
 * prog reads perf_events and stores the difference (current_reading -
 * previous_reading) to the diff map. For the same metric, e.g. "cycles",
 * multiple perf-stat sessions share the same leader prog.
 *
 * Each perf-stat session creates a follower prog as fexit program to the
 * leader prog. It is possible to attach up to BPF_MAX_TRAMP_PROGS (38)
 * follower progs to the same leader prog. The follower prog checks current
 * task and processor ID to decide whether to add the value from the diff
 * map to its accumulated reading map (accum_readings).
 *
 * Finally, perf-stat user space reads the value from the accum_readings
 * map.
 *
 * Besides context switch, it is also necessary to trigger the leader prog
 * before perf-stat reads the value. Otherwise, the accum_readings map may
 * not have the latest reading from the perf_events. This is achieved by
 * triggering the event via sys_bpf(BPF_PROG_TEST_RUN) on each CPU.
 *
 * The comment before the definition of struct perf_event_attr_map_entry
 * describes how different sessions of perf-stat share information about
 * the leader prog.
 */
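
/*
 * Example only (exact option spelling may differ between perf versions):
 * two concurrent sessions counting the same event share one leader prog:
 *
 *	# session A, system wide:
 *	perf stat --bpf-counters -e cycles -a sleep 10 &
 *	# session B, per process, started later; it reuses A's leader via
 *	# the pinned attr map (by default under $SYSFS/fs/bpf/):
 *	perf stat --bpf-counters -e cycles -p 1234 sleep 10
 *
 * Both sessions end up with their own follower prog and accum_readings
 * map, but only one perf_event per CPU consumes a hardware counter.
 */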

struct bpf_counter_ops bperf_ops = {
	.load = bperf__load,
	.enable = bperf__enable,
	.disable = bperf__disable,
	.read = bperf__read,
	.install_pe = bperf__install_pe,
	.destroy = bperf__destroy,
};

static inline bool bpf_counter_skip(struct evsel *evsel)
{
	return list_empty(&evsel->bpf_counter_list) &&
		evsel->follower_skel == NULL;
}

int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd);
}

int bpf_counter__load(struct evsel *evsel, struct target *target)
{
	if (target->bpf_str)
		evsel->bpf_counter_ops = &bpf_program_profiler_ops;
	else if (target->use_bpf || evsel->bpf_counter ||
		 evsel__match_bpf_counter_events(evsel->name))
		evsel->bpf_counter_ops = &bperf_ops;

	if (evsel->bpf_counter_ops)
		return evsel->bpf_counter_ops->load(evsel, target);
	return 0;
}

int bpf_counter__enable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->enable(evsel);
}

int bpf_counter__disable(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return 0;
	return evsel->bpf_counter_ops->disable(evsel);
}

int bpf_counter__read(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return -EAGAIN;
	return evsel->bpf_counter_ops->read(evsel);
}

void bpf_counter__destroy(struct evsel *evsel)
{
	if (bpf_counter_skip(evsel))
		return;
	evsel->bpf_counter_ops->destroy(evsel);
	evsel->bpf_counter_ops = NULL;
}