// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
#include "util/evlist-hybrid.h"
#include "util/pmu.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

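/*
 * Illustrative usage of the constructors above (a sketch, not code from this
 * file; error handling trimmed): a tool typically builds an evlist, adds
 * events to it and later tears it down with evlist__delete():
 *
 *	struct evlist *evlist = evlist__new();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (evlist__add_default(evlist))	// adds the default 'cycles' event
 *		goto out_delete;
 *	...
 * out_delete:
 *	evlist__delete(evlist);
 */
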
/**
 * evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	perf_evlist__add(&evlist->core, &entry->core);
	entry->evlist = evlist;
	entry->tracking = !entry->core.idx;

	if (evlist->core.nr_entries == 1)
		evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
{
	while (!list_empty(list)) {
		struct evsel *evsel, *temp, *leader = NULL;

		__evlist__for_each_entry_safe(list, temp, evsel) {
			list_del_init(&evsel->core.node);
			evlist__add(evlist, evsel);
			leader = evsel;
			break;
		}

		__evlist__for_each_entry_safe(list, temp, evsel) {
			if (evsel__has_leader(evsel, leader)) {
				list_del_init(&evsel->core.node);
				evlist__add(evlist, evsel);
			}
		}
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);

		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

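/*
 * Sketch of how the handler association above is normally driven, via the
 * evlist__set_tracepoints_handlers() wrapper from evlist.h (the tracepoint
 * name and the process_sched_switch handler are made up for the example):
 *
 *	static const struct evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch, },
 *	};
 *
 *	if (evlist__set_tracepoints_handlers(evlist, handlers))
 *		goto out_error;
 */
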
void evlist__set_leader(struct evlist *evlist)
{
	perf_evlist__set_leader(&evlist->core);
}

int __evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel;

	evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
				  PERF_COUNT_HW_CPU_CYCLES);
	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size = sizeof(attr), /* to capture ABI version */
	};
	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

__weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused)
{
	return 0;
}

struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}

struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{
	struct evlist_cpu_iterator itr = {
		.container = evlist,
		.evsel = NULL,
		.cpu_map_idx = 0,
		.evlist_cpu_map_idx = 0,
		.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
		.cpu = (struct perf_cpu){ .cpu = -1},
		.affinity = affinity,
	};

	if (evlist__empty(evlist)) {
		/* Ensure the empty list doesn't iterate. */
		itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
	} else {
		itr.evsel = evlist__first(evlist);
		if (itr.affinity) {
			itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
			affinity__set(itr.affinity, itr.cpu.cpu);
			itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
			/*
			 * If this CPU isn't in the evsel's cpu map then advance
			 * through the list.
			 */
			if (itr.cpu_map_idx == -1)
				evlist_cpu_iterator__next(&itr);
		}
	}
	return itr;
}

void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
{
	while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
		evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		if (evlist_cpu_itr->cpu_map_idx != -1)
			return;
	}
	evlist_cpu_itr->evlist_cpu_map_idx++;
	if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
		evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
		evlist_cpu_itr->cpu =
			perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
					  evlist_cpu_itr->evlist_cpu_map_idx);
		if (evlist_cpu_itr->affinity)
			affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		/*
		 * If this CPU isn't in the evsel's cpu map then advance through
		 * the list.
		 */
		if (evlist_cpu_itr->cpu_map_idx == -1)
			evlist_cpu_iterator__next(evlist_cpu_itr);
	}
}

bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
{
	return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
}

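/*
 * The iterator above is normally consumed through the evlist__for_each_cpu()
 * macro, as done by __evlist__disable()/__evlist__enable() further down.
 * A minimal sketch ("affinity" may be NULL when the calling thread should not
 * be migrated to each CPU):
 *
 *	struct evlist_cpu_iterator evlist_cpu_itr;
 *
 *	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
 *		struct evsel *pos = evlist_cpu_itr.evsel;
 *
 *		// act on pos at CPU map index evlist_cpu_itr.cpu_map_idx
 *	}
 */
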
static int evsel__strcmp(struct evsel *pos, char *evsel_name)
{
	if (!evsel_name)
		return 0;
	if (evsel__is_dummy_event(pos))
		return 1;
	return strcmp(pos->name, evsel_name);
}

static int evlist__is_enabled(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		/* If at least one event is enabled, evlist is enabled. */
		if (!pos->disabled)
			return true;
	}
	return false;
}

static void __evlist__disable(struct evlist *evlist, char *evsel_name)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	bool has_imm = false;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	/* Disable 'immediate' events last */
	for (int imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
			pos = evlist_cpu_itr.evsel;
			if (evsel__strcmp(pos, evsel_name))
				continue;
			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			if (pos->immediate)
				has_imm = true;
			if (pos->immediate != imm)
				continue;
			evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	/*
	 * If we disabled only a single event, we need to check
	 * the enabled state of the evlist manually.
	 */
	if (evsel_name)
		evlist->enabled = evlist__is_enabled(evlist);
	else
		evlist->enabled = false;
}

void evlist__disable(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL);
}

void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__disable(evlist, evsel_name);
}

static void __evlist__enable(struct evlist *evlist, char *evsel_name)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
		pos = evlist_cpu_itr.evsel;
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
	}
	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	/*
	 * Even a single event sets 'enabled' for the evlist, so that the
	 * toggle can work properly and toggle back to the 'disabled' state.
	 */
	evlist->enabled = true;
}

void evlist__enable(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL);
}

void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__enable(evlist, evsel_name);
}

void evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);

	if (per_cpu_mmaps)
		return evlist__enable_event_cpu(evlist, evsel, idx);

	return evlist__enable_event_thread(evlist, evsel, idx);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
				       fdarray_flag__nonfilterable);
}
#endif

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

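/*
 * Example of the indexing done by evlist__event2id() above: when every event
 * sets PERF_SAMPLE_IDENTIFIER, id_pos is 0 and is_pos is 1, so the id is the
 * first u64 after the header of a PERF_RECORD_SAMPLE and the last u64 of any
 * other record type that carries a sample_id_all trailer.
 */
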
struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}

static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, struct perf_cpu cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

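/*
 * Worked example for the sizing above, assuming 4KiB pages and the default
 * kernel.perf_event_mlock_kb value of 516 kB: perf_event_mlock_kb_in_pages()
 * computes (516 - 4) kB = 512 kB = 128 pages, already a power of two, so
 * evlist__mmap_size(UINT_MAX) returns 129 pages worth of bytes: one control
 * page plus a 512 kB data area per ring buffer.
 */
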
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
	return __evlist__parse_mmap_pages(opt->value, str);
}

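/*
 * Examples of -m/--mmap-pages values accepted by the parsing above (4KiB
 * pages assumed): "16" and "64K" both mean 16 pages, "1M" means 256 pages,
 * and a non power of two count such as "3" is rounded up to 4 pages with a
 * notice printed.
 */
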
/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages - auxtrace map length in pages
 * @auxtrace_overwrite - overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
		    int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks = nr_cblocks,
		.affinity = affinity,
		.flush = flush,
		.comp_level = comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If '-a' and '--per-thread' are both specified for perf record,
	 * perf record will override '--per-thread': target->per_thread =
	 * false and target->system_wide = true.
	 *
	 * If only '--per-thread' is specified for perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false.
	 * For perf record, thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That keeps perf record's
	 * current behavior.
	 *
	 * For perf stat, the case where target->per_thread and
	 * target->system_wide are both true is allowed. It means collecting
	 * system-wide per-thread data. thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list && !target->hybrid;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count here */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * filters only work for tracepoint events, which don't have a
		 * cpu limit. So evlist and evsel should always be the same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter = NULL;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}

bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u16 evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	return first->core.attr.sample_id_all;
}

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity affinity;

	/*
	 * With perf record core.user_requested_cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.user_requested_cpus ||
	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
		perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
				      evlist_cpu_itr.cpu_map_idx);
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
	perf_evlist__reset_id_hash(&evlist->core);
}

static int evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);
	err = 0;

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return err;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
		err = evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Change the name of this process so as not to confuse
		 * --exclude-perf users who see 'perf' in the window up to
		 * the execvp() and think that perf samples are not being
		 * excluded.
		 */
		prctl(PR_SET_NAME, "perf-exec");

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

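/*
 * Putting the two functions above together, a tool usually does something
 * like the following (sketch only, error handling elided):
 *
 *	evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	// ... evlist__open(), evlist__mmap(), evlist__enable() ...
 *	evlist__start_workload(evlist);	// uncorks the child, which execvp()s
 *
 * Closing workload.cork_fd without writing to it instead cancels the
 * workload, as described in evlist__prepare_workload() above.
 */
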
int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample(evsel, event, sample);
}

int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel__leader(evsel) == evsel__leader(move_evsel))
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

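/*
 * Legal bkw_mmap_state transitions handled by evlist__toggle_bkw_mmap()
 * below; anything else trips the WARN_ONCE():
 *
 *	BKW_MMAP_NOTREADY     -> BKW_MMAP_RUNNING       (no ring buffer action)
 *	BKW_MMAP_RUNNING      -> BKW_MMAP_DATA_PENDING  (pause overwrite mmaps)
 *	BKW_MMAP_DATA_PENDING -> BKW_MMAP_EMPTY         (no ring buffer action)
 *	BKW_MMAP_EMPTY        -> BKW_MMAP_RUNNING       (resume overwrite mmaps)
 */
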
void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		evlist__pause(evlist);
		break;
	case RESUME:
		evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->core.nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel__leader(evsel);

	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (evsel__has_leader(c2, leader)) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			evsel__set_leader(c2, c2);
			c2->core.nr_members = 0;
			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	return leader;
}

static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *s, *p;
	int ret = 0, fd;

	if (strncmp(str, "fifo:", 5))
		return -EINVAL;

	str += 5;
	if (!*str || *str == ',')
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	/*
	 * O_RDWR avoids POLLHUPs which is necessary to allow the other
	 * end of a FIFO to be repeatedly opened and closed.
	 */
	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
	if (fd < 0) {
		pr_err("Failed to open '%s'\n", s);
		ret = -errno;
		goto out_free;
	}
	*ctl_fd = fd;
	*ctl_fd_close = true;

	if (p && *++p) {
		/* O_RDWR | O_NONBLOCK means the other end need not be open */
		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
		if (fd < 0) {
			pr_err("Failed to open '%s'\n", p);
			ret = -errno;
			goto out_free;
		}
		*ctl_fd_ack = fd;
	}

out_free:
	free(s);
	return ret;
}

int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *comma = NULL, *endptr = NULL;

	*ctl_fd_close = false;

	if (strncmp(str, "fd:", 3))
		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);

	*ctl_fd = strtoul(&str[3], &endptr, 0);
	if (endptr == &str[3])
		return -EINVAL;

	comma = strchr(str, ',');
	if (comma) {
		if (endptr != comma)
			return -EINVAL;

		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
		if (endptr == comma + 1 || *endptr != '\0')
			return -EINVAL;
	}

	return 0;
}

void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
	if (*ctl_fd_close) {
		*ctl_fd_close = false;
		close(ctl_fd);
		if (ctl_fd_ack >= 0)
			close(ctl_fd_ack);
	}
}

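/*
 * Examples of control specifications understood by evlist__parse_control()
 * above, as passed to the --control option of perf record/stat: "fd:10"
 * (commands read from fd 10, no ack), "fd:10,11" (acks written to fd 11) and
 * "fifo:/tmp/perf.ctl,/tmp/perf.ack" (the FIFO paths are illustrative; the
 * FIFOs are opened here, so *ctl_fd_close is set).
 */
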
int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
{
	if (fd == -1) {
		pr_debug("Control descriptor is not initialized\n");
		return 0;
	}

	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
						     fdarray_flag__nonfilterable);
	if (evlist->ctl_fd.pos < 0) {
		evlist->ctl_fd.pos = -1;
		pr_err("Failed to add ctl fd entry: %m\n");
		return -1;
	}

	evlist->ctl_fd.fd = fd;
	evlist->ctl_fd.ack = ack;

	return 0;
}

bool evlist__ctlfd_initialized(struct evlist *evlist)
{
	return evlist->ctl_fd.pos >= 0;
}

int evlist__finalize_ctlfd(struct evlist *evlist)
{
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	entries[evlist->ctl_fd.pos].fd = -1;
	entries[evlist->ctl_fd.pos].events = 0;
	entries[evlist->ctl_fd.pos].revents = 0;

	evlist->ctl_fd.pos = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.fd = -1;

	return 0;
}

static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
			      char *cmd_data, size_t data_size)
{
	int err;
	char c;
	size_t bytes_read = 0;

	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	memset(cmd_data, 0, data_size);
	data_size--;

	do {
		err = read(evlist->ctl_fd.fd, &c, 1);
		if (err > 0) {
			if (c == '\n' || c == '\0')
				break;
			cmd_data[bytes_read++] = c;
			if (bytes_read == data_size)
				break;
			continue;
		} else if (err == -1) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				err = 0;
			else
				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
		}
		break;
	} while (1);

	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");

	if (bytes_read > 0) {
		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_ENABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_DISABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
				    (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_SNAPSHOT;
			pr_debug("is snapshot\n");
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
				    (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_EVLIST;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
				    (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_STOP;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
				    (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_PING;
		}
	}

	return bytes_read ? (int)bytes_read : err;
}

int evlist__ctlfd_ack(struct evlist *evlist)
{
	int err;

	if (evlist->ctl_fd.ack == -1)
		return 0;

	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
	if (err == -1)
		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);

	return err;
}

static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
{
	char *data = cmd_data + cmd_size;

	/* no argument */
	if (!*data)
		return 0;

	/* there's argument */
	if (*data == ' ') {
		*arg = data + 1;
		return 1;
	}

	/* malformed */
	return -1;
}

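/*
 * Commands arriving on the control fd are single lines such as "enable",
 * "disable", "enable cycles", "disable cycles", "evlist -v", "snapshot",
 * "stop" or "ping" ("cycles" being just an example event name); get_cmd_arg()
 * above splits the optional argument off the command tag for the handlers
 * below.
 */
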
static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable)
{
	struct evsel *evsel;
	char *name;
	int err;

	err = get_cmd_arg(cmd_data,
			  enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 :
				   sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1,
			  &name);
	if (err < 0) {
		pr_info("failed: wrong command\n");
		return -1;
	}

	if (err) {
		evsel = evlist__find_evsel_by_str(evlist, name);
		if (evsel) {
			if (enable)
				evlist__enable_evsel(evlist, name);
			else
				evlist__disable_evsel(evlist, name);
			pr_info("Event %s %s\n", evsel->name,
				enable ? "enabled" : "disabled");
		} else {
			pr_info("failed: can't find '%s' event\n", name);
		}
	} else {
		if (enable) {
			evlist__enable(evlist);
			pr_info(EVLIST_ENABLED_MSG);
		} else {
			evlist__disable(evlist);
			pr_info(EVLIST_DISABLED_MSG);
		}
	}

	return 0;
}

static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data)
{
	struct perf_attr_details details = { .verbose = false, };
	struct evsel *evsel;
	char *arg;
	int err;

	err = get_cmd_arg(cmd_data,
			  sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1,
			  &arg);
	if (err < 0) {
		pr_info("failed: wrong command\n");
		return -1;
	}

	if (err) {
		if (!strcmp(arg, "-v")) {
			details.verbose = true;
		} else if (!strcmp(arg, "-g")) {
			details.event_group = true;
		} else if (!strcmp(arg, "-F")) {
			details.freq = true;
		} else {
			pr_info("failed: wrong command\n");
			return -1;
		}
	}

	evlist__for_each_entry(evlist, evsel)
		evsel__fprintf(evsel, &details, stderr);

	return 0;
}

int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
	int err = 0;
	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
		return 0;

	if (entries[ctlfd_pos].revents & POLLIN) {
		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
					 EVLIST_CTL_CMD_MAX_LEN);
		if (err > 0) {
			switch (*cmd) {
			case EVLIST_CTL_CMD_ENABLE:
			case EVLIST_CTL_CMD_DISABLE:
				err = evlist__ctlfd_enable(evlist, cmd_data,
							   *cmd == EVLIST_CTL_CMD_ENABLE);
				break;
			case EVLIST_CTL_CMD_EVLIST:
				err = evlist__ctlfd_list(evlist, cmd_data);
				break;
			case EVLIST_CTL_CMD_SNAPSHOT:
			case EVLIST_CTL_CMD_STOP:
			case EVLIST_CTL_CMD_PING:
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			default:
				pr_debug("ctlfd: unsupported %d\n", *cmd);
				break;
			}
			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED ||
			      *cmd == EVLIST_CTL_CMD_SNAPSHOT))
				evlist__ctlfd_ack(evlist);
		}
	}

	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
		evlist__finalize_ctlfd(evlist);
	else
		entries[ctlfd_pos].revents = 0;

	return err;
}

int evlist__ctlfd_update(struct evlist *evlist, struct pollfd *update)
{
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	if (entries[ctlfd_pos].fd != update->fd ||
	    entries[ctlfd_pos].events != update->events)
		return -1;

	entries[ctlfd_pos].revents = update->revents;
	return 0;
}

struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.idx == idx)
			return evsel;
	}
	return NULL;
}

int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
{
	struct evsel *evsel;
	int printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_dummy_event(evsel))
			continue;
		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
		} else {
			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
			break;
		}
	}

	return printed;
}

void evlist__check_mem_load_aux(struct evlist *evlist)
{
	struct evsel *leader, *evsel, *pos;

	/*
	 * For some platforms, the 'mem-loads' event is required to be used
	 * together with 'mem-loads-aux' within a group and 'mem-loads-aux'
	 * must be the group leader. Now we disable this group before reporting
	 * because 'mem-loads-aux' is just an auxiliary event. It doesn't carry
	 * any valid memory load information.
	 */
	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);
		if (leader == evsel)
			continue;

		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
			for_each_group_evsel(pos, leader) {
				evsel__set_leader(pos, pos);
				pos->core.nr_members = 0;
			}
		}
	}
}