// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *perf_evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *perf_evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

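/*
 * Illustrative usage (an assumption about typical callers, not code taken
 * from this file): the constructors above return NULL on failure and pair
 * with evlist__delete(), which also munmaps, closes and purges whatever was
 * set up in the meantime.
 *
 *	struct evlist *evlist = perf_evlist__new_default();	// adds one cycles event
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	...
 *	evlist__delete(evlist);
 */
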
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	entry->evlist = evlist;
	entry->idx = evlist->core.nr_entries;
	entry->tracking = !entry->idx;

	perf_evlist__add(&evlist->core, &entry->core);

	if (evlist->core.nr_entries == 1)
		perf_evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void perf_evlist__splice_list_tail(struct evlist *evlist,
				   struct list_head *list)
{
	struct evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->core.node);
		evlist__add(evlist, evsel);
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	struct evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct evlist *evlist)
{
	if (evlist->core.nr_entries) {
		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->core.entries);
	}
}

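/*
 * Worked example (illustrative): an evlist built from
 * '{cycles,instructions,cache-misses}' holds three evsels with idx 0, 1 and 2.
 * perf_evlist__set_leader() makes the cycles evsel the leader of all three,
 * sets leader->core.nr_members = 2 - 0 + 1 = 3 and evlist->nr_groups = 1.
 */
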
int __perf_evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel = perf_evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct evsel *evsel = perf_evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static int evlist__add_attrs(struct evlist *evlist,
			     struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct evsel *
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
				     const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct evlist *evlist,
				   struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}

void evlist__cpu_iter_start(struct evlist *evlist)
{
	struct evsel *pos;

	/*
	 * Reset the per evsel cpu_iter. This is needed because
	 * each evsel's cpumap may have a different index space,
	 * and some operations need the index to modify
	 * the FD xyarray (e.g. open, close)
	 */
	evlist__for_each_entry(evlist, pos)
		pos->cpu_iter = 0;
}

bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
{
	if (ev->cpu_iter >= ev->core.cpus->nr)
		return true;
	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
		return true;
	return false;
}

bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
{
	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
		ev->cpu_iter++;
		return false;
	}
	return true;
}

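/*
 * Sketch of the per-CPU walk that evlist__disable()/evlist__enable() and
 * evlist__close() below build on top of the two helpers above (assumption:
 * evlist__for_each_cpu(), defined in evlist.h, resets the iterators via
 * evlist__cpu_iter_start() before walking the merged cpu map):
 *
 *	evlist__for_each_cpu(evlist, i, cpu) {
 *		affinity__set(&affinity, cpu);
 *		evlist__for_each_entry(evlist, pos) {
 *			if (evsel__cpu_iter_skip(pos, cpu))
 *				continue;
 *			// pos->cpu_iter - 1 is this evsel's index for "cpu"
 *		}
 *	}
 *
 * Pinning to each CPU first is meant to keep the ioctl()/close() calls local
 * to the CPU that owns the events instead of bouncing between CPUs.
 */
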
void evlist__disable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i;

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
				continue;
			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			evsel__disable_cpu(pos, pos->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	evlist->enabled = false;
}

void evlist__enable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i;

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
				continue;
			if (!evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			evsel__enable_cpu(pos, pos->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct evlist *evlist,
					 struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct evlist *evlist,
					    struct evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct evlist *evlist,
				  struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!perf_evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
					   u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

static int perf_evlist__event2id(struct evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

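/*
 * Note on the two offsets used above, summarizing what the code does:
 * id_pos counts u64 slots from the start of the record body and is used for
 * PERF_RECORD_SAMPLE, while is_pos counts from the end and is used for all
 * other record types, where the id sits in the sample_id_all trailer.
 */
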
struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
				       union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int perf_evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, int cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1       },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

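/*
 * Worked example (illustrative, assuming a 4 KiB page size and
 * kernel/perf_event_mlock_kb = 516):
 *
 *	perf_event_mlock_kb_in_pages(): max = 516 - 4 = 512 KiB -> 128 pages
 *	evlist__mmap_size(128):         (128 + 1) * 4096 = 528384 bytes
 *	                                (the extra page is the control header)
 *	parse_pages_arg("512K", ...):   512 KiB / 4 KiB page = 128 pages
 *	parse_pages_arg("100", ...):    rounded up to the next power of two, 128
 */
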
/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
		    int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

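/*
 * Minimal sketch of how the pieces in this file are typically combined by a
 * perf tool (an illustration only; every step can fail and error handling is
 * omitted):
 *
 *	struct evlist *evlist = evlist__new();
 *
 *	perf_evlist__add_default(evlist);	// or parse_events(), etc.
 *	perf_evlist__create_maps(evlist, &target);
 *	evlist__open(evlist);
 *	evlist__mmap(evlist, UINT_MAX);		// UINT_MAX: use the mlock_kb default
 *	evlist__enable(evlist);
 *	while (!done)
 *		evlist__poll(evlist, -1);	// then drain the mmap rings
 *	evlist__disable(evlist);
 *	evlist__delete(evlist);			// munmaps, closes and frees
 */
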
int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are passed to perf record, perf
	 * record overrides '--per-thread', so target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is passed to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false and, for perf
	 * record, thread_map__new_str() doesn't call
	 * thread_map__new_all_cpus(), which keeps perf record's current
	 * behavior.
	 *
	 * perf stat, however, allows target->per_thread and
	 * target->system_wide to both be true, meaning system-wide per-thread
	 * data is wanted, in which case thread_map__new_str() calls
	 * thread_map__new_all_cpus() to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__reset_sample_bit(evsel, bit);
}

int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters work only for tracepoint events, which don't have a
		 * cpu limit, so the evlist and evsel cpu maps should always be
		 * the same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter = NULL;	/* so the npids == 0 case returns NULL, not garbage */
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

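/*
 * For example (illustrative), npids = 2 with pids = {2116, 2117} produces:
 *
 *	"common_pid != 2116 && common_pid != 2117"
 *
 * which the helpers below install (or append) as a tracepoint filter on every
 * tracepoint evsel in the list.
 */
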
int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
}

bool perf_evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

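/*
 * Worked example (illustrative): with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER,
 * the trailer appended to non-sample records is
 * 2 * sizeof(u32) + sizeof(u64) + sizeof(u64) = 24 bytes.
 */
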
u16 perf_evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct affinity affinity;
	int cpu, i;

	/*
	 * With perf record core.cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.cpus) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;
	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry_reverse(evlist, evsel) {
			if (evsel__cpu_iter_skip(evsel, cpu))
				continue;
			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
}

static int perf_evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	err = 0;	/* success: don't return the initial -ENOMEM */
out:
	return err;
out_put:
	perf_cpu_map__put(cpus);
	goto out;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

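/*
 * perf_evlist__prepare_workload() forks the workload but parks it on a
 * "cork": the child signals readiness by closing child_ready_pipe and then
 * blocks in read() on go_pipe until perf_evlist__start_workload() writes one
 * byte to evlist->workload.cork_fd, at which point the child execvp()s argv.
 * Rough usage sketch (an assumption about callers; counters are usually set
 * up with attr.enable_on_exec or enabled explicitly before uncorking):
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	evlist__open(evlist);
 *	...
 *	perf_evlist__start_workload(evlist);	// uncork: the child exec()s
 */
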
int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte to workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample(evsel, event, sample);
}

int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
					union perf_event *event,
					u64 *timestamp)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}

int perf_evlist__strerror_open(struct evlist *evlist,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct evlist *evlist,
			   struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

void perf_evlist__set_tracking_event(struct evlist *evlist,
				     struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *
perf_evlist__find_evsel_by_str(struct evlist *evlist,
			       const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

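/*
 * Backward (overwrite) ring buffer state machine, implemented below.  Valid
 * transitions and their side effects:
 *
 *	BKW_MMAP_NOTREADY     -> BKW_MMAP_RUNNING	(no action)
 *	BKW_MMAP_RUNNING      -> BKW_MMAP_DATA_PENDING	(pause the ring)
 *	BKW_MMAP_DATA_PENDING -> BKW_MMAP_EMPTY		(no action)
 *	BKW_MMAP_EMPTY        -> BKW_MMAP_RUNNING	(resume the ring)
 *
 * Any other requested transition leaves bkw_mmap_state unchanged.
 */
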
void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool perf_evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in the data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void perf_evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		perf_evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
					    struct evsel *evsel,
					    bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;
	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (c2->leader == leader) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			c2->leader = c2;
			c2->core.nr_members = 0;
			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	return leader;
}