// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *perf_evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *perf_evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
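 * (id_pos is the offset, in 64-bit words, of the event id within a
 * PERF_RECORD_SAMPLE record; is_pos is the offset counted back from the
 * end of all other record types that carry a sample_id_all trailer.)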
 */
void perf_evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	entry->evlist = evlist;
	entry->idx = evlist->core.nr_entries;
	entry->tracking = !entry->idx;

	perf_evlist__add(&evlist->core, &entry->core);

	if (evlist->core.nr_entries == 1)
		perf_evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void perf_evlist__splice_list_tail(struct evlist *evlist,
				   struct list_head *list)
{
	struct evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->core.node);
		evlist__add(evlist, evsel);
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	struct evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct evlist *evlist)
{
	if (evlist->core.nr_entries) {
		evlist->nr_groups = evlist->core.nr_entries > 1 ?
			1 : 0;
		__perf_evlist__set_leader(&evlist->core.entries);
	}
}

int __evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel = evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct evsel *
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
				     const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct evlist *evlist,
				   struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}

void evlist__cpu_iter_start(struct evlist *evlist)
{
	struct evsel *pos;

	/*
	 * Reset the per evsel cpu_iter. This is needed because
	 * each evsel's cpumap may have a different index space,
	 * and some operations need the index to modify
	 * the FD xyarray (e.g.
	 * open, close)
	 */
	evlist__for_each_entry(evlist, pos)
		pos->cpu_iter = 0;
}

bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
{
	if (ev->cpu_iter >= ev->core.cpus->nr)
		return true;
	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
		return true;
	return false;
}

bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
{
	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
		ev->cpu_iter++;
		return false;
	}
	return true;
}

void evlist__disable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i, imm = 0;
	bool has_imm = false;

	if (affinity__setup(&affinity) < 0)
		return;

	/* Disable 'immediate' events last */
	for (imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist, i, cpu) {
			affinity__set(&affinity, cpu);

			evlist__for_each_entry(evlist, pos) {
				if (evsel__cpu_iter_skip(pos, cpu))
					continue;
				if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
					continue;
				if (pos->immediate)
					has_imm = true;
				if (pos->immediate != imm)
					continue;
				evsel__disable_cpu(pos, pos->cpu_iter - 1);
			}
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	evlist->enabled = false;
}

void evlist__enable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i;

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
				continue;
			if (!evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			evsel__enable_cpu(pos, pos->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ?
		evlist__disable : evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct evlist *evlist,
					 struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct evlist *evlist,
					    struct evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct evlist *evlist,
				  struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
					   u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

static int perf_evlist__event2id(struct evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
				       union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return
			first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int perf_evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ?
		evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, int cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1       },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;

		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages - auxtrace map length in pages
 * @auxtrace_overwrite - overwrite older auxtrace data?
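 * @nr_cblocks - number of AIO control blocks used for asynchronous writing, 0 disables AIO
 * @affinity - affinity mask setup mode for the ring buffer reading thread (PERF_AFFINITY_*)
 * @flush - minimum number of bytes extracted from an mmap data page per read
 * @comp_level - compression level for the captured data, 0 disables compression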
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
		    int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed as a const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are given to perf record, perf
	 * record overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is given to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So for perf record, target->per_thread && target->system_wide is
	 * always false, thread_map__new_str() doesn't call
	 * thread_map__new_all_cpus(), and perf record keeps its current
	 * behavior.
	 *
	 * perf stat, however, allows target->per_thread and
	 * target->system_wide to both be true, meaning: collect system-wide
	 * per-thread data. In that case thread_map__new_str() calls
	 * thread_map__new_all_cpus() to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__reset_sample_bit(evsel, bit);
}

int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * filters only work for tracepoint events, which don't have a
		 * CPU limit, so the evlist and evsel cpu maps should always
		 * be the same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
}

bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID.
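	 * With PERF_SAMPLE_READ the counter values are carried in the sample
	 * itself, and only the id added by PERF_FORMAT_ID lets those values
	 * be attributed back to their evsel when parsing.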
	 */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u16 perf_evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	return first->core.attr.sample_id_all;
}

void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct affinity affinity;
	int cpu, i;

	/*
	 * With perf record core.cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.cpus) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;
	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry_reverse(evlist, evsel) {
			if (evsel__cpu_iter_skip(evsel, cpu))
				continue;
			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
}

static int perf_evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);
	err = 0;
out:
	return err;
out_put:
	perf_cpu_map__put(cpus);
	goto out;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample(evsel, event, sample);
}

int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
					union perf_event *event,
					u64 *timestamp)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if
		    (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct evlist *evlist,
			   struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void perf_evlist__set_tracking_event(struct evlist *evlist,
				     struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *
perf_evlist__find_evsel_by_str(struct evlist *evlist,
			       const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state !=
		    BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get here\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool perf_evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void perf_evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		perf_evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
					    struct evsel *evsel,
					    bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;
	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (c2->leader == leader) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			c2->leader = c2;
			c2->core.nr_members = 0;
			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	return leader;
}