// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "record.h"
#include "debug.h"
#include "units.h"
#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
#include "util/pmu.h"
#include "util/sample.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/timerfd.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();
	bool can_profile_kernel;
	int err;

	if (!evlist)
		return NULL;

	can_profile_kernel = perf_event_paranoid_check(1);
	err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
	if (err) {
		evlist__delete(evlist);
		return NULL;
	}

	if (evlist->core.nr_entries > 1) {
		struct evsel *evsel;

		evlist__for_each_entry(evlist, evsel)
			evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
	}

	return evlist;
}

struct evlist *evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
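
/*
 * Illustrative usage sketch (not part of the original file): typical
 * lifecycle of an evlist built with the constructors above.  Error handling
 * is elided; the "cycles:P" vs "cycles:Pu" choice is made internally by
 * evlist__new_default() based on perf_event_paranoid.
 *
 *	struct evlist *evlist = evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	// ... create maps, open, mmap, enable, consume events ...
 *	evlist__delete(evlist);	// closes, munmaps and frees all evsels
 */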

/**
 * evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	event_enable_timer__exit(&evlist->eet);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__free_stats(evlist);
	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	perf_evlist__add(&evlist->core, &entry->core);
	entry->evlist = evlist;
	entry->tracking = !entry->core.idx;

	if (evlist->core.nr_entries == 1)
		evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
{
	while (!list_empty(list)) {
		struct evsel *evsel, *temp, *leader = NULL;

		__evlist__for_each_entry_safe(list, temp, evsel) {
			list_del_init(&evsel->core.node);
			evlist__add(evlist, evsel);
			leader = evsel;
			break;
		}

		__evlist__for_each_entry_safe(list, temp, evsel) {
			if (evsel__has_leader(evsel, leader)) {
				list_del_init(&evsel->core.node);
				evlist__add(evlist, evsel);
			}
		}
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

static void evlist__set_leader(struct evlist *evlist)
{
	perf_evlist__set_leader(&evlist->core);
}

static struct evsel *evlist__dummy_event(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
		/* Avoid frequency mode for dummy events to avoid associated timers. */
		.freq	= 0,
		.sample_period = 1,
	};

	return evsel__new_idx(&attr, evlist->core.nr_entries);
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct evsel *evsel = evlist__dummy_event(evlist);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel = evlist__dummy_event(evlist);

	if (!evsel)
		return NULL;

	evsel->core.attr.exclude_kernel = 1;
	evsel->core.attr.exclude_guest = 1;
	evsel->core.attr.exclude_hv = 1;
	evsel->core.system_wide = system_wide;
	evsel->no_aux_samples = true;
	evsel->name = strdup("dummy:u");

	evlist__add(evlist, evsel);
	return evsel;
}

#ifdef HAVE_LIBTRACEEVENT
struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0);

	if (IS_ERR(evsel))
		return evsel;

	evsel__set_sample_bit(evsel, CPU);
	evsel__set_sample_bit(evsel, TIME);

	evsel->core.system_wide = system_wide;
	evsel->no_aux_samples = true;

	evlist__add(evlist, evsel);
	return evsel;
}
#endif

int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
					  struct perf_event_attr *attrs,
					  size_t nr_attrs)
{
	if (!nr_attrs)
		return 0;

	return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
}

struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

#ifdef HAVE_LIBTRACEEVENT
int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}
#endif

struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{
	struct evlist_cpu_iterator itr = {
		.container = evlist,
		.evsel = NULL,
		.cpu_map_idx = 0,
		.evlist_cpu_map_idx = 0,
		.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
		.cpu = (struct perf_cpu){ .cpu = -1},
		.affinity = affinity,
	};

	if (evlist__empty(evlist)) {
		/* Ensure the empty list doesn't iterate. */
		itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
	} else {
		itr.evsel = evlist__first(evlist);
		if (itr.affinity) {
			itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
			affinity__set(itr.affinity, itr.cpu.cpu);
			itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
			/*
			 * If this CPU isn't in the evsel's cpu map then advance
			 * through the list.
			 */
			if (itr.cpu_map_idx == -1)
				evlist_cpu_iterator__next(&itr);
		}
	}
	return itr;
}

void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
{
	while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
		evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		if (evlist_cpu_itr->cpu_map_idx != -1)
			return;
	}
	evlist_cpu_itr->evlist_cpu_map_idx++;
	if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
		evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
		evlist_cpu_itr->cpu =
			perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
					  evlist_cpu_itr->evlist_cpu_map_idx);
		if (evlist_cpu_itr->affinity)
			affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		/*
		 * If this CPU isn't in the evsel's cpu map then advance through
		 * the list.
		 */
		if (evlist_cpu_itr->cpu_map_idx == -1)
			evlist_cpu_iterator__next(evlist_cpu_itr);
	}
}

bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
{
	return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
}
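
/*
 * Illustrative sketch (not part of the original file): the iterator above is
 * normally driven through the evlist__for_each_cpu() helper, so that the
 * affinity mask is switched once per CPU rather than once per event.
 *
 *	struct evlist_cpu_iterator evlist_cpu_itr;
 *	struct affinity affinity;
 *
 *	if (affinity__setup(&affinity) == 0) {
 *		evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
 *			// each step names one (evsel, CPU) pair
 *			evsel__enable_cpu(evlist_cpu_itr.evsel, evlist_cpu_itr.cpu_map_idx);
 *		}
 *		affinity__cleanup(&affinity);
 *	}
 */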

static int evsel__strcmp(struct evsel *pos, char *evsel_name)
{
	if (!evsel_name)
		return 0;
	if (evsel__is_dummy_event(pos))
		return 1;
	return !evsel__name_is(pos, evsel_name);
}

static int evlist__is_enabled(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		/* If at least one event is enabled, evlist is enabled. */
		if (!pos->disabled)
			return true;
	}
	return false;
}

static void __evlist__disable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	bool has_imm = false;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	/* Disable 'immediate' events last */
	for (int imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
			pos = evlist_cpu_itr.evsel;
			if (evsel__strcmp(pos, evsel_name))
				continue;
			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			if (excl_dummy && evsel__is_dummy_event(pos))
				continue;
			if (pos->immediate)
				has_imm = true;
			if (pos->immediate != imm)
				continue;
			evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		pos->disabled = true;
	}

	/*
	 * If we disabled only a single event, we need to check
	 * the enabled state of the evlist manually.
	 */
	if (evsel_name)
		evlist->enabled = evlist__is_enabled(evlist);
	else
		evlist->enabled = false;
}

void evlist__disable(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL, false);
}

void evlist__disable_non_dummy(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL, true);
}

void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__disable(evlist, evsel_name, false);
}

static void __evlist__enable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
		pos = evlist_cpu_itr.evsel;
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
	}
	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		pos->disabled = false;
	}

	/*
	 * Even a single event sets 'enabled' for the evlist,
	 * so that the toggle can work properly and switch to
	 * the 'disabled' state.
	 */
	evlist->enabled = true;
}

void evlist__enable(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL, false);
}

void evlist__enable_non_dummy(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL, true);
}

void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__enable(evlist, evsel_name, false);
}

void evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
				       fdarray_flag__nonfilterable |
				       fdarray_flag__non_perf_event);
}
#endif

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}

static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_evsel *_evsel,
			 struct perf_mmap_param *_mp,
			 int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
	struct evsel *evsel = container_of(_evsel, struct evsel, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, struct perf_cpu cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but lets not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
	return __evlist__parse_mmap_pages(opt->value, str);
}
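
/*
 * Illustrative sketch (not part of the original file): how an -m/--mmap-pages
 * argument becomes a ring buffer size, assuming a 4096 byte page_size.
 *
 *	unsigned int pages;
 *
 *	__evlist__parse_mmap_pages(&pages, "256K"); // size: 256KiB / 4KiB = 64 pages
 *	__evlist__parse_mmap_pages(&pages, "33");   // page count: rounded up to 64
 *
 *	// evlist__mmap_size(64) == (64 + 1) * 4096: one extra page for the
 *	// kernel's control/header page at the start of the mapping.
 */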

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages - auxtrace map length in pages
 * @auxtrace_overwrite - overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
			 int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If specify '-a' and '--per-thread' to perf record, perf record
	 * will override '--per-thread'. target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If specify '--per-thread' only to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false.
	 * For perf record, thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That will keep perf record's
	 * current behavior.
	 *
	 * For perf stat, it allows the case that target->per_thread and
	 * target->system_wide are all true. It means to collect system-wide
	 * per-thread data. thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count here */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		/*
		 * filters only work for tracepoint event, which doesn't have cpu limit.
		 * So evlist and evsel should always be same.
		 */
		if (evsel->filter) {
			err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
			if (err) {
				*err_evsel = evsel;
				break;
			}
		}

		/*
		 * non-tracepoint events can have BPF filters.
		 */
		if (!list_empty(&evsel->bpf_filters)) {
			err = perf_bpf_filter__prepare(evsel);
			if (err) {
				*err_evsel = evsel;
				break;
			}
		}
	}

	return err;
}

int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}
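
/*
 * Illustrative sketch (not part of the original file): the filter string
 * built by asprintf__tp_filter_pids() and applied by the helpers above.
 * The pid values are made up for the example.
 *
 *	pid_t pids[] = { 1234, 5678 };
 *
 *	// Builds "common_pid != 1234 && common_pid != 5678" and sets it on
 *	// every PERF_TYPE_TRACEPOINT evsel, e.g. so a tool doesn't trace itself.
 *	evlist__set_tp_filter_pids(evlist, 2, pids);
 */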

bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u16 evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0;
}

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity affinity;

	/*
	 * With perf record core.user_requested_cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.user_requested_cpus ||
	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
		perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
				      evlist_cpu_itr.cpu_map_idx);
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
	perf_evlist__reset_id_hash(&evlist->core);
}

static int evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return -ENOMEM;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
		err = evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Change the name of this process not to confuse --exclude-perf users
		 * that see 'perf' in the window up to the execvp() and think that
		 * perf samples are not being excluded.
		 */
		prctl(PR_SET_NAME, "perf-exec");

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);
	int ret;

	if (!evsel)
		return -EFAULT;
	ret = evsel__parse_sample(evsel, event, sample);
	if (ret)
		return ret;
	if (perf_guest && sample->id) {
		struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id);

		if (sid) {
			sample->machine_pid = sid->machine_pid;
			sample->vcpu = sid->vcpu.cpu;
		}
	}
	return 0;
}

int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}
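
/*
 * Illustrative sketch (not part of the original file): how a record-style
 * tool typically sequences the workload helpers above.  'opts' and the
 * ring-buffer consumption loop are assumptions/elided.
 *
 *	evlist__prepare_workload(evlist, &opts.target, argv, false, NULL);
 *	evlist__open(evlist);
 *	evlist__mmap(evlist, opts.mmap_pages);
 *	evlist__enable(evlist);
 *	evlist__start_workload(evlist);	// writes one byte to cork_fd, child execs
 *	// ... evlist__poll() and parse events with evlist__parse_sample() ...
 *	evlist__disable(evlist);
 */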

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel__leader(evsel) == evsel__leader(move_evsel))
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (evsel__name_is(evsel, str))
			return evsel;
	}

	return NULL;
}

void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		evlist__pause(evlist);
		break;
	case RESUME:
		evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void evlist__force_leader(struct evlist *evlist)
{
	if (evlist__nr_groups(evlist) == 0) {
		struct evsel *leader = evlist__first(evlist);

		evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel__leader(evsel);

	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (evsel__has_leader(c2, leader)) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			/*
			 * We want to close all members of the group and reopen
			 * them. Some events, like Intel topdown, require being
			 * in a group and so keep these in the group.
			 */
			evsel__remove_from_group(c2, leader);

			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	/* Reset the leader count if all entries were removed. */
	if (leader->core.nr_members == 1)
		leader->core.nr_members = 0;
	return leader;
}
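
/*
 * Illustrative sketch (not part of the original file): the --control option
 * strings accepted by evlist__parse_control() below, and the newline
 * terminated commands later decoded by evlist__ctlfd_recv().  The fd numbers
 * and fifo names are made up for the example.
 *
 *	int ctl_fd = -1, ctl_fd_ack = -1;
 *	bool ctl_fd_close = false;
 *
 *	evlist__parse_control("fd:10,11", &ctl_fd, &ctl_fd_ack, &ctl_fd_close);
 *	evlist__parse_control("fifo:ctl.fifo,ack.fifo", &ctl_fd, &ctl_fd_ack, &ctl_fd_close);
 *
 *	// The controlling process then writes commands such as "enable\n",
 *	// "disable\n", "enable cycles\n" or "evlist -v\n" to the control fd
 *	// and waits for the EVLIST_CTL_CMD_ACK_TAG reply on the ack fd.
 */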
1866 */ 1867 fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC); 1868 if (fd < 0) { 1869 pr_err("Failed to open '%s'\n", s); 1870 ret = -errno; 1871 goto out_free; 1872 } 1873 *ctl_fd = fd; 1874 *ctl_fd_close = true; 1875 1876 if (p && *++p) { 1877 /* O_RDWR | O_NONBLOCK means the other end need not be open */ 1878 fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC); 1879 if (fd < 0) { 1880 pr_err("Failed to open '%s'\n", p); 1881 ret = -errno; 1882 goto out_free; 1883 } 1884 *ctl_fd_ack = fd; 1885 } 1886 1887 out_free: 1888 free(s); 1889 return ret; 1890 } 1891 1892 int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close) 1893 { 1894 char *comma = NULL, *endptr = NULL; 1895 1896 *ctl_fd_close = false; 1897 1898 if (strncmp(str, "fd:", 3)) 1899 return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close); 1900 1901 *ctl_fd = strtoul(&str[3], &endptr, 0); 1902 if (endptr == &str[3]) 1903 return -EINVAL; 1904 1905 comma = strchr(str, ','); 1906 if (comma) { 1907 if (endptr != comma) 1908 return -EINVAL; 1909 1910 *ctl_fd_ack = strtoul(comma + 1, &endptr, 0); 1911 if (endptr == comma + 1 || *endptr != '\0') 1912 return -EINVAL; 1913 } 1914 1915 return 0; 1916 } 1917 1918 void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close) 1919 { 1920 if (*ctl_fd_close) { 1921 *ctl_fd_close = false; 1922 close(ctl_fd); 1923 if (ctl_fd_ack >= 0) 1924 close(ctl_fd_ack); 1925 } 1926 } 1927 1928 int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack) 1929 { 1930 if (fd == -1) { 1931 pr_debug("Control descriptor is not initialized\n"); 1932 return 0; 1933 } 1934 1935 evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, 1936 fdarray_flag__nonfilterable | 1937 fdarray_flag__non_perf_event); 1938 if (evlist->ctl_fd.pos < 0) { 1939 evlist->ctl_fd.pos = -1; 1940 pr_err("Failed to add ctl fd entry: %m\n"); 1941 return -1; 1942 } 1943 1944 evlist->ctl_fd.fd = fd; 1945 evlist->ctl_fd.ack = ack; 1946 1947 return 0; 1948 } 1949 1950 bool evlist__ctlfd_initialized(struct evlist *evlist) 1951 { 1952 return evlist->ctl_fd.pos >= 0; 1953 } 1954 1955 int evlist__finalize_ctlfd(struct evlist *evlist) 1956 { 1957 struct pollfd *entries = evlist->core.pollfd.entries; 1958 1959 if (!evlist__ctlfd_initialized(evlist)) 1960 return 0; 1961 1962 entries[evlist->ctl_fd.pos].fd = -1; 1963 entries[evlist->ctl_fd.pos].events = 0; 1964 entries[evlist->ctl_fd.pos].revents = 0; 1965 1966 evlist->ctl_fd.pos = -1; 1967 evlist->ctl_fd.ack = -1; 1968 evlist->ctl_fd.fd = -1; 1969 1970 return 0; 1971 } 1972 1973 static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd, 1974 char *cmd_data, size_t data_size) 1975 { 1976 int err; 1977 char c; 1978 size_t bytes_read = 0; 1979 1980 *cmd = EVLIST_CTL_CMD_UNSUPPORTED; 1981 memset(cmd_data, 0, data_size); 1982 data_size--; 1983 1984 do { 1985 err = read(evlist->ctl_fd.fd, &c, 1); 1986 if (err > 0) { 1987 if (c == '\n' || c == '\0') 1988 break; 1989 cmd_data[bytes_read++] = c; 1990 if (bytes_read == data_size) 1991 break; 1992 continue; 1993 } else if (err == -1) { 1994 if (errno == EINTR) 1995 continue; 1996 if (errno == EAGAIN || errno == EWOULDBLOCK) 1997 err = 0; 1998 else 1999 pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd); 2000 } 2001 break; 2002 } while (1); 2003 2004 pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data, 2005 bytes_read == data_size ? "" : c == '\n' ? 
"\\n" : "\\0"); 2006 2007 if (bytes_read > 0) { 2008 if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG, 2009 (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) { 2010 *cmd = EVLIST_CTL_CMD_ENABLE; 2011 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG, 2012 (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) { 2013 *cmd = EVLIST_CTL_CMD_DISABLE; 2014 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG, 2015 (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) { 2016 *cmd = EVLIST_CTL_CMD_SNAPSHOT; 2017 pr_debug("is snapshot\n"); 2018 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG, 2019 (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) { 2020 *cmd = EVLIST_CTL_CMD_EVLIST; 2021 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG, 2022 (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) { 2023 *cmd = EVLIST_CTL_CMD_STOP; 2024 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG, 2025 (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) { 2026 *cmd = EVLIST_CTL_CMD_PING; 2027 } 2028 } 2029 2030 return bytes_read ? (int)bytes_read : err; 2031 } 2032 2033 int evlist__ctlfd_ack(struct evlist *evlist) 2034 { 2035 int err; 2036 2037 if (evlist->ctl_fd.ack == -1) 2038 return 0; 2039 2040 err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG, 2041 sizeof(EVLIST_CTL_CMD_ACK_TAG)); 2042 if (err == -1) 2043 pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack); 2044 2045 return err; 2046 } 2047 2048 static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg) 2049 { 2050 char *data = cmd_data + cmd_size; 2051 2052 /* no argument */ 2053 if (!*data) 2054 return 0; 2055 2056 /* there's argument */ 2057 if (*data == ' ') { 2058 *arg = data + 1; 2059 return 1; 2060 } 2061 2062 /* malformed */ 2063 return -1; 2064 } 2065 2066 static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable) 2067 { 2068 struct evsel *evsel; 2069 char *name; 2070 int err; 2071 2072 err = get_cmd_arg(cmd_data, 2073 enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 : 2074 sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1, 2075 &name); 2076 if (err < 0) { 2077 pr_info("failed: wrong command\n"); 2078 return -1; 2079 } 2080 2081 if (err) { 2082 evsel = evlist__find_evsel_by_str(evlist, name); 2083 if (evsel) { 2084 if (enable) 2085 evlist__enable_evsel(evlist, name); 2086 else 2087 evlist__disable_evsel(evlist, name); 2088 pr_info("Event %s %s\n", evsel->name, 2089 enable ? 
"enabled" : "disabled"); 2090 } else { 2091 pr_info("failed: can't find '%s' event\n", name); 2092 } 2093 } else { 2094 if (enable) { 2095 evlist__enable(evlist); 2096 pr_info(EVLIST_ENABLED_MSG); 2097 } else { 2098 evlist__disable(evlist); 2099 pr_info(EVLIST_DISABLED_MSG); 2100 } 2101 } 2102 2103 return 0; 2104 } 2105 2106 static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data) 2107 { 2108 struct perf_attr_details details = { .verbose = false, }; 2109 struct evsel *evsel; 2110 char *arg; 2111 int err; 2112 2113 err = get_cmd_arg(cmd_data, 2114 sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1, 2115 &arg); 2116 if (err < 0) { 2117 pr_info("failed: wrong command\n"); 2118 return -1; 2119 } 2120 2121 if (err) { 2122 if (!strcmp(arg, "-v")) { 2123 details.verbose = true; 2124 } else if (!strcmp(arg, "-g")) { 2125 details.event_group = true; 2126 } else if (!strcmp(arg, "-F")) { 2127 details.freq = true; 2128 } else { 2129 pr_info("failed: wrong command\n"); 2130 return -1; 2131 } 2132 } 2133 2134 evlist__for_each_entry(evlist, evsel) 2135 evsel__fprintf(evsel, &details, stderr); 2136 2137 return 0; 2138 } 2139 2140 int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd) 2141 { 2142 int err = 0; 2143 char cmd_data[EVLIST_CTL_CMD_MAX_LEN]; 2144 int ctlfd_pos = evlist->ctl_fd.pos; 2145 struct pollfd *entries = evlist->core.pollfd.entries; 2146 2147 if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents) 2148 return 0; 2149 2150 if (entries[ctlfd_pos].revents & POLLIN) { 2151 err = evlist__ctlfd_recv(evlist, cmd, cmd_data, 2152 EVLIST_CTL_CMD_MAX_LEN); 2153 if (err > 0) { 2154 switch (*cmd) { 2155 case EVLIST_CTL_CMD_ENABLE: 2156 case EVLIST_CTL_CMD_DISABLE: 2157 err = evlist__ctlfd_enable(evlist, cmd_data, 2158 *cmd == EVLIST_CTL_CMD_ENABLE); 2159 break; 2160 case EVLIST_CTL_CMD_EVLIST: 2161 err = evlist__ctlfd_list(evlist, cmd_data); 2162 break; 2163 case EVLIST_CTL_CMD_SNAPSHOT: 2164 case EVLIST_CTL_CMD_STOP: 2165 case EVLIST_CTL_CMD_PING: 2166 break; 2167 case EVLIST_CTL_CMD_ACK: 2168 case EVLIST_CTL_CMD_UNSUPPORTED: 2169 default: 2170 pr_debug("ctlfd: unsupported %d\n", *cmd); 2171 break; 2172 } 2173 if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED || 2174 *cmd == EVLIST_CTL_CMD_SNAPSHOT)) 2175 evlist__ctlfd_ack(evlist); 2176 } 2177 } 2178 2179 if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR)) 2180 evlist__finalize_ctlfd(evlist); 2181 else 2182 entries[ctlfd_pos].revents = 0; 2183 2184 return err; 2185 } 2186 2187 /** 2188 * struct event_enable_time - perf record -D/--delay single time range. 2189 * @start: start of time range to enable events in milliseconds 2190 * @end: end of time range to enable events in milliseconds 2191 * 2192 * N.B. this structure is also accessed as an array of int. 2193 */ 2194 struct event_enable_time { 2195 int start; 2196 int end; 2197 }; 2198 2199 static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first) 2200 { 2201 const char *fmt = first ? 
"%u - %u %n" : " , %u - %u %n"; 2202 int ret, start, end, n; 2203 2204 ret = sscanf(str, fmt, &start, &end, &n); 2205 if (ret != 2 || end <= start) 2206 return -EINVAL; 2207 if (range) { 2208 range->start = start; 2209 range->end = end; 2210 } 2211 return n; 2212 } 2213 2214 static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range) 2215 { 2216 int incr = !!range; 2217 bool first = true; 2218 ssize_t ret, cnt; 2219 2220 for (cnt = 0; *str; cnt++) { 2221 ret = parse_event_enable_time(str, range, first); 2222 if (ret < 0) 2223 return ret; 2224 /* Check no overlap */ 2225 if (!first && range && range->start <= range[-1].end) 2226 return -EINVAL; 2227 str += ret; 2228 range += incr; 2229 first = false; 2230 } 2231 return cnt; 2232 } 2233 2234 /** 2235 * struct event_enable_timer - control structure for perf record -D/--delay. 2236 * @evlist: event list 2237 * @times: time ranges that events are enabled (N.B. this is also accessed as an 2238 * array of int) 2239 * @times_cnt: number of time ranges 2240 * @timerfd: timer file descriptor 2241 * @pollfd_pos: position in @evlist array of file descriptors to poll (fdarray) 2242 * @times_step: current position in (int *)@times)[], 2243 * refer event_enable_timer__process() 2244 * 2245 * Note, this structure is only used when there are time ranges, not when there 2246 * is only an initial delay. 2247 */ 2248 struct event_enable_timer { 2249 struct evlist *evlist; 2250 struct event_enable_time *times; 2251 size_t times_cnt; 2252 int timerfd; 2253 int pollfd_pos; 2254 size_t times_step; 2255 }; 2256 2257 static int str_to_delay(const char *str) 2258 { 2259 char *endptr; 2260 long d; 2261 2262 d = strtol(str, &endptr, 10); 2263 if (*endptr || d > INT_MAX || d < -1) 2264 return 0; 2265 return d; 2266 } 2267 2268 int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts, 2269 const char *str, int unset) 2270 { 2271 enum fdarray_flags flags = fdarray_flag__nonfilterable | fdarray_flag__non_perf_event; 2272 struct event_enable_timer *eet; 2273 ssize_t times_cnt; 2274 ssize_t ret; 2275 int err; 2276 2277 if (unset) 2278 return 0; 2279 2280 opts->target.initial_delay = str_to_delay(str); 2281 if (opts->target.initial_delay) 2282 return 0; 2283 2284 ret = parse_event_enable_times(str, NULL); 2285 if (ret < 0) 2286 return ret; 2287 2288 times_cnt = ret; 2289 if (times_cnt == 0) 2290 return -EINVAL; 2291 2292 eet = zalloc(sizeof(*eet)); 2293 if (!eet) 2294 return -ENOMEM; 2295 2296 eet->times = calloc(times_cnt, sizeof(*eet->times)); 2297 if (!eet->times) { 2298 err = -ENOMEM; 2299 goto free_eet; 2300 } 2301 2302 if (parse_event_enable_times(str, eet->times) != times_cnt) { 2303 err = -EINVAL; 2304 goto free_eet_times; 2305 } 2306 2307 eet->times_cnt = times_cnt; 2308 2309 eet->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); 2310 if (eet->timerfd == -1) { 2311 err = -errno; 2312 pr_err("timerfd_create failed: %s\n", strerror(errno)); 2313 goto free_eet_times; 2314 } 2315 2316 eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags); 2317 if (eet->pollfd_pos < 0) { 2318 err = eet->pollfd_pos; 2319 goto close_timerfd; 2320 } 2321 2322 eet->evlist = evlist; 2323 evlist->eet = eet; 2324 opts->target.initial_delay = eet->times[0].start; 2325 2326 return 0; 2327 2328 close_timerfd: 2329 close(eet->timerfd); 2330 free_eet_times: 2331 zfree(&eet->times); 2332 free_eet: 2333 free(eet); 2334 return err; 2335 } 2336 2337 static int event_enable_timer__set_timer(struct 
static int event_enable_timer__set_timer(struct event_enable_timer *eet, int ms)
{
	struct itimerspec its = {
		.it_value.tv_sec = ms / MSEC_PER_SEC,
		.it_value.tv_nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC,
	};
	int err = 0;

	if (timerfd_settime(eet->timerfd, 0, &its, NULL) < 0) {
		err = -errno;
		pr_err("timerfd_settime failed: %s\n", strerror(errno));
	}
	return err;
}

int event_enable_timer__start(struct event_enable_timer *eet)
{
	int ms;

	if (!eet)
		return 0;

	ms = eet->times[0].end - eet->times[0].start;
	eet->times_step = 1;

	return event_enable_timer__set_timer(eet, ms);
}

int event_enable_timer__process(struct event_enable_timer *eet)
{
	struct pollfd *entries;
	short revents;

	if (!eet)
		return 0;

	entries = eet->evlist->core.pollfd.entries;
	revents = entries[eet->pollfd_pos].revents;
	entries[eet->pollfd_pos].revents = 0;

	if (revents & POLLIN) {
		size_t step = eet->times_step;
		size_t pos = step / 2;

		if (step & 1) {
			evlist__disable_non_dummy(eet->evlist);
			pr_info(EVLIST_DISABLED_MSG);
			if (pos >= eet->times_cnt - 1) {
				/* Disarm timer */
				event_enable_timer__set_timer(eet, 0);
				return 1; /* Stop */
			}
		} else {
			evlist__enable_non_dummy(eet->evlist);
			pr_info(EVLIST_ENABLED_MSG);
		}

		step += 1;
		pos = step / 2;

		if (pos < eet->times_cnt) {
			int *times = (int *)eet->times; /* Accessing 'times' as array of int */
			int ms = times[step] - times[step - 1];

			eet->times_step = step;
			return event_enable_timer__set_timer(eet, ms);
		}
	}

	return 0;
}

void event_enable_timer__exit(struct event_enable_timer **ep)
{
	if (!ep || !*ep)
		return;
	zfree(&(*ep)->times);
	zfree(ep);
}

struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.idx == idx)
			return evsel;
	}
	return NULL;
}
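
/**
 * evlist__scnprintf_evsels - print a comma separated list of event names.
 * @evlist: events to print, dummy events are skipped
 * @size: size of @bf in bytes
 * @bf: buffer to print into
 *
 * If an event name does not fit, "..." is printed instead and the walk stops.
 * Returns the number of characters printed.
 */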
int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
{
	struct evsel *evsel;
	int printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_dummy_event(evsel))
			continue;
		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
		} else {
			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
			break;
		}
	}

	return printed;
}

void evlist__check_mem_load_aux(struct evlist *evlist)
{
	struct evsel *leader, *evsel, *pos;

	/*
	 * For some platforms, the 'mem-loads' event must be used together
	 * with 'mem-loads-aux' within a group, and 'mem-loads-aux' must be
	 * the group leader. Break up such groups before reporting, because
	 * 'mem-loads-aux' is just an auxiliary event: it doesn't carry any
	 * valid memory load information itself.
	 */
	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);
		if (leader == evsel)
			continue;

		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
			for_each_group_evsel(pos, leader) {
				evsel__set_leader(pos, pos);
				pos->core.nr_members = 0;
			}
		}
	}
}

/**
 * evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs
 * and warn if the user CPU list is inapplicable for the event's PMU's
 * CPUs. Non-core PMUs list a CPU in sysfs, but this may be overridden by a
 * user requested CPU, so any online CPU is applicable. Core PMUs handle
 * events on the CPUs in their list and otherwise the event isn't supported.
 * @evlist: The list of events being checked.
 * @cpu_list: The user provided list of CPUs.
 */
void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list)
{
	struct perf_cpu_map *user_requested_cpus;
	struct evsel *pos;

	if (!cpu_list)
		return;

	user_requested_cpus = perf_cpu_map__new(cpu_list);
	if (!user_requested_cpus)
		return;

	evlist__for_each_entry(evlist, pos) {
		struct perf_cpu_map *intersect, *to_test;
		const struct perf_pmu *pmu = evsel__find_pmu(pos);

		to_test = pmu && pmu->is_core ? pmu->cpus : cpu_map__online();
		intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
		if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
			char buf[128];

			cpu_map__snprint(to_test, buf, sizeof(buf));
			pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
				   cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
		}
		perf_cpu_map__put(intersect);
	}
	perf_cpu_map__put(user_requested_cpus);
}

void evlist__uniquify_name(struct evlist *evlist)
{
	struct evsel *pos;
	char *new_name;
	int ret;

	if (perf_pmus__num_core_pmus() == 1)
		return;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_hybrid(pos))
			continue;

		if (strchr(pos->name, '/'))
			continue;

		/* asprintf() returns -1 on failure, leaving new_name undefined */
		ret = asprintf(&new_name, "%s/%s/",
			       pos->pmu_name, pos->name);
		if (ret > 0) {
			free(pos->name);
			pos->name = new_name;
		}
	}
}