// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "record.h"
#include "debug.h"
#include "units.h"
#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
#include "util/pmu.h"
#include "util/sample.h"
#include "util/bpf-filter.h"
#include "util/stat.h"
#include "util/util.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/timerfd.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();
	bool can_profile_kernel;
	int err;

	if (!evlist)
		return NULL;

	can_profile_kernel = perf_event_paranoid_check(1);
	err = parse_event(evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
	if (err) {
		evlist__delete(evlist);
		return NULL;
	}

	if (evlist->core.nr_entries > 1) {
		struct evsel *evsel;

		evlist__for_each_entry(evlist, evsel)
			evsel__set_sample_id(evsel, /*can_sample_identifier=*/false);
	}

	return evlist;
}

struct evlist *evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
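
/*
 * Illustrative usage sketch, not part of the original file: how a tool built
 * on this API would typically create, open and tear down an evlist (error
 * handling abbreviated).
 *
 *	struct evlist *evlist = evlist__new_default();
 *
 *	if (evlist != NULL) {
 *		if (evlist__open(evlist) == 0) {
 *			evlist__enable(evlist);
 *			// ... consume events ...
 *			evlist__disable(evlist);
 *		}
 *		evlist__delete(evlist);	// also closes, unmaps and frees the evsels
 *	}
 */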

/**
 * evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	event_enable_timer__exit(&evlist->eet);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__free_stats(evlist);
	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	perf_evlist__add(&evlist->core, &entry->core);
	entry->evlist = evlist;
	entry->tracking = !entry->core.idx;

	if (evlist->core.nr_entries == 1)
		evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list)
{
	while (!list_empty(list)) {
		struct evsel *evsel, *temp, *leader = NULL;

		__evlist__for_each_entry_safe(list, temp, evsel) {
			list_del_init(&evsel->core.node);
			evlist__add(evlist, evsel);
			leader = evsel;
			break;
		}

		__evlist__for_each_entry_safe(list, temp, evsel) {
			if (evsel__has_leader(evsel, leader)) {
				list_del_init(&evsel->core.node);
				evlist__add(evlist, evsel);
			}
		}
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

static void evlist__set_leader(struct evlist *evlist)
{
	perf_evlist__set_leader(&evlist->core);
}

static struct evsel *evlist__dummy_event(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size = sizeof(attr), /* to capture ABI version */
		/* Avoid frequency mode for dummy events to avoid associated timers. */
		.freq = 0,
		.sample_period = 1,
	};

	return evsel__new_idx(&attr, evlist->core.nr_entries);
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct evsel *evsel = evlist__dummy_event(evlist);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel = evlist__dummy_event(evlist);

	if (!evsel)
		return NULL;

	evsel->core.attr.exclude_kernel = 1;
	evsel->core.attr.exclude_guest = 1;
	evsel->core.attr.exclude_hv = 1;
	evsel->core.system_wide = system_wide;
	evsel->no_aux_samples = true;
	evsel->name = strdup("dummy:u");

	evlist__add(evlist, evsel);
	return evsel;
}

#ifdef HAVE_LIBTRACEEVENT
struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0);

	if (IS_ERR(evsel))
		return evsel;

	evsel__set_sample_bit(evsel, CPU);
	evsel__set_sample_bit(evsel, TIME);

	evsel->core.system_wide = system_wide;
	evsel->no_aux_samples = true;

	evlist__add(evlist, evsel);
	return evsel;
}
#endif

int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

__weak int arch_evlist__add_default_attrs(struct evlist *evlist,
					  struct perf_event_attr *attrs,
					  size_t nr_attrs)
{
	if (!nr_attrs)
		return 0;

	return __evlist__add_default_attrs(evlist, attrs, nr_attrs);
}

struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

#ifdef HAVE_LIBTRACEEVENT
int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}
#endif

struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity)
{
	struct evlist_cpu_iterator itr = {
		.container = evlist,
		.evsel = NULL,
		.cpu_map_idx = 0,
		.evlist_cpu_map_idx = 0,
		.evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus),
		.cpu = (struct perf_cpu){ .cpu = -1},
		.affinity = affinity,
	};

	if (evlist__empty(evlist)) {
		/* Ensure the empty list doesn't iterate. */
		itr.evlist_cpu_map_idx = itr.evlist_cpu_map_nr;
	} else {
		itr.evsel = evlist__first(evlist);
		if (itr.affinity) {
			itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0);
			affinity__set(itr.affinity, itr.cpu.cpu);
			itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu);
			/*
			 * If this CPU isn't in the evsel's cpu map then advance
			 * through the list.
			 */
			if (itr.cpu_map_idx == -1)
				evlist_cpu_iterator__next(&itr);
		}
	}
	return itr;
}

void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr)
{
	while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) {
		evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		if (evlist_cpu_itr->cpu_map_idx != -1)
			return;
	}
	evlist_cpu_itr->evlist_cpu_map_idx++;
	if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) {
		evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container);
		evlist_cpu_itr->cpu =
			perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus,
					  evlist_cpu_itr->evlist_cpu_map_idx);
		if (evlist_cpu_itr->affinity)
			affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu);
		evlist_cpu_itr->cpu_map_idx =
			perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus,
					  evlist_cpu_itr->cpu);
		/*
		 * If this CPU isn't in the evsel's cpu map then advance through
		 * the list.
		 */
		if (evlist_cpu_itr->cpu_map_idx == -1)
			evlist_cpu_iterator__next(evlist_cpu_itr);
	}
}

bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr)
{
	return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr;
}
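
/*
 * Illustrative sketch, not part of the original file: the iterator above is
 * normally driven through the evlist__for_each_cpu() macro (see evlist.h),
 * which visits every (evsel, CPU) pair grouped by CPU so the affinity mask
 * only has to change once per CPU, e.g.:
 *
 *	struct evlist_cpu_iterator evlist_cpu_itr;
 *	struct affinity affinity;
 *
 *	if (affinity__setup(&affinity) == 0) {
 *		evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
 *			// use evlist_cpu_itr.evsel and
 *			// evlist_cpu_itr.cpu_map_idx here
 *		}
 *		affinity__cleanup(&affinity);
 *	}
 */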

static int evsel__strcmp(struct evsel *pos, char *evsel_name)
{
	if (!evsel_name)
		return 0;
	if (evsel__is_dummy_event(pos))
		return 1;
	return !evsel__name_is(pos, evsel_name);
}

static int evlist__is_enabled(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		/* If at least one event is enabled, evlist is enabled. */
		if (!pos->disabled)
			return true;
	}
	return false;
}

static void __evlist__disable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;
	bool has_imm = false;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	/* Disable 'immediate' events last */
	for (int imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
			pos = evlist_cpu_itr.evsel;
			if (evsel__strcmp(pos, evsel_name))
				continue;
			if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			if (excl_dummy && evsel__is_dummy_event(pos))
				continue;
			if (pos->immediate)
				has_imm = true;
			if (pos->immediate != imm)
				continue;
			evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		pos->disabled = true;
	}

	/*
	 * If we disabled only single event, we need to check
	 * the enabled state of the evlist manually.
	 */
	if (evsel_name)
		evlist->enabled = evlist__is_enabled(evlist);
	else
		evlist->enabled = false;
}

void evlist__disable(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL, false);
}

void evlist__disable_non_dummy(struct evlist *evlist)
{
	__evlist__disable(evlist, NULL, true);
}

void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__disable(evlist, evsel_name, false);
}

static void __evlist__enable(struct evlist *evlist, char *evsel_name, bool excl_dummy)
{
	struct evsel *pos;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity saved_affinity, *affinity = NULL;

	// See explanation in evlist__close()
	if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		if (affinity__setup(&saved_affinity) < 0)
			return;
		affinity = &saved_affinity;
	}

	evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) {
		pos = evlist_cpu_itr.evsel;
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx);
	}
	affinity__cleanup(affinity);
	evlist__for_each_entry(evlist, pos) {
		if (evsel__strcmp(pos, evsel_name))
			continue;
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		if (excl_dummy && evsel__is_dummy_event(pos))
			continue;
		pos->disabled = false;
	}

	/*
	 * Even single event sets the 'enabled' for evlist,
	 * so the toggle can work properly and toggle to
	 * 'disabled' state.
	 */
	evlist->enabled = true;
}

void evlist__enable(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL, false);
}

void evlist__enable_non_dummy(struct evlist *evlist)
{
	__evlist__enable(evlist, NULL, true);
}

void evlist__enable_evsel(struct evlist *evlist, char *evsel_name)
{
	__evlist__enable(evlist, evsel_name, false);
}

void evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
				       fdarray_flag__nonfilterable |
				       fdarray_flag__non_perf_event);
}
#endif

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int evlist__pause(struct evlist *evlist)
{
	return evlist__set_paused(evlist, true);
}

static int evlist__resume(struct evlist *evlist)
{
	return evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_evsel *_evsel,
			 struct perf_mmap_param *_mp,
			 int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);
	struct evsel *evsel = container_of(_evsel, struct evsel, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, evsel, idx);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, struct perf_cpu cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but lets not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused)
{
	return __evlist__parse_mmap_pages(opt->value, str);
}
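
/*
 * Illustrative example, not part of the original file: with a 4096 byte page
 * size, parse_pages_arg() above treats "-m 512K" as a size and yields
 * 512*1024 / 4096 = 128 pages, while "-m 100" is taken as a page count and
 * rounded up to the next power of two, also 128 pages; evlist__mmap_size()
 * then adds one extra page for the ring-buffer header.
 */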

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages - auxtrace map length in pages
 * @auxtrace_overwrite - overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
			 int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks = nr_cblocks,
		.affinity = affinity,
		.flush = flush,
		.comp_level = comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx = perf_evlist__mmap_cb_idx,
		.get = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are passed to perf record, '-a'
	 * overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If only '--per-thread' is passed to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false.
	 * For perf record, thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That will keep perf record's
	 * current behavior.
	 *
	 * For perf stat, it allows the case that target->per_thread and
	 * target->system_wide are both true. It means to collect system-wide
	 * per-thread data. thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	/* as evlist now has references, put count here */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		/*
		 * Filters only work for tracepoint events, which don't have a
		 * CPU limit, so the evlist and the evsel should always be the
		 * same.
		 */
		if (evsel->filter) {
			err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
			if (err) {
				*err_evsel = evsel;
				break;
			}
		}

		/*
		 * non-tracepoint events can have BPF filters.
		 */
		if (!list_empty(&evsel->bpf_filters)) {
			err = perf_bpf_filter__prepare(evsel);
			if (err) {
				*err_evsel = evsel;
				break;
			}
		}
	}

	return err;
}

int evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return evlist__append_tp_filter_pids(evlist, 1, &pid);
}
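
/*
 * Illustrative example, not part of the original file: for two pids {1, 2},
 * asprintf__tp_filter_pids() above builds the tracepoint filter string
 * "common_pid != 1 && common_pid != 2", which evlist__set_tp_filter_pids()
 * then applies to every tracepoint evsel in the list.
 */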

bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u16 evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0;
}

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct evlist_cpu_iterator evlist_cpu_itr;
	struct affinity affinity;

	/*
	 * With perf record core.user_requested_cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.user_requested_cpus ||
	    cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
		perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core,
				      evlist_cpu_itr.cpu_map_idx);
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
	perf_evlist__reset_id_hash(&evlist->core);
}

static int evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	perf_thread_map__put(threads);
out_put:
	perf_cpu_map__put(cpus);
out:
	return -ENOMEM;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
		err = evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[],
			     bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Change the name of this process not to confuse --exclude-perf users
		 * who see 'perf' in the window up to the execvp() and think that
		 * perf samples are not being excluded.
		 */
		prctl(PR_SET_NAME, "perf-exec");

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);
	int ret;

	if (!evsel)
		return -EFAULT;
	ret = evsel__parse_sample(evsel, event, sample);
	if (ret)
		return ret;
	if (perf_guest && sample->id) {
		struct perf_sample_id *sid = evlist__id2sid(evlist, sample->id);

		if (sid) {
			sample->machine_pid = sid->machine_pid;
			sample->vcpu = sid->vcpu.cpu;
		}
	}
	return 0;
}

int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp)
{
	struct evsel *evsel = evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel__leader(evsel) == evsel__leader(move_evsel))
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide)
{
	struct evsel *evsel;

	evsel = evlist__get_tracking_event(evlist);
	if (!evsel__is_dummy_event(evsel)) {
		evsel = evlist__add_aux_dummy(evlist, system_wide);
		if (!evsel)
			return NULL;

		evlist__set_tracking_event(evlist, evsel);
	} else if (system_wide) {
		perf_evlist__go_system_wide(&evlist->core, &evsel->core);
	}

	return evsel;
}

struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (evsel__name_is(evsel, str))
			return evsel;
	}

	return NULL;
}

void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		evlist__pause(evlist);
		break;
	case RESUME:
		evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void evlist__force_leader(struct evlist *evlist)
{
	if (evlist__nr_groups(evlist) == 0) {
		struct evsel *leader = evlist__first(evlist);

		evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel__leader(evsel);

	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (evsel__has_leader(c2, leader)) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			/*
			 * We want to close all members of the group and reopen
			 * them. Some events, like Intel topdown, require being
			 * in a group and so keep these in the group.
			 */
			evsel__remove_from_group(c2, leader);

			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	/* Reset the leader count if all entries were removed. */
	if (leader->core.nr_members == 1)
		leader->core.nr_members = 0;
	return leader;
}

static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *s, *p;
	int ret = 0, fd;

	if (strncmp(str, "fifo:", 5))
		return -EINVAL;

	str += 5;
	if (!*str || *str == ',')
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	/*
	 * O_RDWR avoids POLLHUPs which is necessary to allow the other
	 * end of a FIFO to be repeatedly opened and closed.
	 */
	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
	if (fd < 0) {
		pr_err("Failed to open '%s'\n", s);
		ret = -errno;
		goto out_free;
	}
	*ctl_fd = fd;
	*ctl_fd_close = true;

	if (p && *++p) {
		/* O_RDWR | O_NONBLOCK means the other end need not be open */
		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
		if (fd < 0) {
			pr_err("Failed to open '%s'\n", p);
			ret = -errno;
			goto out_free;
		}
		*ctl_fd_ack = fd;
	}

out_free:
	free(s);
	return ret;
}

int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *comma = NULL, *endptr = NULL;

	*ctl_fd_close = false;

	if (strncmp(str, "fd:", 3))
		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);

	*ctl_fd = strtoul(&str[3], &endptr, 0);
	if (endptr == &str[3])
		return -EINVAL;

	comma = strchr(str, ',');
	if (comma) {
		if (endptr != comma)
			return -EINVAL;

		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
		if (endptr == comma + 1 || *endptr != '\0')
			return -EINVAL;
	}

	return 0;
}

void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close)
{
	if (*ctl_fd_close) {
		*ctl_fd_close = false;
		close(ctl_fd);
		if (ctl_fd_ack >= 0)
			close(ctl_fd_ack);
	}
}

int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
{
	if (fd == -1) {
		pr_debug("Control descriptor is not initialized\n");
		return 0;
	}

	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
						     fdarray_flag__nonfilterable |
						     fdarray_flag__non_perf_event);
	if (evlist->ctl_fd.pos < 0) {
		evlist->ctl_fd.pos = -1;
		pr_err("Failed to add ctl fd entry: %m\n");
		return -1;
	}

	evlist->ctl_fd.fd = fd;
	evlist->ctl_fd.ack = ack;

	return 0;
}

bool evlist__ctlfd_initialized(struct evlist *evlist)
{
	return evlist->ctl_fd.pos >= 0;
}

int evlist__finalize_ctlfd(struct evlist *evlist)
{
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	entries[evlist->ctl_fd.pos].fd = -1;
	entries[evlist->ctl_fd.pos].events = 0;
	entries[evlist->ctl_fd.pos].revents = 0;

	evlist->ctl_fd.pos = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.fd = -1;

	return 0;
}
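
/*
 * Illustrative example, not part of the original file: the strings accepted
 * by evlist__parse_control() above are what the --control option passes in,
 * e.g. "fd:10,11" to reuse already-open descriptors 10 (commands) and 11
 * (acks), or "fifo:ctl.fifo,ack.fifo" (hypothetical paths) to have the FIFOs
 * opened here, in which case *ctl_fd_close is set so that
 * evlist__close_control() later closes them.
 */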

static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
			      char *cmd_data, size_t data_size)
{
	int err;
	char c;
	size_t bytes_read = 0;

	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	memset(cmd_data, 0, data_size);
	data_size--;

	do {
		err = read(evlist->ctl_fd.fd, &c, 1);
		if (err > 0) {
			if (c == '\n' || c == '\0')
				break;
			cmd_data[bytes_read++] = c;
			if (bytes_read == data_size)
				break;
			continue;
		} else if (err == -1) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				err = 0;
			else
				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
		}
		break;
	} while (1);

	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");

	if (bytes_read > 0) {
		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_ENABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_DISABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG,
				    (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_SNAPSHOT;
			pr_debug("is snapshot\n");
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG,
				    (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_EVLIST;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG,
				    (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_STOP;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG,
				    (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_PING;
		}
	}

	return bytes_read ? (int)bytes_read : err;
}

int evlist__ctlfd_ack(struct evlist *evlist)
{
	int err;

	if (evlist->ctl_fd.ack == -1)
		return 0;

	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
	if (err == -1)
		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);

	return err;
}

static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg)
{
	char *data = cmd_data + cmd_size;

	/* no argument */
	if (!*data)
		return 0;

	/* there's argument */
	if (*data == ' ') {
		*arg = data + 1;
		return 1;
	}

	/* malformed */
	return -1;
}
"enabled" : "disabled"); 2108 } else { 2109 pr_info("failed: can't find '%s' event\n", name); 2110 } 2111 } else { 2112 if (enable) { 2113 evlist__enable(evlist); 2114 pr_info(EVLIST_ENABLED_MSG); 2115 } else { 2116 evlist__disable(evlist); 2117 pr_info(EVLIST_DISABLED_MSG); 2118 } 2119 } 2120 2121 return 0; 2122 } 2123 2124 static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data) 2125 { 2126 struct perf_attr_details details = { .verbose = false, }; 2127 struct evsel *evsel; 2128 char *arg; 2129 int err; 2130 2131 err = get_cmd_arg(cmd_data, 2132 sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1, 2133 &arg); 2134 if (err < 0) { 2135 pr_info("failed: wrong command\n"); 2136 return -1; 2137 } 2138 2139 if (err) { 2140 if (!strcmp(arg, "-v")) { 2141 details.verbose = true; 2142 } else if (!strcmp(arg, "-g")) { 2143 details.event_group = true; 2144 } else if (!strcmp(arg, "-F")) { 2145 details.freq = true; 2146 } else { 2147 pr_info("failed: wrong command\n"); 2148 return -1; 2149 } 2150 } 2151 2152 evlist__for_each_entry(evlist, evsel) 2153 evsel__fprintf(evsel, &details, stderr); 2154 2155 return 0; 2156 } 2157 2158 int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd) 2159 { 2160 int err = 0; 2161 char cmd_data[EVLIST_CTL_CMD_MAX_LEN]; 2162 int ctlfd_pos = evlist->ctl_fd.pos; 2163 struct pollfd *entries = evlist->core.pollfd.entries; 2164 2165 if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents) 2166 return 0; 2167 2168 if (entries[ctlfd_pos].revents & POLLIN) { 2169 err = evlist__ctlfd_recv(evlist, cmd, cmd_data, 2170 EVLIST_CTL_CMD_MAX_LEN); 2171 if (err > 0) { 2172 switch (*cmd) { 2173 case EVLIST_CTL_CMD_ENABLE: 2174 case EVLIST_CTL_CMD_DISABLE: 2175 err = evlist__ctlfd_enable(evlist, cmd_data, 2176 *cmd == EVLIST_CTL_CMD_ENABLE); 2177 break; 2178 case EVLIST_CTL_CMD_EVLIST: 2179 err = evlist__ctlfd_list(evlist, cmd_data); 2180 break; 2181 case EVLIST_CTL_CMD_SNAPSHOT: 2182 case EVLIST_CTL_CMD_STOP: 2183 case EVLIST_CTL_CMD_PING: 2184 break; 2185 case EVLIST_CTL_CMD_ACK: 2186 case EVLIST_CTL_CMD_UNSUPPORTED: 2187 default: 2188 pr_debug("ctlfd: unsupported %d\n", *cmd); 2189 break; 2190 } 2191 if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED || 2192 *cmd == EVLIST_CTL_CMD_SNAPSHOT)) 2193 evlist__ctlfd_ack(evlist); 2194 } 2195 } 2196 2197 if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR)) 2198 evlist__finalize_ctlfd(evlist); 2199 else 2200 entries[ctlfd_pos].revents = 0; 2201 2202 return err; 2203 } 2204 2205 /** 2206 * struct event_enable_time - perf record -D/--delay single time range. 2207 * @start: start of time range to enable events in milliseconds 2208 * @end: end of time range to enable events in milliseconds 2209 * 2210 * N.B. this structure is also accessed as an array of int. 2211 */ 2212 struct event_enable_time { 2213 int start; 2214 int end; 2215 }; 2216 2217 static int parse_event_enable_time(const char *str, struct event_enable_time *range, bool first) 2218 { 2219 const char *fmt = first ? 
"%u - %u %n" : " , %u - %u %n"; 2220 int ret, start, end, n; 2221 2222 ret = sscanf(str, fmt, &start, &end, &n); 2223 if (ret != 2 || end <= start) 2224 return -EINVAL; 2225 if (range) { 2226 range->start = start; 2227 range->end = end; 2228 } 2229 return n; 2230 } 2231 2232 static ssize_t parse_event_enable_times(const char *str, struct event_enable_time *range) 2233 { 2234 int incr = !!range; 2235 bool first = true; 2236 ssize_t ret, cnt; 2237 2238 for (cnt = 0; *str; cnt++) { 2239 ret = parse_event_enable_time(str, range, first); 2240 if (ret < 0) 2241 return ret; 2242 /* Check no overlap */ 2243 if (!first && range && range->start <= range[-1].end) 2244 return -EINVAL; 2245 str += ret; 2246 range += incr; 2247 first = false; 2248 } 2249 return cnt; 2250 } 2251 2252 /** 2253 * struct event_enable_timer - control structure for perf record -D/--delay. 2254 * @evlist: event list 2255 * @times: time ranges that events are enabled (N.B. this is also accessed as an 2256 * array of int) 2257 * @times_cnt: number of time ranges 2258 * @timerfd: timer file descriptor 2259 * @pollfd_pos: position in @evlist array of file descriptors to poll (fdarray) 2260 * @times_step: current position in (int *)@times)[], 2261 * refer event_enable_timer__process() 2262 * 2263 * Note, this structure is only used when there are time ranges, not when there 2264 * is only an initial delay. 2265 */ 2266 struct event_enable_timer { 2267 struct evlist *evlist; 2268 struct event_enable_time *times; 2269 size_t times_cnt; 2270 int timerfd; 2271 int pollfd_pos; 2272 size_t times_step; 2273 }; 2274 2275 static int str_to_delay(const char *str) 2276 { 2277 char *endptr; 2278 long d; 2279 2280 d = strtol(str, &endptr, 10); 2281 if (*endptr || d > INT_MAX || d < -1) 2282 return 0; 2283 return d; 2284 } 2285 2286 int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts, 2287 const char *str, int unset) 2288 { 2289 enum fdarray_flags flags = fdarray_flag__nonfilterable | fdarray_flag__non_perf_event; 2290 struct event_enable_timer *eet; 2291 ssize_t times_cnt; 2292 ssize_t ret; 2293 int err; 2294 2295 if (unset) 2296 return 0; 2297 2298 opts->target.initial_delay = str_to_delay(str); 2299 if (opts->target.initial_delay) 2300 return 0; 2301 2302 ret = parse_event_enable_times(str, NULL); 2303 if (ret < 0) 2304 return ret; 2305 2306 times_cnt = ret; 2307 if (times_cnt == 0) 2308 return -EINVAL; 2309 2310 eet = zalloc(sizeof(*eet)); 2311 if (!eet) 2312 return -ENOMEM; 2313 2314 eet->times = calloc(times_cnt, sizeof(*eet->times)); 2315 if (!eet->times) { 2316 err = -ENOMEM; 2317 goto free_eet; 2318 } 2319 2320 if (parse_event_enable_times(str, eet->times) != times_cnt) { 2321 err = -EINVAL; 2322 goto free_eet_times; 2323 } 2324 2325 eet->times_cnt = times_cnt; 2326 2327 eet->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC); 2328 if (eet->timerfd == -1) { 2329 err = -errno; 2330 pr_err("timerfd_create failed: %s\n", strerror(errno)); 2331 goto free_eet_times; 2332 } 2333 2334 eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags); 2335 if (eet->pollfd_pos < 0) { 2336 err = eet->pollfd_pos; 2337 goto close_timerfd; 2338 } 2339 2340 eet->evlist = evlist; 2341 evlist->eet = eet; 2342 opts->target.initial_delay = eet->times[0].start; 2343 2344 return 0; 2345 2346 close_timerfd: 2347 close(eet->timerfd); 2348 free_eet_times: 2349 zfree(&eet->times); 2350 free_eet: 2351 free(eet); 2352 return err; 2353 } 2354 2355 static int event_enable_timer__set_timer(struct 
static int event_enable_timer__set_timer(struct event_enable_timer *eet, int ms)
{
	struct itimerspec its = {
		.it_value.tv_sec = ms / MSEC_PER_SEC,
		.it_value.tv_nsec = (ms % MSEC_PER_SEC) * NSEC_PER_MSEC,
	};
	int err = 0;

	if (timerfd_settime(eet->timerfd, 0, &its, NULL) < 0) {
		err = -errno;
		pr_err("timerfd_settime failed: %s\n", strerror(errno));
	}
	return err;
}

int event_enable_timer__start(struct event_enable_timer *eet)
{
	int ms;

	if (!eet)
		return 0;

	ms = eet->times[0].end - eet->times[0].start;
	eet->times_step = 1;

	return event_enable_timer__set_timer(eet, ms);
}

int event_enable_timer__process(struct event_enable_timer *eet)
{
	struct pollfd *entries;
	short revents;

	if (!eet)
		return 0;

	entries = eet->evlist->core.pollfd.entries;
	revents = entries[eet->pollfd_pos].revents;
	entries[eet->pollfd_pos].revents = 0;

	if (revents & POLLIN) {
		size_t step = eet->times_step;
		size_t pos = step / 2;

		if (step & 1) {
			evlist__disable_non_dummy(eet->evlist);
			pr_info(EVLIST_DISABLED_MSG);
			if (pos >= eet->times_cnt - 1) {
				/* Disarm timer */
				event_enable_timer__set_timer(eet, 0);
				return 1; /* Stop */
			}
		} else {
			evlist__enable_non_dummy(eet->evlist);
			pr_info(EVLIST_ENABLED_MSG);
		}

		step += 1;
		pos = step / 2;

		if (pos < eet->times_cnt) {
			int *times = (int *)eet->times; /* Accessing 'times' as array of int */
			int ms = times[step] - times[step - 1];

			eet->times_step = step;
			return event_enable_timer__set_timer(eet, ms);
		}
	}

	return 0;
}

void event_enable_timer__exit(struct event_enable_timer **ep)
{
	if (!ep || !*ep)
		return;
	zfree(&(*ep)->times);
	zfree(ep);
}

struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.idx == idx)
			return evsel;
	}
	return NULL;
}
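/*
 * Write a comma separated list of the non-dummy event names in @evlist into
 * @bf. For example (illustrative), events "cycles" and "instructions" with a
 * large enough buffer give "cycles,instructions"; once the next name no
 * longer fits, "..." is appended instead and printing stops.
 */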
int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
{
	struct evsel *evsel;
	int printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__is_dummy_event(evsel))
			continue;
		if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
			printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel));
		} else {
			printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
			break;
		}
	}

	return printed;
}

void evlist__check_mem_load_aux(struct evlist *evlist)
{
	struct evsel *leader, *evsel, *pos;

	/*
	 * For some platforms, the 'mem-loads' event must be used together
	 * with 'mem-loads-aux' within a group, with 'mem-loads-aux' as the
	 * group leader. Break up such groups before reporting because
	 * 'mem-loads-aux' is just an auxiliary event: it doesn't carry any
	 * valid memory load information.
	 */
	evlist__for_each_entry(evlist, evsel) {
		leader = evsel__leader(evsel);
		if (leader == evsel)
			continue;

		if (leader->name && strstr(leader->name, "mem-loads-aux")) {
			for_each_group_evsel(pos, leader) {
				evsel__set_leader(pos, pos);
				pos->core.nr_members = 0;
			}
		}
	}
}

/**
 * evlist__warn_user_requested_cpus() - Check each evsel against requested CPUs
 *     and warn if the user CPU list is inapplicable for the event's PMU's
 *     CPUs. Non-core PMUs list a CPU in sysfs, but this may be overwritten by
 *     a user requested CPU and so any online CPU is applicable. Core PMUs
 *     handle events on the CPUs in their list and otherwise the event isn't
 *     supported.
 * @evlist: The list of events being checked.
 * @cpu_list: The user provided list of CPUs.
 */
void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list)
{
	struct perf_cpu_map *user_requested_cpus;
	struct evsel *pos;

	if (!cpu_list)
		return;

	user_requested_cpus = perf_cpu_map__new(cpu_list);
	if (!user_requested_cpus)
		return;

	evlist__for_each_entry(evlist, pos) {
		struct perf_cpu_map *intersect, *to_test;
		const struct perf_pmu *pmu = evsel__find_pmu(pos);

		to_test = pmu && pmu->is_core ? pmu->cpus : cpu_map__online();
		intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
		if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
			char buf[128];

			cpu_map__snprint(to_test, buf, sizeof(buf));
			pr_warning("WARNING: A requested CPU in '%s' is not supported by PMU '%s' (CPUs %s) for event '%s'\n",
				   cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
		}
		perf_cpu_map__put(intersect);
	}
	perf_cpu_map__put(user_requested_cpus);
}

void evlist__uniquify_name(struct evlist *evlist)
{
	struct evsel *pos;
	char *new_name;
	int ret;

	if (perf_pmus__num_core_pmus() == 1)
		return;

	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_hybrid(pos))
			continue;

		if (strchr(pos->name, '/'))
			continue;

		ret = asprintf(&new_name, "%s/%s/",
			       pos->pmu_name, pos->name);
		/* asprintf() returns -1 on failure, leaving new_name undefined */
		if (ret > 0) {
			free(pos->name);
			pos->name = new_name;
		}
	}
}
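/*
 * For example (hypothetical hybrid system with 'cpu_core' and 'cpu_atom'
 * PMUs), evlist__uniquify_name() above turns two "cycles" evsels into
 * "cpu_core/cycles/" and "cpu_atom/cycles/" so their output can be told
 * apart.
 */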