1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 4 * 5 * Parts came from builtin-{top,stat,record}.c, see those files for further 6 * copyright notes. 7 */ 8 #include <api/fs/fs.h> 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <poll.h> 12 #include "cpumap.h" 13 #include "util/mmap.h" 14 #include "thread_map.h" 15 #include "target.h" 16 #include "evlist.h" 17 #include "evsel.h" 18 #include "debug.h" 19 #include "units.h" 20 #include "bpf_counter.h" 21 #include <internal/lib.h> // page_size 22 #include "affinity.h" 23 #include "../perf.h" 24 #include "asm/bug.h" 25 #include "bpf-event.h" 26 #include "util/string2.h" 27 #include "util/perf_api_probe.h" 28 #include "util/evsel_fprintf.h" 29 #include "util/evlist-hybrid.h" 30 #include "util/pmu.h" 31 #include <signal.h> 32 #include <unistd.h> 33 #include <sched.h> 34 #include <stdlib.h> 35 36 #include "parse-events.h" 37 #include <subcmd/parse-options.h> 38 39 #include <fcntl.h> 40 #include <sys/ioctl.h> 41 #include <sys/mman.h> 42 #include <sys/prctl.h> 43 44 #include <linux/bitops.h> 45 #include <linux/hash.h> 46 #include <linux/log2.h> 47 #include <linux/err.h> 48 #include <linux/string.h> 49 #include <linux/zalloc.h> 50 #include <perf/evlist.h> 51 #include <perf/evsel.h> 52 #include <perf/cpumap.h> 53 #include <perf/mmap.h> 54 55 #include <internal/xyarray.h> 56 57 #ifdef LACKS_SIGQUEUE_PROTOTYPE 58 int sigqueue(pid_t pid, int sig, const union sigval value); 59 #endif 60 61 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y)) 62 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) 63 64 void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus, 65 struct perf_thread_map *threads) 66 { 67 perf_evlist__init(&evlist->core); 68 perf_evlist__set_maps(&evlist->core, cpus, threads); 69 evlist->workload.pid = -1; 70 evlist->bkw_mmap_state = BKW_MMAP_NOTREADY; 71 evlist->ctl_fd.fd = -1; 72 evlist->ctl_fd.ack = -1; 73 evlist->ctl_fd.pos = -1; 74 } 75 76 struct evlist *evlist__new(void) 77 { 78 struct evlist *evlist = zalloc(sizeof(*evlist)); 79 80 if (evlist != NULL) 81 evlist__init(evlist, NULL, NULL); 82 83 return evlist; 84 } 85 86 struct evlist *evlist__new_default(void) 87 { 88 struct evlist *evlist = evlist__new(); 89 90 if (evlist && evlist__add_default(evlist)) { 91 evlist__delete(evlist); 92 evlist = NULL; 93 } 94 95 return evlist; 96 } 97 98 struct evlist *evlist__new_dummy(void) 99 { 100 struct evlist *evlist = evlist__new(); 101 102 if (evlist && evlist__add_dummy(evlist)) { 103 evlist__delete(evlist); 104 evlist = NULL; 105 } 106 107 return evlist; 108 } 109 110 /** 111 * evlist__set_id_pos - set the positions of event ids. 112 * @evlist: selected event list 113 * 114 * Events with compatible sample types all have the same id_pos 115 * and is_pos. For convenience, put a copy on evlist. 116 */ 117 void evlist__set_id_pos(struct evlist *evlist) 118 { 119 struct evsel *first = evlist__first(evlist); 120 121 evlist->id_pos = first->id_pos; 122 evlist->is_pos = first->is_pos; 123 } 124 125 static void evlist__update_id_pos(struct evlist *evlist) 126 { 127 struct evsel *evsel; 128 129 evlist__for_each_entry(evlist, evsel) 130 evsel__calc_id_pos(evsel); 131 132 evlist__set_id_pos(evlist); 133 } 134 135 static void evlist__purge(struct evlist *evlist) 136 { 137 struct evsel *pos, *n; 138 139 evlist__for_each_entry_safe(evlist, n, pos) { 140 list_del_init(&pos->core.node); 141 pos->evlist = NULL; 142 evsel__delete(pos); 143 } 144 145 evlist->core.nr_entries = 0; 146 } 147 148 void evlist__exit(struct evlist *evlist) 149 { 150 zfree(&evlist->mmap); 151 zfree(&evlist->overwrite_mmap); 152 perf_evlist__exit(&evlist->core); 153 } 154 155 void evlist__delete(struct evlist *evlist) 156 { 157 if (evlist == NULL) 158 return; 159 160 evlist__munmap(evlist); 161 evlist__close(evlist); 162 evlist__purge(evlist); 163 evlist__exit(evlist); 164 free(evlist); 165 } 166 167 void evlist__add(struct evlist *evlist, struct evsel *entry) 168 { 169 perf_evlist__add(&evlist->core, &entry->core); 170 entry->evlist = evlist; 171 entry->tracking = !entry->core.idx; 172 173 if (evlist->core.nr_entries == 1) 174 evlist__set_id_pos(evlist); 175 } 176 177 void evlist__remove(struct evlist *evlist, struct evsel *evsel) 178 { 179 evsel->evlist = NULL; 180 perf_evlist__remove(&evlist->core, &evsel->core); 181 } 182 183 void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list) 184 { 185 while (!list_empty(list)) { 186 struct evsel *evsel, *temp, *leader = NULL; 187 188 __evlist__for_each_entry_safe(list, temp, evsel) { 189 list_del_init(&evsel->core.node); 190 evlist__add(evlist, evsel); 191 leader = evsel; 192 break; 193 } 194 195 __evlist__for_each_entry_safe(list, temp, evsel) { 196 if (evsel__has_leader(evsel, leader)) { 197 list_del_init(&evsel->core.node); 198 evlist__add(evlist, evsel); 199 } 200 } 201 } 202 } 203 204 int __evlist__set_tracepoints_handlers(struct evlist *evlist, 205 const struct evsel_str_handler *assocs, size_t nr_assocs) 206 { 207 size_t i; 208 int err; 209 210 for (i = 0; i < nr_assocs; i++) { 211 // Adding a handler for an event not in this evlist, just ignore it. 212 struct evsel *evsel = evlist__find_tracepoint_by_name(evlist, assocs[i].name); 213 if (evsel == NULL) 214 continue; 215 216 err = -EEXIST; 217 if (evsel->handler != NULL) 218 goto out; 219 evsel->handler = assocs[i].handler; 220 } 221 222 err = 0; 223 out: 224 return err; 225 } 226 227 void evlist__set_leader(struct evlist *evlist) 228 { 229 perf_evlist__set_leader(&evlist->core); 230 } 231 232 int __evlist__add_default(struct evlist *evlist, bool precise) 233 { 234 struct evsel *evsel; 235 236 evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE, 237 PERF_COUNT_HW_CPU_CYCLES); 238 if (evsel == NULL) 239 return -ENOMEM; 240 241 evlist__add(evlist, evsel); 242 return 0; 243 } 244 245 int evlist__add_dummy(struct evlist *evlist) 246 { 247 struct perf_event_attr attr = { 248 .type = PERF_TYPE_SOFTWARE, 249 .config = PERF_COUNT_SW_DUMMY, 250 .size = sizeof(attr), /* to capture ABI version */ 251 }; 252 struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries); 253 254 if (evsel == NULL) 255 return -ENOMEM; 256 257 evlist__add(evlist, evsel); 258 return 0; 259 } 260 261 static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs) 262 { 263 struct evsel *evsel, *n; 264 LIST_HEAD(head); 265 size_t i; 266 267 for (i = 0; i < nr_attrs; i++) { 268 evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i); 269 if (evsel == NULL) 270 goto out_delete_partial_list; 271 list_add_tail(&evsel->core.node, &head); 272 } 273 274 evlist__splice_list_tail(evlist, &head); 275 276 return 0; 277 278 out_delete_partial_list: 279 __evlist__for_each_entry_safe(&head, n, evsel) 280 evsel__delete(evsel); 281 return -1; 282 } 283 284 int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs) 285 { 286 size_t i; 287 288 for (i = 0; i < nr_attrs; i++) 289 event_attr_init(attrs + i); 290 291 return evlist__add_attrs(evlist, attrs, nr_attrs); 292 } 293 294 __weak int arch_evlist__add_default_attrs(struct evlist *evlist __maybe_unused) 295 { 296 return 0; 297 } 298 299 struct evsel *evlist__find_tracepoint_by_id(struct evlist *evlist, int id) 300 { 301 struct evsel *evsel; 302 303 evlist__for_each_entry(evlist, evsel) { 304 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && 305 (int)evsel->core.attr.config == id) 306 return evsel; 307 } 308 309 return NULL; 310 } 311 312 struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name) 313 { 314 struct evsel *evsel; 315 316 evlist__for_each_entry(evlist, evsel) { 317 if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) && 318 (strcmp(evsel->name, name) == 0)) 319 return evsel; 320 } 321 322 return NULL; 323 } 324 325 int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler) 326 { 327 struct evsel *evsel = evsel__newtp(sys, name); 328 329 if (IS_ERR(evsel)) 330 return -1; 331 332 evsel->handler = handler; 333 evlist__add(evlist, evsel); 334 return 0; 335 } 336 337 static int evlist__nr_threads(struct evlist *evlist, struct evsel *evsel) 338 { 339 if (evsel->core.system_wide) 340 return 1; 341 else 342 return perf_thread_map__nr(evlist->core.threads); 343 } 344 345 struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity) 346 { 347 struct evlist_cpu_iterator itr = { 348 .container = evlist, 349 .evsel = evlist__first(evlist), 350 .cpu_map_idx = 0, 351 .evlist_cpu_map_idx = 0, 352 .evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus), 353 .cpu = (struct perf_cpu){ .cpu = -1}, 354 .affinity = affinity, 355 }; 356 357 if (itr.affinity) { 358 itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0); 359 affinity__set(itr.affinity, itr.cpu.cpu); 360 itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu); 361 /* 362 * If this CPU isn't in the evsel's cpu map then advance through 363 * the list. 364 */ 365 if (itr.cpu_map_idx == -1) 366 evlist_cpu_iterator__next(&itr); 367 } 368 return itr; 369 } 370 371 void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr) 372 { 373 while (evlist_cpu_itr->evsel != evlist__last(evlist_cpu_itr->container)) { 374 evlist_cpu_itr->evsel = evsel__next(evlist_cpu_itr->evsel); 375 evlist_cpu_itr->cpu_map_idx = 376 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, 377 evlist_cpu_itr->cpu); 378 if (evlist_cpu_itr->cpu_map_idx != -1) 379 return; 380 } 381 evlist_cpu_itr->evlist_cpu_map_idx++; 382 if (evlist_cpu_itr->evlist_cpu_map_idx < evlist_cpu_itr->evlist_cpu_map_nr) { 383 evlist_cpu_itr->evsel = evlist__first(evlist_cpu_itr->container); 384 evlist_cpu_itr->cpu = 385 perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus, 386 evlist_cpu_itr->evlist_cpu_map_idx); 387 if (evlist_cpu_itr->affinity) 388 affinity__set(evlist_cpu_itr->affinity, evlist_cpu_itr->cpu.cpu); 389 evlist_cpu_itr->cpu_map_idx = 390 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, 391 evlist_cpu_itr->cpu); 392 /* 393 * If this CPU isn't in the evsel's cpu map then advance through 394 * the list. 395 */ 396 if (evlist_cpu_itr->cpu_map_idx == -1) 397 evlist_cpu_iterator__next(evlist_cpu_itr); 398 } 399 } 400 401 bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr) 402 { 403 return evlist_cpu_itr->evlist_cpu_map_idx >= evlist_cpu_itr->evlist_cpu_map_nr; 404 } 405 406 static int evsel__strcmp(struct evsel *pos, char *evsel_name) 407 { 408 if (!evsel_name) 409 return 0; 410 if (evsel__is_dummy_event(pos)) 411 return 1; 412 return strcmp(pos->name, evsel_name); 413 } 414 415 static int evlist__is_enabled(struct evlist *evlist) 416 { 417 struct evsel *pos; 418 419 evlist__for_each_entry(evlist, pos) { 420 if (!evsel__is_group_leader(pos) || !pos->core.fd) 421 continue; 422 /* If at least one event is enabled, evlist is enabled. */ 423 if (!pos->disabled) 424 return true; 425 } 426 return false; 427 } 428 429 static void __evlist__disable(struct evlist *evlist, char *evsel_name) 430 { 431 struct evsel *pos; 432 struct evlist_cpu_iterator evlist_cpu_itr; 433 struct affinity affinity; 434 bool has_imm = false; 435 436 if (affinity__setup(&affinity) < 0) 437 return; 438 439 /* Disable 'immediate' events last */ 440 for (int imm = 0; imm <= 1; imm++) { 441 evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) { 442 pos = evlist_cpu_itr.evsel; 443 if (evsel__strcmp(pos, evsel_name)) 444 continue; 445 if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd) 446 continue; 447 if (pos->immediate) 448 has_imm = true; 449 if (pos->immediate != imm) 450 continue; 451 evsel__disable_cpu(pos, evlist_cpu_itr.cpu_map_idx); 452 } 453 if (!has_imm) 454 break; 455 } 456 457 affinity__cleanup(&affinity); 458 evlist__for_each_entry(evlist, pos) { 459 if (evsel__strcmp(pos, evsel_name)) 460 continue; 461 if (!evsel__is_group_leader(pos) || !pos->core.fd) 462 continue; 463 pos->disabled = true; 464 } 465 466 /* 467 * If we disabled only single event, we need to check 468 * the enabled state of the evlist manually. 469 */ 470 if (evsel_name) 471 evlist->enabled = evlist__is_enabled(evlist); 472 else 473 evlist->enabled = false; 474 } 475 476 void evlist__disable(struct evlist *evlist) 477 { 478 __evlist__disable(evlist, NULL); 479 } 480 481 void evlist__disable_evsel(struct evlist *evlist, char *evsel_name) 482 { 483 __evlist__disable(evlist, evsel_name); 484 } 485 486 static void __evlist__enable(struct evlist *evlist, char *evsel_name) 487 { 488 struct evsel *pos; 489 struct evlist_cpu_iterator evlist_cpu_itr; 490 struct affinity affinity; 491 492 if (affinity__setup(&affinity) < 0) 493 return; 494 495 evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) { 496 pos = evlist_cpu_itr.evsel; 497 if (evsel__strcmp(pos, evsel_name)) 498 continue; 499 if (!evsel__is_group_leader(pos) || !pos->core.fd) 500 continue; 501 evsel__enable_cpu(pos, evlist_cpu_itr.cpu_map_idx); 502 } 503 affinity__cleanup(&affinity); 504 evlist__for_each_entry(evlist, pos) { 505 if (evsel__strcmp(pos, evsel_name)) 506 continue; 507 if (!evsel__is_group_leader(pos) || !pos->core.fd) 508 continue; 509 pos->disabled = false; 510 } 511 512 /* 513 * Even single event sets the 'enabled' for evlist, 514 * so the toggle can work properly and toggle to 515 * 'disabled' state. 516 */ 517 evlist->enabled = true; 518 } 519 520 void evlist__enable(struct evlist *evlist) 521 { 522 __evlist__enable(evlist, NULL); 523 } 524 525 void evlist__enable_evsel(struct evlist *evlist, char *evsel_name) 526 { 527 __evlist__enable(evlist, evsel_name); 528 } 529 530 void evlist__toggle_enable(struct evlist *evlist) 531 { 532 (evlist->enabled ? evlist__disable : evlist__enable)(evlist); 533 } 534 535 static int evlist__enable_event_cpu(struct evlist *evlist, struct evsel *evsel, int cpu) 536 { 537 int thread; 538 int nr_threads = evlist__nr_threads(evlist, evsel); 539 540 if (!evsel->core.fd) 541 return -EINVAL; 542 543 for (thread = 0; thread < nr_threads; thread++) { 544 int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); 545 if (err) 546 return err; 547 } 548 return 0; 549 } 550 551 static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread) 552 { 553 int cpu; 554 int nr_cpus = perf_cpu_map__nr(evlist->core.cpus); 555 556 if (!evsel->core.fd) 557 return -EINVAL; 558 559 for (cpu = 0; cpu < nr_cpus; cpu++) { 560 int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); 561 if (err) 562 return err; 563 } 564 return 0; 565 } 566 567 int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx) 568 { 569 bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus); 570 571 if (per_cpu_mmaps) 572 return evlist__enable_event_cpu(evlist, evsel, idx); 573 574 return evlist__enable_event_thread(evlist, evsel, idx); 575 } 576 577 int evlist__add_pollfd(struct evlist *evlist, int fd) 578 { 579 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default); 580 } 581 582 int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask) 583 { 584 return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask); 585 } 586 587 #ifdef HAVE_EVENTFD_SUPPORT 588 int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd) 589 { 590 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, 591 fdarray_flag__nonfilterable); 592 } 593 #endif 594 595 int evlist__poll(struct evlist *evlist, int timeout) 596 { 597 return perf_evlist__poll(&evlist->core, timeout); 598 } 599 600 struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id) 601 { 602 struct hlist_head *head; 603 struct perf_sample_id *sid; 604 int hash; 605 606 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 607 head = &evlist->core.heads[hash]; 608 609 hlist_for_each_entry(sid, head, node) 610 if (sid->id == id) 611 return sid; 612 613 return NULL; 614 } 615 616 struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id) 617 { 618 struct perf_sample_id *sid; 619 620 if (evlist->core.nr_entries == 1 || !id) 621 return evlist__first(evlist); 622 623 sid = evlist__id2sid(evlist, id); 624 if (sid) 625 return container_of(sid->evsel, struct evsel, core); 626 627 if (!evlist__sample_id_all(evlist)) 628 return evlist__first(evlist); 629 630 return NULL; 631 } 632 633 struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id) 634 { 635 struct perf_sample_id *sid; 636 637 if (!id) 638 return NULL; 639 640 sid = evlist__id2sid(evlist, id); 641 if (sid) 642 return container_of(sid->evsel, struct evsel, core); 643 644 return NULL; 645 } 646 647 static int evlist__event2id(struct evlist *evlist, union perf_event *event, u64 *id) 648 { 649 const __u64 *array = event->sample.array; 650 ssize_t n; 651 652 n = (event->header.size - sizeof(event->header)) >> 3; 653 654 if (event->header.type == PERF_RECORD_SAMPLE) { 655 if (evlist->id_pos >= n) 656 return -1; 657 *id = array[evlist->id_pos]; 658 } else { 659 if (evlist->is_pos > n) 660 return -1; 661 n -= evlist->is_pos; 662 *id = array[n]; 663 } 664 return 0; 665 } 666 667 struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event) 668 { 669 struct evsel *first = evlist__first(evlist); 670 struct hlist_head *head; 671 struct perf_sample_id *sid; 672 int hash; 673 u64 id; 674 675 if (evlist->core.nr_entries == 1) 676 return first; 677 678 if (!first->core.attr.sample_id_all && 679 event->header.type != PERF_RECORD_SAMPLE) 680 return first; 681 682 if (evlist__event2id(evlist, event, &id)) 683 return NULL; 684 685 /* Synthesized events have an id of zero */ 686 if (!id) 687 return first; 688 689 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 690 head = &evlist->core.heads[hash]; 691 692 hlist_for_each_entry(sid, head, node) { 693 if (sid->id == id) 694 return container_of(sid->evsel, struct evsel, core); 695 } 696 return NULL; 697 } 698 699 static int evlist__set_paused(struct evlist *evlist, bool value) 700 { 701 int i; 702 703 if (!evlist->overwrite_mmap) 704 return 0; 705 706 for (i = 0; i < evlist->core.nr_mmaps; i++) { 707 int fd = evlist->overwrite_mmap[i].core.fd; 708 int err; 709 710 if (fd < 0) 711 continue; 712 err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0); 713 if (err) 714 return err; 715 } 716 return 0; 717 } 718 719 static int evlist__pause(struct evlist *evlist) 720 { 721 return evlist__set_paused(evlist, true); 722 } 723 724 static int evlist__resume(struct evlist *evlist) 725 { 726 return evlist__set_paused(evlist, false); 727 } 728 729 static void evlist__munmap_nofree(struct evlist *evlist) 730 { 731 int i; 732 733 if (evlist->mmap) 734 for (i = 0; i < evlist->core.nr_mmaps; i++) 735 perf_mmap__munmap(&evlist->mmap[i].core); 736 737 if (evlist->overwrite_mmap) 738 for (i = 0; i < evlist->core.nr_mmaps; i++) 739 perf_mmap__munmap(&evlist->overwrite_mmap[i].core); 740 } 741 742 void evlist__munmap(struct evlist *evlist) 743 { 744 evlist__munmap_nofree(evlist); 745 zfree(&evlist->mmap); 746 zfree(&evlist->overwrite_mmap); 747 } 748 749 static void perf_mmap__unmap_cb(struct perf_mmap *map) 750 { 751 struct mmap *m = container_of(map, struct mmap, core); 752 753 mmap__munmap(m); 754 } 755 756 static struct mmap *evlist__alloc_mmap(struct evlist *evlist, 757 bool overwrite) 758 { 759 int i; 760 struct mmap *map; 761 762 map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap)); 763 if (!map) 764 return NULL; 765 766 for (i = 0; i < evlist->core.nr_mmaps; i++) { 767 struct perf_mmap *prev = i ? &map[i - 1].core : NULL; 768 769 /* 770 * When the perf_mmap() call is made we grab one refcount, plus 771 * one extra to let perf_mmap__consume() get the last 772 * events after all real references (perf_mmap__get()) are 773 * dropped. 774 * 775 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and 776 * thus does perf_mmap__get() on it. 777 */ 778 perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb); 779 } 780 781 return map; 782 } 783 784 static void 785 perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist, 786 struct perf_mmap_param *_mp, 787 int idx, bool per_cpu) 788 { 789 struct evlist *evlist = container_of(_evlist, struct evlist, core); 790 struct mmap_params *mp = container_of(_mp, struct mmap_params, core); 791 792 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu); 793 } 794 795 static struct perf_mmap* 796 perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx) 797 { 798 struct evlist *evlist = container_of(_evlist, struct evlist, core); 799 struct mmap *maps; 800 801 maps = overwrite ? evlist->overwrite_mmap : evlist->mmap; 802 803 if (!maps) { 804 maps = evlist__alloc_mmap(evlist, overwrite); 805 if (!maps) 806 return NULL; 807 808 if (overwrite) { 809 evlist->overwrite_mmap = maps; 810 if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) 811 evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); 812 } else { 813 evlist->mmap = maps; 814 } 815 } 816 817 return &maps[idx].core; 818 } 819 820 static int 821 perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp, 822 int output, struct perf_cpu cpu) 823 { 824 struct mmap *map = container_of(_map, struct mmap, core); 825 struct mmap_params *mp = container_of(_mp, struct mmap_params, core); 826 827 return mmap__mmap(map, mp, output, cpu); 828 } 829 830 unsigned long perf_event_mlock_kb_in_pages(void) 831 { 832 unsigned long pages; 833 int max; 834 835 if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) { 836 /* 837 * Pick a once upon a time good value, i.e. things look 838 * strange since we can't read a sysctl value, but lets not 839 * die yet... 840 */ 841 max = 512; 842 } else { 843 max -= (page_size / 1024); 844 } 845 846 pages = (max * 1024) / page_size; 847 if (!is_power_of_2(pages)) 848 pages = rounddown_pow_of_two(pages); 849 850 return pages; 851 } 852 853 size_t evlist__mmap_size(unsigned long pages) 854 { 855 if (pages == UINT_MAX) 856 pages = perf_event_mlock_kb_in_pages(); 857 else if (!is_power_of_2(pages)) 858 return 0; 859 860 return (pages + 1) * page_size; 861 } 862 863 static long parse_pages_arg(const char *str, unsigned long min, 864 unsigned long max) 865 { 866 unsigned long pages, val; 867 static struct parse_tag tags[] = { 868 { .tag = 'B', .mult = 1 }, 869 { .tag = 'K', .mult = 1 << 10 }, 870 { .tag = 'M', .mult = 1 << 20 }, 871 { .tag = 'G', .mult = 1 << 30 }, 872 { .tag = 0 }, 873 }; 874 875 if (str == NULL) 876 return -EINVAL; 877 878 val = parse_tag_value(str, tags); 879 if (val != (unsigned long) -1) { 880 /* we got file size value */ 881 pages = PERF_ALIGN(val, page_size) / page_size; 882 } else { 883 /* we got pages count value */ 884 char *eptr; 885 pages = strtoul(str, &eptr, 10); 886 if (*eptr != '\0') 887 return -EINVAL; 888 } 889 890 if (pages == 0 && min == 0) { 891 /* leave number of pages at 0 */ 892 } else if (!is_power_of_2(pages)) { 893 char buf[100]; 894 895 /* round pages up to next power of 2 */ 896 pages = roundup_pow_of_two(pages); 897 if (!pages) 898 return -EINVAL; 899 900 unit_number__scnprintf(buf, sizeof(buf), pages * page_size); 901 pr_info("rounding mmap pages size to %s (%lu pages)\n", 902 buf, pages); 903 } 904 905 if (pages > max) 906 return -EINVAL; 907 908 return pages; 909 } 910 911 int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str) 912 { 913 unsigned long max = UINT_MAX; 914 long pages; 915 916 if (max > SIZE_MAX / page_size) 917 max = SIZE_MAX / page_size; 918 919 pages = parse_pages_arg(str, 1, max); 920 if (pages < 0) { 921 pr_err("Invalid argument for --mmap_pages/-m\n"); 922 return -1; 923 } 924 925 *mmap_pages = pages; 926 return 0; 927 } 928 929 int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset __maybe_unused) 930 { 931 return __evlist__parse_mmap_pages(opt->value, str); 932 } 933 934 /** 935 * evlist__mmap_ex - Create mmaps to receive events. 936 * @evlist: list of events 937 * @pages: map length in pages 938 * @overwrite: overwrite older events? 939 * @auxtrace_pages - auxtrace map length in pages 940 * @auxtrace_overwrite - overwrite older auxtrace data? 941 * 942 * If @overwrite is %false the user needs to signal event consumption using 943 * perf_mmap__write_tail(). Using evlist__mmap_read() does this 944 * automatically. 945 * 946 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data 947 * consumption using auxtrace_mmap__write_tail(). 948 * 949 * Return: %0 on success, negative error code otherwise. 950 */ 951 int evlist__mmap_ex(struct evlist *evlist, unsigned int pages, 952 unsigned int auxtrace_pages, 953 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush, 954 int comp_level) 955 { 956 /* 957 * Delay setting mp.prot: set it before calling perf_mmap__mmap. 958 * Its value is decided by evsel's write_backward. 959 * So &mp should not be passed through const pointer. 960 */ 961 struct mmap_params mp = { 962 .nr_cblocks = nr_cblocks, 963 .affinity = affinity, 964 .flush = flush, 965 .comp_level = comp_level 966 }; 967 struct perf_evlist_mmap_ops ops = { 968 .idx = perf_evlist__mmap_cb_idx, 969 .get = perf_evlist__mmap_cb_get, 970 .mmap = perf_evlist__mmap_cb_mmap, 971 }; 972 973 evlist->core.mmap_len = evlist__mmap_size(pages); 974 pr_debug("mmap size %zuB\n", evlist->core.mmap_len); 975 976 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len, 977 auxtrace_pages, auxtrace_overwrite); 978 979 return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core); 980 } 981 982 int evlist__mmap(struct evlist *evlist, unsigned int pages) 983 { 984 return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0); 985 } 986 987 int evlist__create_maps(struct evlist *evlist, struct target *target) 988 { 989 bool all_threads = (target->per_thread && target->system_wide); 990 struct perf_cpu_map *cpus; 991 struct perf_thread_map *threads; 992 993 /* 994 * If specify '-a' and '--per-thread' to perf record, perf record 995 * will override '--per-thread'. target->per_thread = false and 996 * target->system_wide = true. 997 * 998 * If specify '--per-thread' only to perf record, 999 * target->per_thread = true and target->system_wide = false. 1000 * 1001 * So target->per_thread && target->system_wide is false. 1002 * For perf record, thread_map__new_str doesn't call 1003 * thread_map__new_all_cpus. That will keep perf record's 1004 * current behavior. 1005 * 1006 * For perf stat, it allows the case that target->per_thread and 1007 * target->system_wide are all true. It means to collect system-wide 1008 * per-thread data. thread_map__new_str will call 1009 * thread_map__new_all_cpus to enumerate all threads. 1010 */ 1011 threads = thread_map__new_str(target->pid, target->tid, target->uid, 1012 all_threads); 1013 1014 if (!threads) 1015 return -1; 1016 1017 if (target__uses_dummy_map(target)) 1018 cpus = perf_cpu_map__dummy_new(); 1019 else 1020 cpus = perf_cpu_map__new(target->cpu_list); 1021 1022 if (!cpus) 1023 goto out_delete_threads; 1024 1025 evlist->core.has_user_cpus = !!target->cpu_list && !target->hybrid; 1026 1027 perf_evlist__set_maps(&evlist->core, cpus, threads); 1028 1029 /* as evlist now has references, put count here */ 1030 perf_cpu_map__put(cpus); 1031 perf_thread_map__put(threads); 1032 1033 return 0; 1034 1035 out_delete_threads: 1036 perf_thread_map__put(threads); 1037 return -1; 1038 } 1039 1040 int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel) 1041 { 1042 struct evsel *evsel; 1043 int err = 0; 1044 1045 evlist__for_each_entry(evlist, evsel) { 1046 if (evsel->filter == NULL) 1047 continue; 1048 1049 /* 1050 * filters only work for tracepoint event, which doesn't have cpu limit. 1051 * So evlist and evsel should always be same. 1052 */ 1053 err = perf_evsel__apply_filter(&evsel->core, evsel->filter); 1054 if (err) { 1055 *err_evsel = evsel; 1056 break; 1057 } 1058 } 1059 1060 return err; 1061 } 1062 1063 int evlist__set_tp_filter(struct evlist *evlist, const char *filter) 1064 { 1065 struct evsel *evsel; 1066 int err = 0; 1067 1068 if (filter == NULL) 1069 return -1; 1070 1071 evlist__for_each_entry(evlist, evsel) { 1072 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 1073 continue; 1074 1075 err = evsel__set_filter(evsel, filter); 1076 if (err) 1077 break; 1078 } 1079 1080 return err; 1081 } 1082 1083 int evlist__append_tp_filter(struct evlist *evlist, const char *filter) 1084 { 1085 struct evsel *evsel; 1086 int err = 0; 1087 1088 if (filter == NULL) 1089 return -1; 1090 1091 evlist__for_each_entry(evlist, evsel) { 1092 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) 1093 continue; 1094 1095 err = evsel__append_tp_filter(evsel, filter); 1096 if (err) 1097 break; 1098 } 1099 1100 return err; 1101 } 1102 1103 char *asprintf__tp_filter_pids(size_t npids, pid_t *pids) 1104 { 1105 char *filter; 1106 size_t i; 1107 1108 for (i = 0; i < npids; ++i) { 1109 if (i == 0) { 1110 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0) 1111 return NULL; 1112 } else { 1113 char *tmp; 1114 1115 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0) 1116 goto out_free; 1117 1118 free(filter); 1119 filter = tmp; 1120 } 1121 } 1122 1123 return filter; 1124 out_free: 1125 free(filter); 1126 return NULL; 1127 } 1128 1129 int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids) 1130 { 1131 char *filter = asprintf__tp_filter_pids(npids, pids); 1132 int ret = evlist__set_tp_filter(evlist, filter); 1133 1134 free(filter); 1135 return ret; 1136 } 1137 1138 int evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid) 1139 { 1140 return evlist__set_tp_filter_pids(evlist, 1, &pid); 1141 } 1142 1143 int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids) 1144 { 1145 char *filter = asprintf__tp_filter_pids(npids, pids); 1146 int ret = evlist__append_tp_filter(evlist, filter); 1147 1148 free(filter); 1149 return ret; 1150 } 1151 1152 int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid) 1153 { 1154 return evlist__append_tp_filter_pids(evlist, 1, &pid); 1155 } 1156 1157 bool evlist__valid_sample_type(struct evlist *evlist) 1158 { 1159 struct evsel *pos; 1160 1161 if (evlist->core.nr_entries == 1) 1162 return true; 1163 1164 if (evlist->id_pos < 0 || evlist->is_pos < 0) 1165 return false; 1166 1167 evlist__for_each_entry(evlist, pos) { 1168 if (pos->id_pos != evlist->id_pos || 1169 pos->is_pos != evlist->is_pos) 1170 return false; 1171 } 1172 1173 return true; 1174 } 1175 1176 u64 __evlist__combined_sample_type(struct evlist *evlist) 1177 { 1178 struct evsel *evsel; 1179 1180 if (evlist->combined_sample_type) 1181 return evlist->combined_sample_type; 1182 1183 evlist__for_each_entry(evlist, evsel) 1184 evlist->combined_sample_type |= evsel->core.attr.sample_type; 1185 1186 return evlist->combined_sample_type; 1187 } 1188 1189 u64 evlist__combined_sample_type(struct evlist *evlist) 1190 { 1191 evlist->combined_sample_type = 0; 1192 return __evlist__combined_sample_type(evlist); 1193 } 1194 1195 u64 evlist__combined_branch_type(struct evlist *evlist) 1196 { 1197 struct evsel *evsel; 1198 u64 branch_type = 0; 1199 1200 evlist__for_each_entry(evlist, evsel) 1201 branch_type |= evsel->core.attr.branch_sample_type; 1202 return branch_type; 1203 } 1204 1205 bool evlist__valid_read_format(struct evlist *evlist) 1206 { 1207 struct evsel *first = evlist__first(evlist), *pos = first; 1208 u64 read_format = first->core.attr.read_format; 1209 u64 sample_type = first->core.attr.sample_type; 1210 1211 evlist__for_each_entry(evlist, pos) { 1212 if (read_format != pos->core.attr.read_format) { 1213 pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n", 1214 read_format, (u64)pos->core.attr.read_format); 1215 } 1216 } 1217 1218 /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */ 1219 if ((sample_type & PERF_SAMPLE_READ) && 1220 !(read_format & PERF_FORMAT_ID)) { 1221 return false; 1222 } 1223 1224 return true; 1225 } 1226 1227 u16 evlist__id_hdr_size(struct evlist *evlist) 1228 { 1229 struct evsel *first = evlist__first(evlist); 1230 struct perf_sample *data; 1231 u64 sample_type; 1232 u16 size = 0; 1233 1234 if (!first->core.attr.sample_id_all) 1235 goto out; 1236 1237 sample_type = first->core.attr.sample_type; 1238 1239 if (sample_type & PERF_SAMPLE_TID) 1240 size += sizeof(data->tid) * 2; 1241 1242 if (sample_type & PERF_SAMPLE_TIME) 1243 size += sizeof(data->time); 1244 1245 if (sample_type & PERF_SAMPLE_ID) 1246 size += sizeof(data->id); 1247 1248 if (sample_type & PERF_SAMPLE_STREAM_ID) 1249 size += sizeof(data->stream_id); 1250 1251 if (sample_type & PERF_SAMPLE_CPU) 1252 size += sizeof(data->cpu) * 2; 1253 1254 if (sample_type & PERF_SAMPLE_IDENTIFIER) 1255 size += sizeof(data->id); 1256 out: 1257 return size; 1258 } 1259 1260 bool evlist__valid_sample_id_all(struct evlist *evlist) 1261 { 1262 struct evsel *first = evlist__first(evlist), *pos = first; 1263 1264 evlist__for_each_entry_continue(evlist, pos) { 1265 if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all) 1266 return false; 1267 } 1268 1269 return true; 1270 } 1271 1272 bool evlist__sample_id_all(struct evlist *evlist) 1273 { 1274 struct evsel *first = evlist__first(evlist); 1275 return first->core.attr.sample_id_all; 1276 } 1277 1278 void evlist__set_selected(struct evlist *evlist, struct evsel *evsel) 1279 { 1280 evlist->selected = evsel; 1281 } 1282 1283 void evlist__close(struct evlist *evlist) 1284 { 1285 struct evsel *evsel; 1286 struct evlist_cpu_iterator evlist_cpu_itr; 1287 struct affinity affinity; 1288 1289 /* 1290 * With perf record core.cpus is usually NULL. 1291 * Use the old method to handle this for now. 1292 */ 1293 if (!evlist->core.cpus || cpu_map__is_dummy(evlist->core.cpus)) { 1294 evlist__for_each_entry_reverse(evlist, evsel) 1295 evsel__close(evsel); 1296 return; 1297 } 1298 1299 if (affinity__setup(&affinity) < 0) 1300 return; 1301 1302 evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) { 1303 perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core, 1304 evlist_cpu_itr.cpu_map_idx); 1305 } 1306 1307 affinity__cleanup(&affinity); 1308 evlist__for_each_entry_reverse(evlist, evsel) { 1309 perf_evsel__free_fd(&evsel->core); 1310 perf_evsel__free_id(&evsel->core); 1311 } 1312 perf_evlist__reset_id_hash(&evlist->core); 1313 } 1314 1315 static int evlist__create_syswide_maps(struct evlist *evlist) 1316 { 1317 struct perf_cpu_map *cpus; 1318 struct perf_thread_map *threads; 1319 int err = -ENOMEM; 1320 1321 /* 1322 * Try reading /sys/devices/system/cpu/online to get 1323 * an all cpus map. 1324 * 1325 * FIXME: -ENOMEM is the best we can do here, the cpu_map 1326 * code needs an overhaul to properly forward the 1327 * error, and we may not want to do that fallback to a 1328 * default cpu identity map :-\ 1329 */ 1330 cpus = perf_cpu_map__new(NULL); 1331 if (!cpus) 1332 goto out; 1333 1334 threads = perf_thread_map__new_dummy(); 1335 if (!threads) 1336 goto out_put; 1337 1338 perf_evlist__set_maps(&evlist->core, cpus, threads); 1339 1340 perf_thread_map__put(threads); 1341 out_put: 1342 perf_cpu_map__put(cpus); 1343 out: 1344 return err; 1345 } 1346 1347 int evlist__open(struct evlist *evlist) 1348 { 1349 struct evsel *evsel; 1350 int err; 1351 1352 /* 1353 * Default: one fd per CPU, all threads, aka systemwide 1354 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL 1355 */ 1356 if (evlist->core.threads == NULL && evlist->core.cpus == NULL) { 1357 err = evlist__create_syswide_maps(evlist); 1358 if (err < 0) 1359 goto out_err; 1360 } 1361 1362 evlist__update_id_pos(evlist); 1363 1364 evlist__for_each_entry(evlist, evsel) { 1365 err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads); 1366 if (err < 0) 1367 goto out_err; 1368 } 1369 1370 return 0; 1371 out_err: 1372 evlist__close(evlist); 1373 errno = -err; 1374 return err; 1375 } 1376 1377 int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[], 1378 bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext)) 1379 { 1380 int child_ready_pipe[2], go_pipe[2]; 1381 char bf; 1382 1383 if (pipe(child_ready_pipe) < 0) { 1384 perror("failed to create 'ready' pipe"); 1385 return -1; 1386 } 1387 1388 if (pipe(go_pipe) < 0) { 1389 perror("failed to create 'go' pipe"); 1390 goto out_close_ready_pipe; 1391 } 1392 1393 evlist->workload.pid = fork(); 1394 if (evlist->workload.pid < 0) { 1395 perror("failed to fork"); 1396 goto out_close_pipes; 1397 } 1398 1399 if (!evlist->workload.pid) { 1400 int ret; 1401 1402 if (pipe_output) 1403 dup2(2, 1); 1404 1405 signal(SIGTERM, SIG_DFL); 1406 1407 close(child_ready_pipe[0]); 1408 close(go_pipe[1]); 1409 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); 1410 1411 /* 1412 * Change the name of this process not to confuse --exclude-perf users 1413 * that sees 'perf' in the window up to the execvp() and thinks that 1414 * perf samples are not being excluded. 1415 */ 1416 prctl(PR_SET_NAME, "perf-exec"); 1417 1418 /* 1419 * Tell the parent we're ready to go 1420 */ 1421 close(child_ready_pipe[1]); 1422 1423 /* 1424 * Wait until the parent tells us to go. 1425 */ 1426 ret = read(go_pipe[0], &bf, 1); 1427 /* 1428 * The parent will ask for the execvp() to be performed by 1429 * writing exactly one byte, in workload.cork_fd, usually via 1430 * evlist__start_workload(). 1431 * 1432 * For cancelling the workload without actually running it, 1433 * the parent will just close workload.cork_fd, without writing 1434 * anything, i.e. read will return zero and we just exit() 1435 * here. 1436 */ 1437 if (ret != 1) { 1438 if (ret == -1) 1439 perror("unable to read pipe"); 1440 exit(ret); 1441 } 1442 1443 execvp(argv[0], (char **)argv); 1444 1445 if (exec_error) { 1446 union sigval val; 1447 1448 val.sival_int = errno; 1449 if (sigqueue(getppid(), SIGUSR1, val)) 1450 perror(argv[0]); 1451 } else 1452 perror(argv[0]); 1453 exit(-1); 1454 } 1455 1456 if (exec_error) { 1457 struct sigaction act = { 1458 .sa_flags = SA_SIGINFO, 1459 .sa_sigaction = exec_error, 1460 }; 1461 sigaction(SIGUSR1, &act, NULL); 1462 } 1463 1464 if (target__none(target)) { 1465 if (evlist->core.threads == NULL) { 1466 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n", 1467 __func__, __LINE__); 1468 goto out_close_pipes; 1469 } 1470 perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid); 1471 } 1472 1473 close(child_ready_pipe[1]); 1474 close(go_pipe[0]); 1475 /* 1476 * wait for child to settle 1477 */ 1478 if (read(child_ready_pipe[0], &bf, 1) == -1) { 1479 perror("unable to read pipe"); 1480 goto out_close_pipes; 1481 } 1482 1483 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC); 1484 evlist->workload.cork_fd = go_pipe[1]; 1485 close(child_ready_pipe[0]); 1486 return 0; 1487 1488 out_close_pipes: 1489 close(go_pipe[0]); 1490 close(go_pipe[1]); 1491 out_close_ready_pipe: 1492 close(child_ready_pipe[0]); 1493 close(child_ready_pipe[1]); 1494 return -1; 1495 } 1496 1497 int evlist__start_workload(struct evlist *evlist) 1498 { 1499 if (evlist->workload.cork_fd > 0) { 1500 char bf = 0; 1501 int ret; 1502 /* 1503 * Remove the cork, let it rip! 1504 */ 1505 ret = write(evlist->workload.cork_fd, &bf, 1); 1506 if (ret < 0) 1507 perror("unable to write to pipe"); 1508 1509 close(evlist->workload.cork_fd); 1510 return ret; 1511 } 1512 1513 return 0; 1514 } 1515 1516 int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample) 1517 { 1518 struct evsel *evsel = evlist__event2evsel(evlist, event); 1519 1520 if (!evsel) 1521 return -EFAULT; 1522 return evsel__parse_sample(evsel, event, sample); 1523 } 1524 1525 int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp) 1526 { 1527 struct evsel *evsel = evlist__event2evsel(evlist, event); 1528 1529 if (!evsel) 1530 return -EFAULT; 1531 return evsel__parse_sample_timestamp(evsel, event, timestamp); 1532 } 1533 1534 int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size) 1535 { 1536 int printed, value; 1537 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); 1538 1539 switch (err) { 1540 case EACCES: 1541 case EPERM: 1542 printed = scnprintf(buf, size, 1543 "Error:\t%s.\n" 1544 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg); 1545 1546 value = perf_event_paranoid(); 1547 1548 printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); 1549 1550 if (value >= 2) { 1551 printed += scnprintf(buf + printed, size - printed, 1552 "For your workloads it needs to be <= 1\nHint:\t"); 1553 } 1554 printed += scnprintf(buf + printed, size - printed, 1555 "For system wide tracing it needs to be set to -1.\n"); 1556 1557 printed += scnprintf(buf + printed, size - printed, 1558 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n" 1559 "Hint:\tThe current value is %d.", value); 1560 break; 1561 case EINVAL: { 1562 struct evsel *first = evlist__first(evlist); 1563 int max_freq; 1564 1565 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0) 1566 goto out_default; 1567 1568 if (first->core.attr.sample_freq < (u64)max_freq) 1569 goto out_default; 1570 1571 printed = scnprintf(buf, size, 1572 "Error:\t%s.\n" 1573 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n" 1574 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.", 1575 emsg, max_freq, first->core.attr.sample_freq); 1576 break; 1577 } 1578 default: 1579 out_default: 1580 scnprintf(buf, size, "%s", emsg); 1581 break; 1582 } 1583 1584 return 0; 1585 } 1586 1587 int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size) 1588 { 1589 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); 1590 int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0; 1591 1592 switch (err) { 1593 case EPERM: 1594 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user); 1595 printed += scnprintf(buf + printed, size - printed, 1596 "Error:\t%s.\n" 1597 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n" 1598 "Hint:\tTried using %zd kB.\n", 1599 emsg, pages_max_per_user, pages_attempted); 1600 1601 if (pages_attempted >= pages_max_per_user) { 1602 printed += scnprintf(buf + printed, size - printed, 1603 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n", 1604 pages_max_per_user + pages_attempted); 1605 } 1606 1607 printed += scnprintf(buf + printed, size - printed, 1608 "Hint:\tTry using a smaller -m/--mmap-pages value."); 1609 break; 1610 default: 1611 scnprintf(buf, size, "%s", emsg); 1612 break; 1613 } 1614 1615 return 0; 1616 } 1617 1618 void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel) 1619 { 1620 struct evsel *evsel, *n; 1621 LIST_HEAD(move); 1622 1623 if (move_evsel == evlist__first(evlist)) 1624 return; 1625 1626 evlist__for_each_entry_safe(evlist, n, evsel) { 1627 if (evsel__leader(evsel) == evsel__leader(move_evsel)) 1628 list_move_tail(&evsel->core.node, &move); 1629 } 1630 1631 list_splice(&move, &evlist->core.entries); 1632 } 1633 1634 struct evsel *evlist__get_tracking_event(struct evlist *evlist) 1635 { 1636 struct evsel *evsel; 1637 1638 evlist__for_each_entry(evlist, evsel) { 1639 if (evsel->tracking) 1640 return evsel; 1641 } 1642 1643 return evlist__first(evlist); 1644 } 1645 1646 void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel) 1647 { 1648 struct evsel *evsel; 1649 1650 if (tracking_evsel->tracking) 1651 return; 1652 1653 evlist__for_each_entry(evlist, evsel) { 1654 if (evsel != tracking_evsel) 1655 evsel->tracking = false; 1656 } 1657 1658 tracking_evsel->tracking = true; 1659 } 1660 1661 struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str) 1662 { 1663 struct evsel *evsel; 1664 1665 evlist__for_each_entry(evlist, evsel) { 1666 if (!evsel->name) 1667 continue; 1668 if (strcmp(str, evsel->name) == 0) 1669 return evsel; 1670 } 1671 1672 return NULL; 1673 } 1674 1675 void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state) 1676 { 1677 enum bkw_mmap_state old_state = evlist->bkw_mmap_state; 1678 enum action { 1679 NONE, 1680 PAUSE, 1681 RESUME, 1682 } action = NONE; 1683 1684 if (!evlist->overwrite_mmap) 1685 return; 1686 1687 switch (old_state) { 1688 case BKW_MMAP_NOTREADY: { 1689 if (state != BKW_MMAP_RUNNING) 1690 goto state_err; 1691 break; 1692 } 1693 case BKW_MMAP_RUNNING: { 1694 if (state != BKW_MMAP_DATA_PENDING) 1695 goto state_err; 1696 action = PAUSE; 1697 break; 1698 } 1699 case BKW_MMAP_DATA_PENDING: { 1700 if (state != BKW_MMAP_EMPTY) 1701 goto state_err; 1702 break; 1703 } 1704 case BKW_MMAP_EMPTY: { 1705 if (state != BKW_MMAP_RUNNING) 1706 goto state_err; 1707 action = RESUME; 1708 break; 1709 } 1710 default: 1711 WARN_ONCE(1, "Shouldn't get there\n"); 1712 } 1713 1714 evlist->bkw_mmap_state = state; 1715 1716 switch (action) { 1717 case PAUSE: 1718 evlist__pause(evlist); 1719 break; 1720 case RESUME: 1721 evlist__resume(evlist); 1722 break; 1723 case NONE: 1724 default: 1725 break; 1726 } 1727 1728 state_err: 1729 return; 1730 } 1731 1732 bool evlist__exclude_kernel(struct evlist *evlist) 1733 { 1734 struct evsel *evsel; 1735 1736 evlist__for_each_entry(evlist, evsel) { 1737 if (!evsel->core.attr.exclude_kernel) 1738 return false; 1739 } 1740 1741 return true; 1742 } 1743 1744 /* 1745 * Events in data file are not collect in groups, but we still want 1746 * the group display. Set the artificial group and set the leader's 1747 * forced_leader flag to notify the display code. 1748 */ 1749 void evlist__force_leader(struct evlist *evlist) 1750 { 1751 if (!evlist->core.nr_groups) { 1752 struct evsel *leader = evlist__first(evlist); 1753 1754 evlist__set_leader(evlist); 1755 leader->forced_leader = true; 1756 } 1757 } 1758 1759 struct evsel *evlist__reset_weak_group(struct evlist *evsel_list, struct evsel *evsel, bool close) 1760 { 1761 struct evsel *c2, *leader; 1762 bool is_open = true; 1763 1764 leader = evsel__leader(evsel); 1765 1766 pr_debug("Weak group for %s/%d failed\n", 1767 leader->name, leader->core.nr_members); 1768 1769 /* 1770 * for_each_group_member doesn't work here because it doesn't 1771 * include the first entry. 1772 */ 1773 evlist__for_each_entry(evsel_list, c2) { 1774 if (c2 == evsel) 1775 is_open = false; 1776 if (evsel__has_leader(c2, leader)) { 1777 if (is_open && close) 1778 perf_evsel__close(&c2->core); 1779 evsel__set_leader(c2, c2); 1780 c2->core.nr_members = 0; 1781 /* 1782 * Set this for all former members of the group 1783 * to indicate they get reopened. 1784 */ 1785 c2->reset_group = true; 1786 } 1787 } 1788 return leader; 1789 } 1790 1791 static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close) 1792 { 1793 char *s, *p; 1794 int ret = 0, fd; 1795 1796 if (strncmp(str, "fifo:", 5)) 1797 return -EINVAL; 1798 1799 str += 5; 1800 if (!*str || *str == ',') 1801 return -EINVAL; 1802 1803 s = strdup(str); 1804 if (!s) 1805 return -ENOMEM; 1806 1807 p = strchr(s, ','); 1808 if (p) 1809 *p = '\0'; 1810 1811 /* 1812 * O_RDWR avoids POLLHUPs which is necessary to allow the other 1813 * end of a FIFO to be repeatedly opened and closed. 1814 */ 1815 fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC); 1816 if (fd < 0) { 1817 pr_err("Failed to open '%s'\n", s); 1818 ret = -errno; 1819 goto out_free; 1820 } 1821 *ctl_fd = fd; 1822 *ctl_fd_close = true; 1823 1824 if (p && *++p) { 1825 /* O_RDWR | O_NONBLOCK means the other end need not be open */ 1826 fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC); 1827 if (fd < 0) { 1828 pr_err("Failed to open '%s'\n", p); 1829 ret = -errno; 1830 goto out_free; 1831 } 1832 *ctl_fd_ack = fd; 1833 } 1834 1835 out_free: 1836 free(s); 1837 return ret; 1838 } 1839 1840 int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close) 1841 { 1842 char *comma = NULL, *endptr = NULL; 1843 1844 *ctl_fd_close = false; 1845 1846 if (strncmp(str, "fd:", 3)) 1847 return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close); 1848 1849 *ctl_fd = strtoul(&str[3], &endptr, 0); 1850 if (endptr == &str[3]) 1851 return -EINVAL; 1852 1853 comma = strchr(str, ','); 1854 if (comma) { 1855 if (endptr != comma) 1856 return -EINVAL; 1857 1858 *ctl_fd_ack = strtoul(comma + 1, &endptr, 0); 1859 if (endptr == comma + 1 || *endptr != '\0') 1860 return -EINVAL; 1861 } 1862 1863 return 0; 1864 } 1865 1866 void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close) 1867 { 1868 if (*ctl_fd_close) { 1869 *ctl_fd_close = false; 1870 close(ctl_fd); 1871 if (ctl_fd_ack >= 0) 1872 close(ctl_fd_ack); 1873 } 1874 } 1875 1876 int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack) 1877 { 1878 if (fd == -1) { 1879 pr_debug("Control descriptor is not initialized\n"); 1880 return 0; 1881 } 1882 1883 evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, 1884 fdarray_flag__nonfilterable); 1885 if (evlist->ctl_fd.pos < 0) { 1886 evlist->ctl_fd.pos = -1; 1887 pr_err("Failed to add ctl fd entry: %m\n"); 1888 return -1; 1889 } 1890 1891 evlist->ctl_fd.fd = fd; 1892 evlist->ctl_fd.ack = ack; 1893 1894 return 0; 1895 } 1896 1897 bool evlist__ctlfd_initialized(struct evlist *evlist) 1898 { 1899 return evlist->ctl_fd.pos >= 0; 1900 } 1901 1902 int evlist__finalize_ctlfd(struct evlist *evlist) 1903 { 1904 struct pollfd *entries = evlist->core.pollfd.entries; 1905 1906 if (!evlist__ctlfd_initialized(evlist)) 1907 return 0; 1908 1909 entries[evlist->ctl_fd.pos].fd = -1; 1910 entries[evlist->ctl_fd.pos].events = 0; 1911 entries[evlist->ctl_fd.pos].revents = 0; 1912 1913 evlist->ctl_fd.pos = -1; 1914 evlist->ctl_fd.ack = -1; 1915 evlist->ctl_fd.fd = -1; 1916 1917 return 0; 1918 } 1919 1920 static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd, 1921 char *cmd_data, size_t data_size) 1922 { 1923 int err; 1924 char c; 1925 size_t bytes_read = 0; 1926 1927 *cmd = EVLIST_CTL_CMD_UNSUPPORTED; 1928 memset(cmd_data, 0, data_size); 1929 data_size--; 1930 1931 do { 1932 err = read(evlist->ctl_fd.fd, &c, 1); 1933 if (err > 0) { 1934 if (c == '\n' || c == '\0') 1935 break; 1936 cmd_data[bytes_read++] = c; 1937 if (bytes_read == data_size) 1938 break; 1939 continue; 1940 } else if (err == -1) { 1941 if (errno == EINTR) 1942 continue; 1943 if (errno == EAGAIN || errno == EWOULDBLOCK) 1944 err = 0; 1945 else 1946 pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd); 1947 } 1948 break; 1949 } while (1); 1950 1951 pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data, 1952 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0"); 1953 1954 if (bytes_read > 0) { 1955 if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG, 1956 (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) { 1957 *cmd = EVLIST_CTL_CMD_ENABLE; 1958 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG, 1959 (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) { 1960 *cmd = EVLIST_CTL_CMD_DISABLE; 1961 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_SNAPSHOT_TAG, 1962 (sizeof(EVLIST_CTL_CMD_SNAPSHOT_TAG)-1))) { 1963 *cmd = EVLIST_CTL_CMD_SNAPSHOT; 1964 pr_debug("is snapshot\n"); 1965 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_EVLIST_TAG, 1966 (sizeof(EVLIST_CTL_CMD_EVLIST_TAG)-1))) { 1967 *cmd = EVLIST_CTL_CMD_EVLIST; 1968 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_STOP_TAG, 1969 (sizeof(EVLIST_CTL_CMD_STOP_TAG)-1))) { 1970 *cmd = EVLIST_CTL_CMD_STOP; 1971 } else if (!strncmp(cmd_data, EVLIST_CTL_CMD_PING_TAG, 1972 (sizeof(EVLIST_CTL_CMD_PING_TAG)-1))) { 1973 *cmd = EVLIST_CTL_CMD_PING; 1974 } 1975 } 1976 1977 return bytes_read ? (int)bytes_read : err; 1978 } 1979 1980 int evlist__ctlfd_ack(struct evlist *evlist) 1981 { 1982 int err; 1983 1984 if (evlist->ctl_fd.ack == -1) 1985 return 0; 1986 1987 err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG, 1988 sizeof(EVLIST_CTL_CMD_ACK_TAG)); 1989 if (err == -1) 1990 pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack); 1991 1992 return err; 1993 } 1994 1995 static int get_cmd_arg(char *cmd_data, size_t cmd_size, char **arg) 1996 { 1997 char *data = cmd_data + cmd_size; 1998 1999 /* no argument */ 2000 if (!*data) 2001 return 0; 2002 2003 /* there's argument */ 2004 if (*data == ' ') { 2005 *arg = data + 1; 2006 return 1; 2007 } 2008 2009 /* malformed */ 2010 return -1; 2011 } 2012 2013 static int evlist__ctlfd_enable(struct evlist *evlist, char *cmd_data, bool enable) 2014 { 2015 struct evsel *evsel; 2016 char *name; 2017 int err; 2018 2019 err = get_cmd_arg(cmd_data, 2020 enable ? sizeof(EVLIST_CTL_CMD_ENABLE_TAG) - 1 : 2021 sizeof(EVLIST_CTL_CMD_DISABLE_TAG) - 1, 2022 &name); 2023 if (err < 0) { 2024 pr_info("failed: wrong command\n"); 2025 return -1; 2026 } 2027 2028 if (err) { 2029 evsel = evlist__find_evsel_by_str(evlist, name); 2030 if (evsel) { 2031 if (enable) 2032 evlist__enable_evsel(evlist, name); 2033 else 2034 evlist__disable_evsel(evlist, name); 2035 pr_info("Event %s %s\n", evsel->name, 2036 enable ? "enabled" : "disabled"); 2037 } else { 2038 pr_info("failed: can't find '%s' event\n", name); 2039 } 2040 } else { 2041 if (enable) { 2042 evlist__enable(evlist); 2043 pr_info(EVLIST_ENABLED_MSG); 2044 } else { 2045 evlist__disable(evlist); 2046 pr_info(EVLIST_DISABLED_MSG); 2047 } 2048 } 2049 2050 return 0; 2051 } 2052 2053 static int evlist__ctlfd_list(struct evlist *evlist, char *cmd_data) 2054 { 2055 struct perf_attr_details details = { .verbose = false, }; 2056 struct evsel *evsel; 2057 char *arg; 2058 int err; 2059 2060 err = get_cmd_arg(cmd_data, 2061 sizeof(EVLIST_CTL_CMD_EVLIST_TAG) - 1, 2062 &arg); 2063 if (err < 0) { 2064 pr_info("failed: wrong command\n"); 2065 return -1; 2066 } 2067 2068 if (err) { 2069 if (!strcmp(arg, "-v")) { 2070 details.verbose = true; 2071 } else if (!strcmp(arg, "-g")) { 2072 details.event_group = true; 2073 } else if (!strcmp(arg, "-F")) { 2074 details.freq = true; 2075 } else { 2076 pr_info("failed: wrong command\n"); 2077 return -1; 2078 } 2079 } 2080 2081 evlist__for_each_entry(evlist, evsel) 2082 evsel__fprintf(evsel, &details, stderr); 2083 2084 return 0; 2085 } 2086 2087 int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd) 2088 { 2089 int err = 0; 2090 char cmd_data[EVLIST_CTL_CMD_MAX_LEN]; 2091 int ctlfd_pos = evlist->ctl_fd.pos; 2092 struct pollfd *entries = evlist->core.pollfd.entries; 2093 2094 if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents) 2095 return 0; 2096 2097 if (entries[ctlfd_pos].revents & POLLIN) { 2098 err = evlist__ctlfd_recv(evlist, cmd, cmd_data, 2099 EVLIST_CTL_CMD_MAX_LEN); 2100 if (err > 0) { 2101 switch (*cmd) { 2102 case EVLIST_CTL_CMD_ENABLE: 2103 case EVLIST_CTL_CMD_DISABLE: 2104 err = evlist__ctlfd_enable(evlist, cmd_data, 2105 *cmd == EVLIST_CTL_CMD_ENABLE); 2106 break; 2107 case EVLIST_CTL_CMD_EVLIST: 2108 err = evlist__ctlfd_list(evlist, cmd_data); 2109 break; 2110 case EVLIST_CTL_CMD_SNAPSHOT: 2111 case EVLIST_CTL_CMD_STOP: 2112 case EVLIST_CTL_CMD_PING: 2113 break; 2114 case EVLIST_CTL_CMD_ACK: 2115 case EVLIST_CTL_CMD_UNSUPPORTED: 2116 default: 2117 pr_debug("ctlfd: unsupported %d\n", *cmd); 2118 break; 2119 } 2120 if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED || 2121 *cmd == EVLIST_CTL_CMD_SNAPSHOT)) 2122 evlist__ctlfd_ack(evlist); 2123 } 2124 } 2125 2126 if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR)) 2127 evlist__finalize_ctlfd(evlist); 2128 else 2129 entries[ctlfd_pos].revents = 0; 2130 2131 return err; 2132 } 2133 2134 struct evsel *evlist__find_evsel(struct evlist *evlist, int idx) 2135 { 2136 struct evsel *evsel; 2137 2138 evlist__for_each_entry(evlist, evsel) { 2139 if (evsel->core.idx == idx) 2140 return evsel; 2141 } 2142 return NULL; 2143 } 2144 2145 int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf) 2146 { 2147 struct evsel *evsel; 2148 int printed = 0; 2149 2150 evlist__for_each_entry(evlist, evsel) { 2151 if (evsel__is_dummy_event(evsel)) 2152 continue; 2153 if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) { 2154 printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "," : "", evsel__name(evsel)); 2155 } else { 2156 printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : ""); 2157 break; 2158 } 2159 } 2160 2161 return printed; 2162 } 2163 2164 void evlist__check_mem_load_aux(struct evlist *evlist) 2165 { 2166 struct evsel *leader, *evsel, *pos; 2167 2168 /* 2169 * For some platforms, the 'mem-loads' event is required to use 2170 * together with 'mem-loads-aux' within a group and 'mem-loads-aux' 2171 * must be the group leader. Now we disable this group before reporting 2172 * because 'mem-loads-aux' is just an auxiliary event. It doesn't carry 2173 * any valid memory load information. 2174 */ 2175 evlist__for_each_entry(evlist, evsel) { 2176 leader = evsel__leader(evsel); 2177 if (leader == evsel) 2178 continue; 2179 2180 if (leader->name && strstr(leader->name, "mem-loads-aux")) { 2181 for_each_group_evsel(pos, leader) { 2182 evsel__set_leader(pos, pos); 2183 pos->core.nr_members = 0; 2184 } 2185 } 2186 } 2187 } 2188