1 /* 2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 3 * 4 * Parts came from builtin-{top,stat,record}.c, see those files for further 5 * copyright notes. 6 * 7 * Released under the GPL v2. (and only v2, not any later version) 8 */ 9 #include "util.h" 10 #include <api/fs/fs.h> 11 #include <poll.h> 12 #include "cpumap.h" 13 #include "thread_map.h" 14 #include "target.h" 15 #include "evlist.h" 16 #include "evsel.h" 17 #include "debug.h" 18 #include "asm/bug.h" 19 #include <unistd.h> 20 21 #include "parse-events.h" 22 #include <subcmd/parse-options.h> 23 24 #include <sys/mman.h> 25 26 #include <linux/bitops.h> 27 #include <linux/hash.h> 28 #include <linux/log2.h> 29 #include <linux/err.h> 30 31 static void perf_mmap__munmap(struct perf_mmap *map); 32 static void perf_mmap__put(struct perf_mmap *map); 33 34 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 35 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 36 37 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, 38 struct thread_map *threads) 39 { 40 int i; 41 42 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) 43 INIT_HLIST_HEAD(&evlist->heads[i]); 44 INIT_LIST_HEAD(&evlist->entries); 45 perf_evlist__set_maps(evlist, cpus, threads); 46 fdarray__init(&evlist->pollfd, 64); 47 evlist->workload.pid = -1; 48 evlist->bkw_mmap_state = BKW_MMAP_NOTREADY; 49 } 50 51 struct perf_evlist *perf_evlist__new(void) 52 { 53 struct perf_evlist *evlist = zalloc(sizeof(*evlist)); 54 55 if (evlist != NULL) 56 perf_evlist__init(evlist, NULL, NULL); 57 58 return evlist; 59 } 60 61 struct perf_evlist *perf_evlist__new_default(void) 62 { 63 struct perf_evlist *evlist = perf_evlist__new(); 64 65 if (evlist && perf_evlist__add_default(evlist)) { 66 perf_evlist__delete(evlist); 67 evlist = NULL; 68 } 69 70 return evlist; 71 } 72 73 struct perf_evlist *perf_evlist__new_dummy(void) 74 { 75 struct perf_evlist *evlist = perf_evlist__new(); 76 77 if (evlist && perf_evlist__add_dummy(evlist)) { 78 perf_evlist__delete(evlist); 79 evlist = NULL; 80 } 81 82 return evlist; 83 } 84 85 /** 86 * perf_evlist__set_id_pos - set the positions of event ids. 87 * @evlist: selected event list 88 * 89 * Events with compatible sample types all have the same id_pos 90 * and is_pos. For convenience, put a copy on evlist. 
91 */ 92 void perf_evlist__set_id_pos(struct perf_evlist *evlist) 93 { 94 struct perf_evsel *first = perf_evlist__first(evlist); 95 96 evlist->id_pos = first->id_pos; 97 evlist->is_pos = first->is_pos; 98 } 99 100 static void perf_evlist__update_id_pos(struct perf_evlist *evlist) 101 { 102 struct perf_evsel *evsel; 103 104 evlist__for_each_entry(evlist, evsel) 105 perf_evsel__calc_id_pos(evsel); 106 107 perf_evlist__set_id_pos(evlist); 108 } 109 110 static void perf_evlist__purge(struct perf_evlist *evlist) 111 { 112 struct perf_evsel *pos, *n; 113 114 evlist__for_each_entry_safe(evlist, n, pos) { 115 list_del_init(&pos->node); 116 pos->evlist = NULL; 117 perf_evsel__delete(pos); 118 } 119 120 evlist->nr_entries = 0; 121 } 122 123 void perf_evlist__exit(struct perf_evlist *evlist) 124 { 125 zfree(&evlist->mmap); 126 zfree(&evlist->backward_mmap); 127 fdarray__exit(&evlist->pollfd); 128 } 129 130 void perf_evlist__delete(struct perf_evlist *evlist) 131 { 132 if (evlist == NULL) 133 return; 134 135 perf_evlist__munmap(evlist); 136 perf_evlist__close(evlist); 137 cpu_map__put(evlist->cpus); 138 thread_map__put(evlist->threads); 139 evlist->cpus = NULL; 140 evlist->threads = NULL; 141 perf_evlist__purge(evlist); 142 perf_evlist__exit(evlist); 143 free(evlist); 144 } 145 146 static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, 147 struct perf_evsel *evsel) 148 { 149 /* 150 * We already have cpus for evsel (via PMU sysfs) so 151 * keep it, if there's no target cpu list defined. 152 */ 153 if (!evsel->own_cpus || evlist->has_user_cpus) { 154 cpu_map__put(evsel->cpus); 155 evsel->cpus = cpu_map__get(evlist->cpus); 156 } else if (evsel->cpus != evsel->own_cpus) { 157 cpu_map__put(evsel->cpus); 158 evsel->cpus = cpu_map__get(evsel->own_cpus); 159 } 160 161 thread_map__put(evsel->threads); 162 evsel->threads = thread_map__get(evlist->threads); 163 } 164 165 static void perf_evlist__propagate_maps(struct perf_evlist *evlist) 166 { 167 struct perf_evsel *evsel; 168 169 evlist__for_each_entry(evlist, evsel) 170 __perf_evlist__propagate_maps(evlist, evsel); 171 } 172 173 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) 174 { 175 entry->evlist = evlist; 176 list_add_tail(&entry->node, &evlist->entries); 177 entry->idx = evlist->nr_entries; 178 entry->tracking = !entry->idx; 179 180 if (!evlist->nr_entries++) 181 perf_evlist__set_id_pos(evlist); 182 183 __perf_evlist__propagate_maps(evlist, entry); 184 } 185 186 void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel) 187 { 188 evsel->evlist = NULL; 189 list_del_init(&evsel->node); 190 evlist->nr_entries -= 1; 191 } 192 193 void perf_evlist__splice_list_tail(struct perf_evlist *evlist, 194 struct list_head *list) 195 { 196 struct perf_evsel *evsel, *temp; 197 198 __evlist__for_each_entry_safe(list, temp, evsel) { 199 list_del_init(&evsel->node); 200 perf_evlist__add(evlist, evsel); 201 } 202 } 203 204 void __perf_evlist__set_leader(struct list_head *list) 205 { 206 struct perf_evsel *evsel, *leader; 207 208 leader = list_entry(list->next, struct perf_evsel, node); 209 evsel = list_entry(list->prev, struct perf_evsel, node); 210 211 leader->nr_members = evsel->idx - leader->idx + 1; 212 213 __evlist__for_each_entry(list, evsel) { 214 evsel->leader = leader; 215 } 216 } 217 218 void perf_evlist__set_leader(struct perf_evlist *evlist) 219 { 220 if (evlist->nr_entries) { 221 evlist->nr_groups = evlist->nr_entries > 1 ? 
1 : 0; 222 __perf_evlist__set_leader(&evlist->entries); 223 } 224 } 225 226 void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr) 227 { 228 attr->precise_ip = 3; 229 230 while (attr->precise_ip != 0) { 231 int fd = sys_perf_event_open(attr, 0, -1, -1, 0); 232 if (fd != -1) { 233 close(fd); 234 break; 235 } 236 --attr->precise_ip; 237 } 238 } 239 240 int perf_evlist__add_default(struct perf_evlist *evlist) 241 { 242 struct perf_evsel *evsel = perf_evsel__new_cycles(); 243 244 if (evsel == NULL) 245 return -ENOMEM; 246 247 perf_evlist__add(evlist, evsel); 248 return 0; 249 } 250 251 int perf_evlist__add_dummy(struct perf_evlist *evlist) 252 { 253 struct perf_event_attr attr = { 254 .type = PERF_TYPE_SOFTWARE, 255 .config = PERF_COUNT_SW_DUMMY, 256 .size = sizeof(attr), /* to capture ABI version */ 257 }; 258 struct perf_evsel *evsel = perf_evsel__new(&attr); 259 260 if (evsel == NULL) 261 return -ENOMEM; 262 263 perf_evlist__add(evlist, evsel); 264 return 0; 265 } 266 267 static int perf_evlist__add_attrs(struct perf_evlist *evlist, 268 struct perf_event_attr *attrs, size_t nr_attrs) 269 { 270 struct perf_evsel *evsel, *n; 271 LIST_HEAD(head); 272 size_t i; 273 274 for (i = 0; i < nr_attrs; i++) { 275 evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i); 276 if (evsel == NULL) 277 goto out_delete_partial_list; 278 list_add_tail(&evsel->node, &head); 279 } 280 281 perf_evlist__splice_list_tail(evlist, &head); 282 283 return 0; 284 285 out_delete_partial_list: 286 __evlist__for_each_entry_safe(&head, n, evsel) 287 perf_evsel__delete(evsel); 288 return -1; 289 } 290 291 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, 292 struct perf_event_attr *attrs, size_t nr_attrs) 293 { 294 size_t i; 295 296 for (i = 0; i < nr_attrs; i++) 297 event_attr_init(attrs + i); 298 299 return perf_evlist__add_attrs(evlist, attrs, nr_attrs); 300 } 301 302 struct perf_evsel * 303 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) 304 { 305 struct perf_evsel *evsel; 306 307 evlist__for_each_entry(evlist, evsel) { 308 if (evsel->attr.type == PERF_TYPE_TRACEPOINT && 309 (int)evsel->attr.config == id) 310 return evsel; 311 } 312 313 return NULL; 314 } 315 316 struct perf_evsel * 317 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, 318 const char *name) 319 { 320 struct perf_evsel *evsel; 321 322 evlist__for_each_entry(evlist, evsel) { 323 if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) && 324 (strcmp(evsel->name, name) == 0)) 325 return evsel; 326 } 327 328 return NULL; 329 } 330 331 int perf_evlist__add_newtp(struct perf_evlist *evlist, 332 const char *sys, const char *name, void *handler) 333 { 334 struct perf_evsel *evsel = perf_evsel__newtp(sys, name); 335 336 if (IS_ERR(evsel)) 337 return -1; 338 339 evsel->handler = handler; 340 perf_evlist__add(evlist, evsel); 341 return 0; 342 } 343 344 static int perf_evlist__nr_threads(struct perf_evlist *evlist, 345 struct perf_evsel *evsel) 346 { 347 if (evsel->system_wide) 348 return 1; 349 else 350 return thread_map__nr(evlist->threads); 351 } 352 353 void perf_evlist__disable(struct perf_evlist *evlist) 354 { 355 struct perf_evsel *pos; 356 357 evlist__for_each_entry(evlist, pos) { 358 if (!perf_evsel__is_group_leader(pos) || !pos->fd) 359 continue; 360 perf_evsel__disable(pos); 361 } 362 363 evlist->enabled = false; 364 } 365 366 void perf_evlist__enable(struct perf_evlist *evlist) 367 { 368 struct perf_evsel *pos; 369 370 evlist__for_each_entry(evlist, pos) { 371 if 
(!perf_evsel__is_group_leader(pos) || !pos->fd) 372 continue; 373 perf_evsel__enable(pos); 374 } 375 376 evlist->enabled = true; 377 } 378 379 void perf_evlist__toggle_enable(struct perf_evlist *evlist) 380 { 381 (evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist); 382 } 383 384 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist, 385 struct perf_evsel *evsel, int cpu) 386 { 387 int thread; 388 int nr_threads = perf_evlist__nr_threads(evlist, evsel); 389 390 if (!evsel->fd) 391 return -EINVAL; 392 393 for (thread = 0; thread < nr_threads; thread++) { 394 int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); 395 if (err) 396 return err; 397 } 398 return 0; 399 } 400 401 static int perf_evlist__enable_event_thread(struct perf_evlist *evlist, 402 struct perf_evsel *evsel, 403 int thread) 404 { 405 int cpu; 406 int nr_cpus = cpu_map__nr(evlist->cpus); 407 408 if (!evsel->fd) 409 return -EINVAL; 410 411 for (cpu = 0; cpu < nr_cpus; cpu++) { 412 int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); 413 if (err) 414 return err; 415 } 416 return 0; 417 } 418 419 int perf_evlist__enable_event_idx(struct perf_evlist *evlist, 420 struct perf_evsel *evsel, int idx) 421 { 422 bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus); 423 424 if (per_cpu_mmaps) 425 return perf_evlist__enable_event_cpu(evlist, evsel, idx); 426 else 427 return perf_evlist__enable_event_thread(evlist, evsel, idx); 428 } 429 430 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) 431 { 432 int nr_cpus = cpu_map__nr(evlist->cpus); 433 int nr_threads = thread_map__nr(evlist->threads); 434 int nfds = 0; 435 struct perf_evsel *evsel; 436 437 evlist__for_each_entry(evlist, evsel) { 438 if (evsel->system_wide) 439 nfds += nr_cpus; 440 else 441 nfds += nr_cpus * nr_threads; 442 } 443 444 if (fdarray__available_entries(&evlist->pollfd) < nfds && 445 fdarray__grow(&evlist->pollfd, nfds) < 0) 446 return -ENOMEM; 447 448 return 0; 449 } 450 451 static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, 452 struct perf_mmap *map, short revent) 453 { 454 int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP); 455 /* 456 * Save the idx so that when we filter out fds POLLHUP'ed we can 457 * close the associated evlist->mmap[] entry. 
458 */ 459 if (pos >= 0) { 460 evlist->pollfd.priv[pos].ptr = map; 461 462 fcntl(fd, F_SETFL, O_NONBLOCK); 463 } 464 465 return pos; 466 } 467 468 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) 469 { 470 return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN); 471 } 472 473 static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd, 474 void *arg __maybe_unused) 475 { 476 struct perf_mmap *map = fda->priv[fd].ptr; 477 478 if (map) 479 perf_mmap__put(map); 480 } 481 482 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) 483 { 484 return fdarray__filter(&evlist->pollfd, revents_and_mask, 485 perf_evlist__munmap_filtered, NULL); 486 } 487 488 int perf_evlist__poll(struct perf_evlist *evlist, int timeout) 489 { 490 return fdarray__poll(&evlist->pollfd, timeout); 491 } 492 493 static void perf_evlist__id_hash(struct perf_evlist *evlist, 494 struct perf_evsel *evsel, 495 int cpu, int thread, u64 id) 496 { 497 int hash; 498 struct perf_sample_id *sid = SID(evsel, cpu, thread); 499 500 sid->id = id; 501 sid->evsel = evsel; 502 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); 503 hlist_add_head(&sid->node, &evlist->heads[hash]); 504 } 505 506 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, 507 int cpu, int thread, u64 id) 508 { 509 perf_evlist__id_hash(evlist, evsel, cpu, thread, id); 510 evsel->id[evsel->ids++] = id; 511 } 512 513 int perf_evlist__id_add_fd(struct perf_evlist *evlist, 514 struct perf_evsel *evsel, 515 int cpu, int thread, int fd) 516 { 517 u64 read_data[4] = { 0, }; 518 int id_idx = 1; /* The first entry is the counter value */ 519 u64 id; 520 int ret; 521 522 ret = ioctl(fd, PERF_EVENT_IOC_ID, &id); 523 if (!ret) 524 goto add; 525 526 if (errno != ENOTTY) 527 return -1; 528 529 /* Legacy way to get event id.. All hail to old kernels! */ 530 531 /* 532 * This way does not work with group format read, so bail 533 * out in that case. 
534 */ 535 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) 536 return -1; 537 538 if (!(evsel->attr.read_format & PERF_FORMAT_ID) || 539 read(fd, &read_data, sizeof(read_data)) == -1) 540 return -1; 541 542 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 543 ++id_idx; 544 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 545 ++id_idx; 546 547 id = read_data[id_idx]; 548 549 add: 550 perf_evlist__id_add(evlist, evsel, cpu, thread, id); 551 return 0; 552 } 553 554 static void perf_evlist__set_sid_idx(struct perf_evlist *evlist, 555 struct perf_evsel *evsel, int idx, int cpu, 556 int thread) 557 { 558 struct perf_sample_id *sid = SID(evsel, cpu, thread); 559 sid->idx = idx; 560 if (evlist->cpus && cpu >= 0) 561 sid->cpu = evlist->cpus->map[cpu]; 562 else 563 sid->cpu = -1; 564 if (!evsel->system_wide && evlist->threads && thread >= 0) 565 sid->tid = thread_map__pid(evlist->threads, thread); 566 else 567 sid->tid = -1; 568 } 569 570 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id) 571 { 572 struct hlist_head *head; 573 struct perf_sample_id *sid; 574 int hash; 575 576 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 577 head = &evlist->heads[hash]; 578 579 hlist_for_each_entry(sid, head, node) 580 if (sid->id == id) 581 return sid; 582 583 return NULL; 584 } 585 586 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) 587 { 588 struct perf_sample_id *sid; 589 590 if (evlist->nr_entries == 1 || !id) 591 return perf_evlist__first(evlist); 592 593 sid = perf_evlist__id2sid(evlist, id); 594 if (sid) 595 return sid->evsel; 596 597 if (!perf_evlist__sample_id_all(evlist)) 598 return perf_evlist__first(evlist); 599 600 return NULL; 601 } 602 603 struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist, 604 u64 id) 605 { 606 struct perf_sample_id *sid; 607 608 if (!id) 609 return NULL; 610 611 sid = perf_evlist__id2sid(evlist, id); 612 if (sid) 613 return sid->evsel; 614 615 return NULL; 616 } 617 618 static int perf_evlist__event2id(struct perf_evlist *evlist, 619 union perf_event *event, u64 *id) 620 { 621 const u64 *array = event->sample.array; 622 ssize_t n; 623 624 n = (event->header.size - sizeof(event->header)) >> 3; 625 626 if (event->header.type == PERF_RECORD_SAMPLE) { 627 if (evlist->id_pos >= n) 628 return -1; 629 *id = array[evlist->id_pos]; 630 } else { 631 if (evlist->is_pos > n) 632 return -1; 633 n -= evlist->is_pos; 634 *id = array[n]; 635 } 636 return 0; 637 } 638 639 struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, 640 union perf_event *event) 641 { 642 struct perf_evsel *first = perf_evlist__first(evlist); 643 struct hlist_head *head; 644 struct perf_sample_id *sid; 645 int hash; 646 u64 id; 647 648 if (evlist->nr_entries == 1) 649 return first; 650 651 if (!first->attr.sample_id_all && 652 event->header.type != PERF_RECORD_SAMPLE) 653 return first; 654 655 if (perf_evlist__event2id(evlist, event, &id)) 656 return NULL; 657 658 /* Synthesized events have an id of zero */ 659 if (!id) 660 return first; 661 662 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 663 head = &evlist->heads[hash]; 664 665 hlist_for_each_entry(sid, head, node) { 666 if (sid->id == id) 667 return sid->evsel; 668 } 669 return NULL; 670 } 671 672 static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value) 673 { 674 int i; 675 676 if (!evlist->backward_mmap) 677 return 0; 678 679 for (i = 0; i < evlist->nr_mmaps; i++) { 680 int fd = evlist->backward_mmap[i].fd; 681 int err; 
		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct perf_evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
		u64 end, u64 *prev)
{
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;
	int diff = end - start;

	if (check_messup) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the 'end', we got messed up.
		 *
		 * In either case, truncate and restart at 'end'.
		 */
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * 'end' points to a known good entry, start there.
			 */
			start = end;
			diff = 0;
		}
	}

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[start & md->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size) {
			event = NULL;
			goto broken_event;
		}

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((start & md->mask) + size != ((start + size) & md->mask)) {
			unsigned int offset = start;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		start += size;
	}

broken_event:
	if (prev)
		*prev = start;

	return event;
}

union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
{
	u64 head;
	u64 old = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);

	return perf_mmap__read(md, check_messup, old, head, &md->prev);
}

union perf_event *
perf_mmap__read_backward(struct perf_mmap *md)
{
	u64 head, end;
	u64 start = md->prev;

	/*
	 * Check if event was unmapped due to a POLLHUP/POLLERR.
	 */
	if (!atomic_read(&md->refcnt))
		return NULL;

	head = perf_mmap__read_head(md);
	if (!head)
		return NULL;

	/*
	 * In a backward ring buffer, 'head' starts from 0 and the kernel
	 * subtracts sizeof(record) from it on every write, so 'head' is
	 * effectively negative. 'end' is built manually by adding the size
	 * of the ring buffer to 'head', meaning that the valid data we can
	 * read spans the whole ring buffer. If 'end' is positive, the ring
	 * buffer has not been completely filled yet, so 'end' must be
	 * adjusted to 0.
	 *
	 * However, since both 'head' and 'end' are unsigned, we can't
	 * simply compare 'end' against 0. Instead we compare '-head' with
	 * the size of the ring buffer, where -head is the number of bytes
	 * the kernel has written to the ring buffer.
	 */
	if (-head < (u64)(md->mask + 1))
		end = 0;
	else
		end = head + md->mask + 1;

	return perf_mmap__read(md, false, start, end, &md->prev);
}

union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * Checking for messed-up data is required for a forward overwritable
	 * ring buffer: the memory pointed to by md->prev can be overwritten
	 * in that case. It is not needed for a read-write ring buffer: the
	 * kernel stops outputting when it hits md->prev (perf_mmap__consume()).
	 */
	return perf_mmap__read_forward(md, evlist->overwrite);
}

union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	/*
	 * No need to check for messed-up data in a backward ring buffer:
	 * we can always read arbitrarily long data from it, as long as we
	 * remember to pause it before reading.
	 */
	return perf_mmap__read_backward(md);
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	return perf_evlist__mmap_read_forward(evlist, idx);
}

void perf_mmap__read_catchup(struct perf_mmap *md)
{
	u64 head;

	if (!atomic_read(&md->refcnt))
		return;

	head = perf_mmap__read_head(md);
	md->prev = head;
}

void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
{
	perf_mmap__read_catchup(&evlist->mmap[idx]);
}

static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
}

static void perf_mmap__get(struct perf_mmap *map)
{
	atomic_inc(&map->refcnt);
}

static void perf_mmap__put(struct perf_mmap *md)
{
	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);

	if (atomic_dec_and_test(&md->refcnt))
		perf_mmap__munmap(md);
}

void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
{
	if (!overwrite) {
		u64 old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
		perf_mmap__put(md);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
}

int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(
			struct auxtrace_mmap_params *mp __maybe_unused,
			off_t auxtrace_offset __maybe_unused,
			unsigned int auxtrace_pages __maybe_unused,
			bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(
			struct auxtrace_mmap_params *mp __maybe_unused,
			struct perf_evlist *evlist __maybe_unused,
			int idx __maybe_unused,
			bool per_cpu __maybe_unused)
{
}

static void perf_mmap__munmap(struct perf_mmap *map)
{
	if (map->base != NULL) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		atomic_set(&map->refcnt, 0);
	}
auxtrace_mmap__munmap(&map->auxtrace_mmap); 943 } 944 945 static void perf_evlist__munmap_nofree(struct perf_evlist *evlist) 946 { 947 int i; 948 949 if (evlist->mmap) 950 for (i = 0; i < evlist->nr_mmaps; i++) 951 perf_mmap__munmap(&evlist->mmap[i]); 952 953 if (evlist->backward_mmap) 954 for (i = 0; i < evlist->nr_mmaps; i++) 955 perf_mmap__munmap(&evlist->backward_mmap[i]); 956 } 957 958 void perf_evlist__munmap(struct perf_evlist *evlist) 959 { 960 perf_evlist__munmap_nofree(evlist); 961 zfree(&evlist->mmap); 962 zfree(&evlist->backward_mmap); 963 } 964 965 static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist) 966 { 967 int i; 968 struct perf_mmap *map; 969 970 evlist->nr_mmaps = cpu_map__nr(evlist->cpus); 971 if (cpu_map__empty(evlist->cpus)) 972 evlist->nr_mmaps = thread_map__nr(evlist->threads); 973 map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); 974 if (!map) 975 return NULL; 976 977 for (i = 0; i < evlist->nr_mmaps; i++) 978 map[i].fd = -1; 979 return map; 980 } 981 982 struct mmap_params { 983 int prot; 984 int mask; 985 struct auxtrace_mmap_params auxtrace_mp; 986 }; 987 988 static int perf_mmap__mmap(struct perf_mmap *map, 989 struct mmap_params *mp, int fd) 990 { 991 /* 992 * The last one will be done at perf_evlist__mmap_consume(), so that we 993 * make sure we don't prevent tools from consuming every last event in 994 * the ring buffer. 995 * 996 * I.e. we can get the POLLHUP meaning that the fd doesn't exist 997 * anymore, but the last events for it are still in the ring buffer, 998 * waiting to be consumed. 999 * 1000 * Tools can chose to ignore this at their own discretion, but the 1001 * evlist layer can't just drop it when filtering events in 1002 * perf_evlist__filter_pollfd(). 1003 */ 1004 atomic_set(&map->refcnt, 2); 1005 map->prev = 0; 1006 map->mask = mp->mask; 1007 map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, 1008 MAP_SHARED, fd, 0); 1009 if (map->base == MAP_FAILED) { 1010 pr_debug2("failed to mmap perf event ring buffer, error %d\n", 1011 errno); 1012 map->base = NULL; 1013 return -1; 1014 } 1015 map->fd = fd; 1016 1017 if (auxtrace_mmap__mmap(&map->auxtrace_mmap, 1018 &mp->auxtrace_mp, map->base, fd)) 1019 return -1; 1020 1021 return 0; 1022 } 1023 1024 static bool 1025 perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused, 1026 struct perf_evsel *evsel) 1027 { 1028 if (evsel->attr.write_backward) 1029 return false; 1030 return true; 1031 } 1032 1033 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, 1034 struct mmap_params *mp, int cpu_idx, 1035 int thread, int *_output, int *_output_backward) 1036 { 1037 struct perf_evsel *evsel; 1038 int revent; 1039 int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx); 1040 1041 evlist__for_each_entry(evlist, evsel) { 1042 struct perf_mmap *maps = evlist->mmap; 1043 int *output = _output; 1044 int fd; 1045 int cpu; 1046 1047 if (evsel->attr.write_backward) { 1048 output = _output_backward; 1049 maps = evlist->backward_mmap; 1050 1051 if (!maps) { 1052 maps = perf_evlist__alloc_mmap(evlist); 1053 if (!maps) 1054 return -1; 1055 evlist->backward_mmap = maps; 1056 if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY) 1057 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING); 1058 } 1059 } 1060 1061 if (evsel->system_wide && thread) 1062 continue; 1063 1064 cpu = cpu_map__idx(evsel->cpus, evlist_cpu); 1065 if (cpu == -1) 1066 continue; 1067 1068 fd = FD(evsel, cpu, thread); 1069 1070 if (*output == -1) { 1071 *output = fd; 1072 1073 if 
(perf_mmap__mmap(&maps[idx], mp, *output) < 0) 1074 return -1; 1075 } else { 1076 if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) 1077 return -1; 1078 1079 perf_mmap__get(&maps[idx]); 1080 } 1081 1082 revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0; 1083 1084 /* 1085 * The system_wide flag causes a selected event to be opened 1086 * always without a pid. Consequently it will never get a 1087 * POLLHUP, but it is used for tracking in combination with 1088 * other events, so it should not need to be polled anyway. 1089 * Therefore don't add it for polling. 1090 */ 1091 if (!evsel->system_wide && 1092 __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) { 1093 perf_mmap__put(&maps[idx]); 1094 return -1; 1095 } 1096 1097 if (evsel->attr.read_format & PERF_FORMAT_ID) { 1098 if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread, 1099 fd) < 0) 1100 return -1; 1101 perf_evlist__set_sid_idx(evlist, evsel, idx, cpu, 1102 thread); 1103 } 1104 } 1105 1106 return 0; 1107 } 1108 1109 static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, 1110 struct mmap_params *mp) 1111 { 1112 int cpu, thread; 1113 int nr_cpus = cpu_map__nr(evlist->cpus); 1114 int nr_threads = thread_map__nr(evlist->threads); 1115 1116 pr_debug2("perf event ring buffer mmapped per cpu\n"); 1117 for (cpu = 0; cpu < nr_cpus; cpu++) { 1118 int output = -1; 1119 int output_backward = -1; 1120 1121 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu, 1122 true); 1123 1124 for (thread = 0; thread < nr_threads; thread++) { 1125 if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu, 1126 thread, &output, &output_backward)) 1127 goto out_unmap; 1128 } 1129 } 1130 1131 return 0; 1132 1133 out_unmap: 1134 perf_evlist__munmap_nofree(evlist); 1135 return -1; 1136 } 1137 1138 static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, 1139 struct mmap_params *mp) 1140 { 1141 int thread; 1142 int nr_threads = thread_map__nr(evlist->threads); 1143 1144 pr_debug2("perf event ring buffer mmapped per thread\n"); 1145 for (thread = 0; thread < nr_threads; thread++) { 1146 int output = -1; 1147 int output_backward = -1; 1148 1149 auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread, 1150 false); 1151 1152 if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread, 1153 &output, &output_backward)) 1154 goto out_unmap; 1155 } 1156 1157 return 0; 1158 1159 out_unmap: 1160 perf_evlist__munmap_nofree(evlist); 1161 return -1; 1162 } 1163 1164 unsigned long perf_event_mlock_kb_in_pages(void) 1165 { 1166 unsigned long pages; 1167 int max; 1168 1169 if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) { 1170 /* 1171 * Pick a once upon a time good value, i.e. things look 1172 * strange since we can't read a sysctl value, but lets not 1173 * die yet... 
1174 */ 1175 max = 512; 1176 } else { 1177 max -= (page_size / 1024); 1178 } 1179 1180 pages = (max * 1024) / page_size; 1181 if (!is_power_of_2(pages)) 1182 pages = rounddown_pow_of_two(pages); 1183 1184 return pages; 1185 } 1186 1187 size_t perf_evlist__mmap_size(unsigned long pages) 1188 { 1189 if (pages == UINT_MAX) 1190 pages = perf_event_mlock_kb_in_pages(); 1191 else if (!is_power_of_2(pages)) 1192 return 0; 1193 1194 return (pages + 1) * page_size; 1195 } 1196 1197 static long parse_pages_arg(const char *str, unsigned long min, 1198 unsigned long max) 1199 { 1200 unsigned long pages, val; 1201 static struct parse_tag tags[] = { 1202 { .tag = 'B', .mult = 1 }, 1203 { .tag = 'K', .mult = 1 << 10 }, 1204 { .tag = 'M', .mult = 1 << 20 }, 1205 { .tag = 'G', .mult = 1 << 30 }, 1206 { .tag = 0 }, 1207 }; 1208 1209 if (str == NULL) 1210 return -EINVAL; 1211 1212 val = parse_tag_value(str, tags); 1213 if (val != (unsigned long) -1) { 1214 /* we got file size value */ 1215 pages = PERF_ALIGN(val, page_size) / page_size; 1216 } else { 1217 /* we got pages count value */ 1218 char *eptr; 1219 pages = strtoul(str, &eptr, 10); 1220 if (*eptr != '\0') 1221 return -EINVAL; 1222 } 1223 1224 if (pages == 0 && min == 0) { 1225 /* leave number of pages at 0 */ 1226 } else if (!is_power_of_2(pages)) { 1227 char buf[100]; 1228 1229 /* round pages up to next power of 2 */ 1230 pages = roundup_pow_of_two(pages); 1231 if (!pages) 1232 return -EINVAL; 1233 1234 unit_number__scnprintf(buf, sizeof(buf), pages * page_size); 1235 pr_info("rounding mmap pages size to %s (%lu pages)\n", 1236 buf, pages); 1237 } 1238 1239 if (pages > max) 1240 return -EINVAL; 1241 1242 return pages; 1243 } 1244 1245 int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str) 1246 { 1247 unsigned long max = UINT_MAX; 1248 long pages; 1249 1250 if (max > SIZE_MAX / page_size) 1251 max = SIZE_MAX / page_size; 1252 1253 pages = parse_pages_arg(str, 1, max); 1254 if (pages < 0) { 1255 pr_err("Invalid argument for --mmap_pages/-m\n"); 1256 return -1; 1257 } 1258 1259 *mmap_pages = pages; 1260 return 0; 1261 } 1262 1263 int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, 1264 int unset __maybe_unused) 1265 { 1266 return __perf_evlist__parse_mmap_pages(opt->value, str); 1267 } 1268 1269 /** 1270 * perf_evlist__mmap_ex - Create mmaps to receive events. 1271 * @evlist: list of events 1272 * @pages: map length in pages 1273 * @overwrite: overwrite older events? 1274 * @auxtrace_pages - auxtrace map length in pages 1275 * @auxtrace_overwrite - overwrite older auxtrace data? 1276 * 1277 * If @overwrite is %false the user needs to signal event consumption using 1278 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this 1279 * automatically. 1280 * 1281 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data 1282 * consumption using auxtrace_mmap__write_tail(). 1283 * 1284 * Return: %0 on success, negative error code otherwise. 1285 */ 1286 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, 1287 bool overwrite, unsigned int auxtrace_pages, 1288 bool auxtrace_overwrite) 1289 { 1290 struct perf_evsel *evsel; 1291 const struct cpu_map *cpus = evlist->cpus; 1292 const struct thread_map *threads = evlist->threads; 1293 struct mmap_params mp = { 1294 .prot = PROT_READ | (overwrite ? 
0 : PROT_WRITE), 1295 }; 1296 1297 if (!evlist->mmap) 1298 evlist->mmap = perf_evlist__alloc_mmap(evlist); 1299 if (!evlist->mmap) 1300 return -ENOMEM; 1301 1302 if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) 1303 return -ENOMEM; 1304 1305 evlist->overwrite = overwrite; 1306 evlist->mmap_len = perf_evlist__mmap_size(pages); 1307 pr_debug("mmap size %zuB\n", evlist->mmap_len); 1308 mp.mask = evlist->mmap_len - page_size - 1; 1309 1310 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len, 1311 auxtrace_pages, auxtrace_overwrite); 1312 1313 evlist__for_each_entry(evlist, evsel) { 1314 if ((evsel->attr.read_format & PERF_FORMAT_ID) && 1315 evsel->sample_id == NULL && 1316 perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) 1317 return -ENOMEM; 1318 } 1319 1320 if (cpu_map__empty(cpus)) 1321 return perf_evlist__mmap_per_thread(evlist, &mp); 1322 1323 return perf_evlist__mmap_per_cpu(evlist, &mp); 1324 } 1325 1326 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, 1327 bool overwrite) 1328 { 1329 return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false); 1330 } 1331 1332 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) 1333 { 1334 struct cpu_map *cpus; 1335 struct thread_map *threads; 1336 1337 threads = thread_map__new_str(target->pid, target->tid, target->uid); 1338 1339 if (!threads) 1340 return -1; 1341 1342 if (target__uses_dummy_map(target)) 1343 cpus = cpu_map__dummy_new(); 1344 else 1345 cpus = cpu_map__new(target->cpu_list); 1346 1347 if (!cpus) 1348 goto out_delete_threads; 1349 1350 evlist->has_user_cpus = !!target->cpu_list; 1351 1352 perf_evlist__set_maps(evlist, cpus, threads); 1353 1354 return 0; 1355 1356 out_delete_threads: 1357 thread_map__put(threads); 1358 return -1; 1359 } 1360 1361 void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus, 1362 struct thread_map *threads) 1363 { 1364 /* 1365 * Allow for the possibility that one or another of the maps isn't being 1366 * changed i.e. don't put it. Note we are assuming the maps that are 1367 * being applied are brand new and evlist is taking ownership of the 1368 * original reference count of 1. If that is not the case it is up to 1369 * the caller to increase the reference count. 
1370 */ 1371 if (cpus != evlist->cpus) { 1372 cpu_map__put(evlist->cpus); 1373 evlist->cpus = cpu_map__get(cpus); 1374 } 1375 1376 if (threads != evlist->threads) { 1377 thread_map__put(evlist->threads); 1378 evlist->threads = thread_map__get(threads); 1379 } 1380 1381 perf_evlist__propagate_maps(evlist); 1382 } 1383 1384 void __perf_evlist__set_sample_bit(struct perf_evlist *evlist, 1385 enum perf_event_sample_format bit) 1386 { 1387 struct perf_evsel *evsel; 1388 1389 evlist__for_each_entry(evlist, evsel) 1390 __perf_evsel__set_sample_bit(evsel, bit); 1391 } 1392 1393 void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist, 1394 enum perf_event_sample_format bit) 1395 { 1396 struct perf_evsel *evsel; 1397 1398 evlist__for_each_entry(evlist, evsel) 1399 __perf_evsel__reset_sample_bit(evsel, bit); 1400 } 1401 1402 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel) 1403 { 1404 struct perf_evsel *evsel; 1405 int err = 0; 1406 const int ncpus = cpu_map__nr(evlist->cpus), 1407 nthreads = thread_map__nr(evlist->threads); 1408 1409 evlist__for_each_entry(evlist, evsel) { 1410 if (evsel->filter == NULL) 1411 continue; 1412 1413 /* 1414 * filters only work for tracepoint event, which doesn't have cpu limit. 1415 * So evlist and evsel should always be same. 1416 */ 1417 err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter); 1418 if (err) { 1419 *err_evsel = evsel; 1420 break; 1421 } 1422 } 1423 1424 return err; 1425 } 1426 1427 int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) 1428 { 1429 struct perf_evsel *evsel; 1430 int err = 0; 1431 1432 evlist__for_each_entry(evlist, evsel) { 1433 if (evsel->attr.type != PERF_TYPE_TRACEPOINT) 1434 continue; 1435 1436 err = perf_evsel__set_filter(evsel, filter); 1437 if (err) 1438 break; 1439 } 1440 1441 return err; 1442 } 1443 1444 int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids) 1445 { 1446 char *filter; 1447 int ret = -1; 1448 size_t i; 1449 1450 for (i = 0; i < npids; ++i) { 1451 if (i == 0) { 1452 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0) 1453 return -1; 1454 } else { 1455 char *tmp; 1456 1457 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0) 1458 goto out_free; 1459 1460 free(filter); 1461 filter = tmp; 1462 } 1463 } 1464 1465 ret = perf_evlist__set_filter(evlist, filter); 1466 out_free: 1467 free(filter); 1468 return ret; 1469 } 1470 1471 int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid) 1472 { 1473 return perf_evlist__set_filter_pids(evlist, 1, &pid); 1474 } 1475 1476 bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) 1477 { 1478 struct perf_evsel *pos; 1479 1480 if (evlist->nr_entries == 1) 1481 return true; 1482 1483 if (evlist->id_pos < 0 || evlist->is_pos < 0) 1484 return false; 1485 1486 evlist__for_each_entry(evlist, pos) { 1487 if (pos->id_pos != evlist->id_pos || 1488 pos->is_pos != evlist->is_pos) 1489 return false; 1490 } 1491 1492 return true; 1493 } 1494 1495 u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist) 1496 { 1497 struct perf_evsel *evsel; 1498 1499 if (evlist->combined_sample_type) 1500 return evlist->combined_sample_type; 1501 1502 evlist__for_each_entry(evlist, evsel) 1503 evlist->combined_sample_type |= evsel->attr.sample_type; 1504 1505 return evlist->combined_sample_type; 1506 } 1507 1508 u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist) 1509 { 1510 evlist->combined_sample_type = 0; 1511 return 
__perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	evlist__for_each_entry_reverse(evlist, evsel) {
		int n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}

static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	struct cpu_map *cpus;
	struct thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0;
out:
	return err;
out_put:
	cpu_map__put(cpus);
	goto out;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
1733 */ 1734 if (ret != 1) { 1735 if (ret == -1) 1736 perror("unable to read pipe"); 1737 exit(ret); 1738 } 1739 1740 execvp(argv[0], (char **)argv); 1741 1742 if (exec_error) { 1743 union sigval val; 1744 1745 val.sival_int = errno; 1746 if (sigqueue(getppid(), SIGUSR1, val)) 1747 perror(argv[0]); 1748 } else 1749 perror(argv[0]); 1750 exit(-1); 1751 } 1752 1753 if (exec_error) { 1754 struct sigaction act = { 1755 .sa_flags = SA_SIGINFO, 1756 .sa_sigaction = exec_error, 1757 }; 1758 sigaction(SIGUSR1, &act, NULL); 1759 } 1760 1761 if (target__none(target)) { 1762 if (evlist->threads == NULL) { 1763 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n", 1764 __func__, __LINE__); 1765 goto out_close_pipes; 1766 } 1767 thread_map__set_pid(evlist->threads, 0, evlist->workload.pid); 1768 } 1769 1770 close(child_ready_pipe[1]); 1771 close(go_pipe[0]); 1772 /* 1773 * wait for child to settle 1774 */ 1775 if (read(child_ready_pipe[0], &bf, 1) == -1) { 1776 perror("unable to read pipe"); 1777 goto out_close_pipes; 1778 } 1779 1780 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC); 1781 evlist->workload.cork_fd = go_pipe[1]; 1782 close(child_ready_pipe[0]); 1783 return 0; 1784 1785 out_close_pipes: 1786 close(go_pipe[0]); 1787 close(go_pipe[1]); 1788 out_close_ready_pipe: 1789 close(child_ready_pipe[0]); 1790 close(child_ready_pipe[1]); 1791 return -1; 1792 } 1793 1794 int perf_evlist__start_workload(struct perf_evlist *evlist) 1795 { 1796 if (evlist->workload.cork_fd > 0) { 1797 char bf = 0; 1798 int ret; 1799 /* 1800 * Remove the cork, let it rip! 1801 */ 1802 ret = write(evlist->workload.cork_fd, &bf, 1); 1803 if (ret < 0) 1804 perror("unable to write to pipe"); 1805 1806 close(evlist->workload.cork_fd); 1807 return ret; 1808 } 1809 1810 return 0; 1811 } 1812 1813 int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, 1814 struct perf_sample *sample) 1815 { 1816 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event); 1817 1818 if (!evsel) 1819 return -EFAULT; 1820 return perf_evsel__parse_sample(evsel, event, sample); 1821 } 1822 1823 size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) 1824 { 1825 struct perf_evsel *evsel; 1826 size_t printed = 0; 1827 1828 evlist__for_each_entry(evlist, evsel) { 1829 printed += fprintf(fp, "%s%s", evsel->idx ? 
", " : "", 1830 perf_evsel__name(evsel)); 1831 } 1832 1833 return printed + fprintf(fp, "\n"); 1834 } 1835 1836 int perf_evlist__strerror_open(struct perf_evlist *evlist, 1837 int err, char *buf, size_t size) 1838 { 1839 int printed, value; 1840 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); 1841 1842 switch (err) { 1843 case EACCES: 1844 case EPERM: 1845 printed = scnprintf(buf, size, 1846 "Error:\t%s.\n" 1847 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg); 1848 1849 value = perf_event_paranoid(); 1850 1851 printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); 1852 1853 if (value >= 2) { 1854 printed += scnprintf(buf + printed, size - printed, 1855 "For your workloads it needs to be <= 1\nHint:\t"); 1856 } 1857 printed += scnprintf(buf + printed, size - printed, 1858 "For system wide tracing it needs to be set to -1.\n"); 1859 1860 printed += scnprintf(buf + printed, size - printed, 1861 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n" 1862 "Hint:\tThe current value is %d.", value); 1863 break; 1864 case EINVAL: { 1865 struct perf_evsel *first = perf_evlist__first(evlist); 1866 int max_freq; 1867 1868 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0) 1869 goto out_default; 1870 1871 if (first->attr.sample_freq < (u64)max_freq) 1872 goto out_default; 1873 1874 printed = scnprintf(buf, size, 1875 "Error:\t%s.\n" 1876 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n" 1877 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.", 1878 emsg, max_freq, first->attr.sample_freq); 1879 break; 1880 } 1881 default: 1882 out_default: 1883 scnprintf(buf, size, "%s", emsg); 1884 break; 1885 } 1886 1887 return 0; 1888 } 1889 1890 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size) 1891 { 1892 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf)); 1893 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0; 1894 1895 switch (err) { 1896 case EPERM: 1897 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user); 1898 printed += scnprintf(buf + printed, size - printed, 1899 "Error:\t%s.\n" 1900 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n" 1901 "Hint:\tTried using %zd kB.\n", 1902 emsg, pages_max_per_user, pages_attempted); 1903 1904 if (pages_attempted >= pages_max_per_user) { 1905 printed += scnprintf(buf + printed, size - printed, 1906 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n", 1907 pages_max_per_user + pages_attempted); 1908 } 1909 1910 printed += scnprintf(buf + printed, size - printed, 1911 "Hint:\tTry using a smaller -m/--mmap-pages value."); 1912 break; 1913 default: 1914 scnprintf(buf, size, "%s", emsg); 1915 break; 1916 } 1917 1918 return 0; 1919 } 1920 1921 void perf_evlist__to_front(struct perf_evlist *evlist, 1922 struct perf_evsel *move_evsel) 1923 { 1924 struct perf_evsel *evsel, *n; 1925 LIST_HEAD(move); 1926 1927 if (move_evsel == perf_evlist__first(evlist)) 1928 return; 1929 1930 evlist__for_each_entry_safe(evlist, n, evsel) { 1931 if (evsel->leader == move_evsel->leader) 1932 list_move_tail(&evsel->node, &move); 1933 } 1934 1935 list_splice(&move, &evlist->entries); 1936 } 1937 1938 void perf_evlist__set_tracking_event(struct perf_evlist *evlist, 1939 struct perf_evsel *tracking_evsel) 1940 { 1941 struct perf_evsel *evsel; 1942 1943 if (tracking_evsel->tracking) 1944 return; 1945 1946 
	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
			       const char *str)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->backward_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get here\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}
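/*
 * Minimal usage sketch of the evlist lifecycle implemented in this file,
 * roughly the pattern followed by the tools built on top of it. This is
 * only a reference sketch: 'target' and 'done' are assumed to be set up by
 * the caller, and error reporting is omitted.
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *	union perf_event *event;
 *	int i;
 *
 *	if (evlist == NULL ||
 *	    perf_evlist__create_maps(evlist, &target) < 0 ||
 *	    perf_evlist__open(evlist) < 0 ||
 *	    perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		goto out_delete;
 *
 *	perf_evlist__enable(evlist);
 *
 *	while (!done) {
 *		perf_evlist__poll(evlist, 100);
 *
 *		for (i = 0; i < evlist->nr_mmaps; i++) {
 *			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
 *				deliver the event, e.g. via perf_evlist__parse_sample(),
 *				then release the slot:
 *				perf_evlist__mmap_consume(evlist, i);
 *			}
 *		}
 *	}
 *
 *	perf_evlist__disable(evlist);
 * out_delete:
 *	perf_evlist__delete(evlist);
 *
 * A forked workload would additionally be corked/uncorked around the above
 * via perf_evlist__prepare_workload() and perf_evlist__start_workload().
 */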