1 /* 2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> 3 * 4 * Parts came from builtin-{top,stat,record}.c, see those files for further 5 * copyright notes. 6 * 7 * Released under the GPL v2. (and only v2, not any later version) 8 */ 9 #include "util.h" 10 #include <api/fs/fs.h> 11 #include <poll.h> 12 #include "cpumap.h" 13 #include "thread_map.h" 14 #include "target.h" 15 #include "evlist.h" 16 #include "evsel.h" 17 #include "debug.h" 18 #include <unistd.h> 19 20 #include "parse-events.h" 21 #include <subcmd/parse-options.h> 22 23 #include <sys/mman.h> 24 25 #include <linux/bitops.h> 26 #include <linux/hash.h> 27 #include <linux/log2.h> 28 #include <linux/err.h> 29 30 static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx); 31 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx); 32 33 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 34 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 35 36 void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, 37 struct thread_map *threads) 38 { 39 int i; 40 41 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) 42 INIT_HLIST_HEAD(&evlist->heads[i]); 43 INIT_LIST_HEAD(&evlist->entries); 44 perf_evlist__set_maps(evlist, cpus, threads); 45 fdarray__init(&evlist->pollfd, 64); 46 evlist->workload.pid = -1; 47 } 48 49 struct perf_evlist *perf_evlist__new(void) 50 { 51 struct perf_evlist *evlist = zalloc(sizeof(*evlist)); 52 53 if (evlist != NULL) 54 perf_evlist__init(evlist, NULL, NULL); 55 56 return evlist; 57 } 58 59 struct perf_evlist *perf_evlist__new_default(void) 60 { 61 struct perf_evlist *evlist = perf_evlist__new(); 62 63 if (evlist && perf_evlist__add_default(evlist)) { 64 perf_evlist__delete(evlist); 65 evlist = NULL; 66 } 67 68 return evlist; 69 } 70 71 struct perf_evlist *perf_evlist__new_dummy(void) 72 { 73 struct perf_evlist *evlist = perf_evlist__new(); 74 75 if (evlist && perf_evlist__add_dummy(evlist)) { 76 perf_evlist__delete(evlist); 77 evlist = NULL; 78 } 79 80 return evlist; 81 } 82 83 /** 84 * perf_evlist__set_id_pos - set the positions of event ids. 85 * @evlist: selected event list 86 * 87 * Events with compatible sample types all have the same id_pos 88 * and is_pos. For convenience, put a copy on evlist. 
89 */ 90 void perf_evlist__set_id_pos(struct perf_evlist *evlist) 91 { 92 struct perf_evsel *first = perf_evlist__first(evlist); 93 94 evlist->id_pos = first->id_pos; 95 evlist->is_pos = first->is_pos; 96 } 97 98 static void perf_evlist__update_id_pos(struct perf_evlist *evlist) 99 { 100 struct perf_evsel *evsel; 101 102 evlist__for_each(evlist, evsel) 103 perf_evsel__calc_id_pos(evsel); 104 105 perf_evlist__set_id_pos(evlist); 106 } 107 108 static void perf_evlist__purge(struct perf_evlist *evlist) 109 { 110 struct perf_evsel *pos, *n; 111 112 evlist__for_each_safe(evlist, n, pos) { 113 list_del_init(&pos->node); 114 pos->evlist = NULL; 115 perf_evsel__delete(pos); 116 } 117 118 evlist->nr_entries = 0; 119 } 120 121 void perf_evlist__exit(struct perf_evlist *evlist) 122 { 123 zfree(&evlist->mmap); 124 fdarray__exit(&evlist->pollfd); 125 } 126 127 void perf_evlist__delete(struct perf_evlist *evlist) 128 { 129 perf_evlist__munmap(evlist); 130 perf_evlist__close(evlist); 131 cpu_map__put(evlist->cpus); 132 thread_map__put(evlist->threads); 133 evlist->cpus = NULL; 134 evlist->threads = NULL; 135 perf_evlist__purge(evlist); 136 perf_evlist__exit(evlist); 137 free(evlist); 138 } 139 140 static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, 141 struct perf_evsel *evsel) 142 { 143 /* 144 * We already have cpus for evsel (via PMU sysfs) so 145 * keep it, if there's no target cpu list defined. 146 */ 147 if (!evsel->own_cpus || evlist->has_user_cpus) { 148 cpu_map__put(evsel->cpus); 149 evsel->cpus = cpu_map__get(evlist->cpus); 150 } else if (evsel->cpus != evsel->own_cpus) { 151 cpu_map__put(evsel->cpus); 152 evsel->cpus = cpu_map__get(evsel->own_cpus); 153 } 154 155 thread_map__put(evsel->threads); 156 evsel->threads = thread_map__get(evlist->threads); 157 } 158 159 static void perf_evlist__propagate_maps(struct perf_evlist *evlist) 160 { 161 struct perf_evsel *evsel; 162 163 evlist__for_each(evlist, evsel) 164 __perf_evlist__propagate_maps(evlist, evsel); 165 } 166 167 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) 168 { 169 entry->evlist = evlist; 170 list_add_tail(&entry->node, &evlist->entries); 171 entry->idx = evlist->nr_entries; 172 entry->tracking = !entry->idx; 173 174 if (!evlist->nr_entries++) 175 perf_evlist__set_id_pos(evlist); 176 177 __perf_evlist__propagate_maps(evlist, entry); 178 } 179 180 void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel) 181 { 182 evsel->evlist = NULL; 183 list_del_init(&evsel->node); 184 evlist->nr_entries -= 1; 185 } 186 187 void perf_evlist__splice_list_tail(struct perf_evlist *evlist, 188 struct list_head *list) 189 { 190 struct perf_evsel *evsel, *temp; 191 192 __evlist__for_each_safe(list, temp, evsel) { 193 list_del_init(&evsel->node); 194 perf_evlist__add(evlist, evsel); 195 } 196 } 197 198 void __perf_evlist__set_leader(struct list_head *list) 199 { 200 struct perf_evsel *evsel, *leader; 201 202 leader = list_entry(list->next, struct perf_evsel, node); 203 evsel = list_entry(list->prev, struct perf_evsel, node); 204 205 leader->nr_members = evsel->idx - leader->idx + 1; 206 207 __evlist__for_each(list, evsel) { 208 evsel->leader = leader; 209 } 210 } 211 212 void perf_evlist__set_leader(struct perf_evlist *evlist) 213 { 214 if (evlist->nr_entries) { 215 evlist->nr_groups = evlist->nr_entries > 1 ? 
1 : 0; 216 __perf_evlist__set_leader(&evlist->entries); 217 } 218 } 219 220 void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr) 221 { 222 attr->precise_ip = 3; 223 224 while (attr->precise_ip != 0) { 225 int fd = sys_perf_event_open(attr, 0, -1, -1, 0); 226 if (fd != -1) { 227 close(fd); 228 break; 229 } 230 --attr->precise_ip; 231 } 232 } 233 234 int perf_evlist__add_default(struct perf_evlist *evlist) 235 { 236 struct perf_event_attr attr = { 237 .type = PERF_TYPE_HARDWARE, 238 .config = PERF_COUNT_HW_CPU_CYCLES, 239 }; 240 struct perf_evsel *evsel; 241 242 event_attr_init(&attr); 243 244 perf_event_attr__set_max_precise_ip(&attr); 245 246 evsel = perf_evsel__new(&attr); 247 if (evsel == NULL) 248 goto error; 249 250 /* use asprintf() because free(evsel) assumes name is allocated */ 251 if (asprintf(&evsel->name, "cycles%.*s", 252 attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0) 253 goto error_free; 254 255 perf_evlist__add(evlist, evsel); 256 return 0; 257 error_free: 258 perf_evsel__delete(evsel); 259 error: 260 return -ENOMEM; 261 } 262 263 int perf_evlist__add_dummy(struct perf_evlist *evlist) 264 { 265 struct perf_event_attr attr = { 266 .type = PERF_TYPE_SOFTWARE, 267 .config = PERF_COUNT_SW_DUMMY, 268 .size = sizeof(attr), /* to capture ABI version */ 269 }; 270 struct perf_evsel *evsel = perf_evsel__new(&attr); 271 272 if (evsel == NULL) 273 return -ENOMEM; 274 275 perf_evlist__add(evlist, evsel); 276 return 0; 277 } 278 279 static int perf_evlist__add_attrs(struct perf_evlist *evlist, 280 struct perf_event_attr *attrs, size_t nr_attrs) 281 { 282 struct perf_evsel *evsel, *n; 283 LIST_HEAD(head); 284 size_t i; 285 286 for (i = 0; i < nr_attrs; i++) { 287 evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i); 288 if (evsel == NULL) 289 goto out_delete_partial_list; 290 list_add_tail(&evsel->node, &head); 291 } 292 293 perf_evlist__splice_list_tail(evlist, &head); 294 295 return 0; 296 297 out_delete_partial_list: 298 __evlist__for_each_safe(&head, n, evsel) 299 perf_evsel__delete(evsel); 300 return -1; 301 } 302 303 int __perf_evlist__add_default_attrs(struct perf_evlist *evlist, 304 struct perf_event_attr *attrs, size_t nr_attrs) 305 { 306 size_t i; 307 308 for (i = 0; i < nr_attrs; i++) 309 event_attr_init(attrs + i); 310 311 return perf_evlist__add_attrs(evlist, attrs, nr_attrs); 312 } 313 314 struct perf_evsel * 315 perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id) 316 { 317 struct perf_evsel *evsel; 318 319 evlist__for_each(evlist, evsel) { 320 if (evsel->attr.type == PERF_TYPE_TRACEPOINT && 321 (int)evsel->attr.config == id) 322 return evsel; 323 } 324 325 return NULL; 326 } 327 328 struct perf_evsel * 329 perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist, 330 const char *name) 331 { 332 struct perf_evsel *evsel; 333 334 evlist__for_each(evlist, evsel) { 335 if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) && 336 (strcmp(evsel->name, name) == 0)) 337 return evsel; 338 } 339 340 return NULL; 341 } 342 343 int perf_evlist__add_newtp(struct perf_evlist *evlist, 344 const char *sys, const char *name, void *handler) 345 { 346 struct perf_evsel *evsel = perf_evsel__newtp(sys, name); 347 348 if (IS_ERR(evsel)) 349 return -1; 350 351 evsel->handler = handler; 352 perf_evlist__add(evlist, evsel); 353 return 0; 354 } 355 356 static int perf_evlist__nr_threads(struct perf_evlist *evlist, 357 struct perf_evsel *evsel) 358 { 359 if (evsel->system_wide) 360 return 1; 361 else 362 return thread_map__nr(evlist->threads); 
363 } 364 365 void perf_evlist__disable(struct perf_evlist *evlist) 366 { 367 struct perf_evsel *pos; 368 369 evlist__for_each(evlist, pos) { 370 if (!perf_evsel__is_group_leader(pos) || !pos->fd) 371 continue; 372 perf_evsel__disable(pos); 373 } 374 375 evlist->enabled = false; 376 } 377 378 void perf_evlist__enable(struct perf_evlist *evlist) 379 { 380 struct perf_evsel *pos; 381 382 evlist__for_each(evlist, pos) { 383 if (!perf_evsel__is_group_leader(pos) || !pos->fd) 384 continue; 385 perf_evsel__enable(pos); 386 } 387 388 evlist->enabled = true; 389 } 390 391 void perf_evlist__toggle_enable(struct perf_evlist *evlist) 392 { 393 (evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist); 394 } 395 396 static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist, 397 struct perf_evsel *evsel, int cpu) 398 { 399 int thread, err; 400 int nr_threads = perf_evlist__nr_threads(evlist, evsel); 401 402 if (!evsel->fd) 403 return -EINVAL; 404 405 for (thread = 0; thread < nr_threads; thread++) { 406 err = ioctl(FD(evsel, cpu, thread), 407 PERF_EVENT_IOC_ENABLE, 0); 408 if (err) 409 return err; 410 } 411 return 0; 412 } 413 414 static int perf_evlist__enable_event_thread(struct perf_evlist *evlist, 415 struct perf_evsel *evsel, 416 int thread) 417 { 418 int cpu, err; 419 int nr_cpus = cpu_map__nr(evlist->cpus); 420 421 if (!evsel->fd) 422 return -EINVAL; 423 424 for (cpu = 0; cpu < nr_cpus; cpu++) { 425 err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0); 426 if (err) 427 return err; 428 } 429 return 0; 430 } 431 432 int perf_evlist__enable_event_idx(struct perf_evlist *evlist, 433 struct perf_evsel *evsel, int idx) 434 { 435 bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus); 436 437 if (per_cpu_mmaps) 438 return perf_evlist__enable_event_cpu(evlist, evsel, idx); 439 else 440 return perf_evlist__enable_event_thread(evlist, evsel, idx); 441 } 442 443 int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) 444 { 445 int nr_cpus = cpu_map__nr(evlist->cpus); 446 int nr_threads = thread_map__nr(evlist->threads); 447 int nfds = 0; 448 struct perf_evsel *evsel; 449 450 evlist__for_each(evlist, evsel) { 451 if (evsel->system_wide) 452 nfds += nr_cpus; 453 else 454 nfds += nr_cpus * nr_threads; 455 } 456 457 if (fdarray__available_entries(&evlist->pollfd) < nfds && 458 fdarray__grow(&evlist->pollfd, nfds) < 0) 459 return -ENOMEM; 460 461 return 0; 462 } 463 464 static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx) 465 { 466 int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP); 467 /* 468 * Save the idx so that when we filter out fds POLLHUP'ed we can 469 * close the associated evlist->mmap[] entry. 
470 */ 471 if (pos >= 0) { 472 evlist->pollfd.priv[pos].idx = idx; 473 474 fcntl(fd, F_SETFL, O_NONBLOCK); 475 } 476 477 return pos; 478 } 479 480 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) 481 { 482 return __perf_evlist__add_pollfd(evlist, fd, -1); 483 } 484 485 static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd) 486 { 487 struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd); 488 489 perf_evlist__mmap_put(evlist, fda->priv[fd].idx); 490 } 491 492 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) 493 { 494 return fdarray__filter(&evlist->pollfd, revents_and_mask, 495 perf_evlist__munmap_filtered); 496 } 497 498 int perf_evlist__poll(struct perf_evlist *evlist, int timeout) 499 { 500 return fdarray__poll(&evlist->pollfd, timeout); 501 } 502 503 static void perf_evlist__id_hash(struct perf_evlist *evlist, 504 struct perf_evsel *evsel, 505 int cpu, int thread, u64 id) 506 { 507 int hash; 508 struct perf_sample_id *sid = SID(evsel, cpu, thread); 509 510 sid->id = id; 511 sid->evsel = evsel; 512 hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS); 513 hlist_add_head(&sid->node, &evlist->heads[hash]); 514 } 515 516 void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel, 517 int cpu, int thread, u64 id) 518 { 519 perf_evlist__id_hash(evlist, evsel, cpu, thread, id); 520 evsel->id[evsel->ids++] = id; 521 } 522 523 int perf_evlist__id_add_fd(struct perf_evlist *evlist, 524 struct perf_evsel *evsel, 525 int cpu, int thread, int fd) 526 { 527 u64 read_data[4] = { 0, }; 528 int id_idx = 1; /* The first entry is the counter value */ 529 u64 id; 530 int ret; 531 532 ret = ioctl(fd, PERF_EVENT_IOC_ID, &id); 533 if (!ret) 534 goto add; 535 536 if (errno != ENOTTY) 537 return -1; 538 539 /* Legacy way to get event id.. All hail to old kernels! */ 540 541 /* 542 * This way does not work with group format read, so bail 543 * out in that case. 
544 */ 545 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP) 546 return -1; 547 548 if (!(evsel->attr.read_format & PERF_FORMAT_ID) || 549 read(fd, &read_data, sizeof(read_data)) == -1) 550 return -1; 551 552 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 553 ++id_idx; 554 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 555 ++id_idx; 556 557 id = read_data[id_idx]; 558 559 add: 560 perf_evlist__id_add(evlist, evsel, cpu, thread, id); 561 return 0; 562 } 563 564 static void perf_evlist__set_sid_idx(struct perf_evlist *evlist, 565 struct perf_evsel *evsel, int idx, int cpu, 566 int thread) 567 { 568 struct perf_sample_id *sid = SID(evsel, cpu, thread); 569 sid->idx = idx; 570 if (evlist->cpus && cpu >= 0) 571 sid->cpu = evlist->cpus->map[cpu]; 572 else 573 sid->cpu = -1; 574 if (!evsel->system_wide && evlist->threads && thread >= 0) 575 sid->tid = thread_map__pid(evlist->threads, thread); 576 else 577 sid->tid = -1; 578 } 579 580 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id) 581 { 582 struct hlist_head *head; 583 struct perf_sample_id *sid; 584 int hash; 585 586 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 587 head = &evlist->heads[hash]; 588 589 hlist_for_each_entry(sid, head, node) 590 if (sid->id == id) 591 return sid; 592 593 return NULL; 594 } 595 596 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) 597 { 598 struct perf_sample_id *sid; 599 600 if (evlist->nr_entries == 1 || !id) 601 return perf_evlist__first(evlist); 602 603 sid = perf_evlist__id2sid(evlist, id); 604 if (sid) 605 return sid->evsel; 606 607 if (!perf_evlist__sample_id_all(evlist)) 608 return perf_evlist__first(evlist); 609 610 return NULL; 611 } 612 613 struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist, 614 u64 id) 615 { 616 struct perf_sample_id *sid; 617 618 if (!id) 619 return NULL; 620 621 sid = perf_evlist__id2sid(evlist, id); 622 if (sid) 623 return sid->evsel; 624 625 return NULL; 626 } 627 628 static int perf_evlist__event2id(struct perf_evlist *evlist, 629 union perf_event *event, u64 *id) 630 { 631 const u64 *array = event->sample.array; 632 ssize_t n; 633 634 n = (event->header.size - sizeof(event->header)) >> 3; 635 636 if (event->header.type == PERF_RECORD_SAMPLE) { 637 if (evlist->id_pos >= n) 638 return -1; 639 *id = array[evlist->id_pos]; 640 } else { 641 if (evlist->is_pos > n) 642 return -1; 643 n -= evlist->is_pos; 644 *id = array[n]; 645 } 646 return 0; 647 } 648 649 static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist, 650 union perf_event *event) 651 { 652 struct perf_evsel *first = perf_evlist__first(evlist); 653 struct hlist_head *head; 654 struct perf_sample_id *sid; 655 int hash; 656 u64 id; 657 658 if (evlist->nr_entries == 1) 659 return first; 660 661 if (!first->attr.sample_id_all && 662 event->header.type != PERF_RECORD_SAMPLE) 663 return first; 664 665 if (perf_evlist__event2id(evlist, event, &id)) 666 return NULL; 667 668 /* Synthesized events have an id of zero */ 669 if (!id) 670 return first; 671 672 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 673 head = &evlist->heads[hash]; 674 675 hlist_for_each_entry(sid, head, node) { 676 if (sid->id == id) 677 return sid->evsel; 678 } 679 return NULL; 680 } 681 682 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) 683 { 684 struct perf_mmap *md = &evlist->mmap[idx]; 685 u64 head; 686 u64 old = md->prev; 687 unsigned char *data = md->base + page_size; 688 union perf_event 
*event = NULL; 689 690 /* 691 * Check if event was unmapped due to a POLLHUP/POLLERR. 692 */ 693 if (!atomic_read(&md->refcnt)) 694 return NULL; 695 696 head = perf_mmap__read_head(md); 697 if (evlist->overwrite) { 698 /* 699 * If we're further behind than half the buffer, there's a chance 700 * the writer will bite our tail and mess up the samples under us. 701 * 702 * If we somehow ended up ahead of the head, we got messed up. 703 * 704 * In either case, truncate and restart at head. 705 */ 706 int diff = head - old; 707 if (diff > md->mask / 2 || diff < 0) { 708 fprintf(stderr, "WARNING: failed to keep up with mmap data.\n"); 709 710 /* 711 * head points to a known good entry, start there. 712 */ 713 old = head; 714 } 715 } 716 717 if (old != head) { 718 size_t size; 719 720 event = (union perf_event *)&data[old & md->mask]; 721 size = event->header.size; 722 723 /* 724 * Event straddles the mmap boundary -- header should always 725 * be inside due to u64 alignment of output. 726 */ 727 if ((old & md->mask) + size != ((old + size) & md->mask)) { 728 unsigned int offset = old; 729 unsigned int len = min(sizeof(*event), size), cpy; 730 void *dst = md->event_copy; 731 732 do { 733 cpy = min(md->mask + 1 - (offset & md->mask), len); 734 memcpy(dst, &data[offset & md->mask], cpy); 735 offset += cpy; 736 dst += cpy; 737 len -= cpy; 738 } while (len); 739 740 event = (union perf_event *) md->event_copy; 741 } 742 743 old += size; 744 } 745 746 md->prev = old; 747 748 return event; 749 } 750 751 static bool perf_mmap__empty(struct perf_mmap *md) 752 { 753 return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base; 754 } 755 756 static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx) 757 { 758 atomic_inc(&evlist->mmap[idx].refcnt); 759 } 760 761 static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx) 762 { 763 BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0); 764 765 if (atomic_dec_and_test(&evlist->mmap[idx].refcnt)) 766 __perf_evlist__munmap(evlist, idx); 767 } 768 769 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx) 770 { 771 struct perf_mmap *md = &evlist->mmap[idx]; 772 773 if (!evlist->overwrite) { 774 u64 old = md->prev; 775 776 perf_mmap__write_tail(md, old); 777 } 778 779 if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md)) 780 perf_evlist__mmap_put(evlist, idx); 781 } 782 783 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused, 784 struct auxtrace_mmap_params *mp __maybe_unused, 785 void *userpg __maybe_unused, 786 int fd __maybe_unused) 787 { 788 return 0; 789 } 790 791 void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused) 792 { 793 } 794 795 void __weak auxtrace_mmap_params__init( 796 struct auxtrace_mmap_params *mp __maybe_unused, 797 off_t auxtrace_offset __maybe_unused, 798 unsigned int auxtrace_pages __maybe_unused, 799 bool auxtrace_overwrite __maybe_unused) 800 { 801 } 802 803 void __weak auxtrace_mmap_params__set_idx( 804 struct auxtrace_mmap_params *mp __maybe_unused, 805 struct perf_evlist *evlist __maybe_unused, 806 int idx __maybe_unused, 807 bool per_cpu __maybe_unused) 808 { 809 } 810 811 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx) 812 { 813 if (evlist->mmap[idx].base != NULL) { 814 munmap(evlist->mmap[idx].base, evlist->mmap_len); 815 evlist->mmap[idx].base = NULL; 816 atomic_set(&evlist->mmap[idx].refcnt, 0); 817 } 818 auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap); 819 } 820 821 void perf_evlist__munmap(struct perf_evlist 
*evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

struct mmap_params {
	int prot;
	int mask;
	struct auxtrace_mmap_params auxtrace_mp;
};

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	atomic_set(&evlist->mmap[idx].refcnt, 2);
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
		return -1;

	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_evlist__mmap_get(evlist, idx);
		}

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
			perf_evlist__mmap_put(evlist, idx);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX) {
		int max;

		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
			/*
			 * Pick a once upon a time good value, i.e. things look
			 * strange since we can't read a sysctl value, but let's
			 * not die yet...
999 */ 1000 max = 512; 1001 } else { 1002 max -= (page_size / 1024); 1003 } 1004 1005 pages = (max * 1024) / page_size; 1006 if (!is_power_of_2(pages)) 1007 pages = rounddown_pow_of_two(pages); 1008 } else if (!is_power_of_2(pages)) 1009 return 0; 1010 1011 return (pages + 1) * page_size; 1012 } 1013 1014 static long parse_pages_arg(const char *str, unsigned long min, 1015 unsigned long max) 1016 { 1017 unsigned long pages, val; 1018 static struct parse_tag tags[] = { 1019 { .tag = 'B', .mult = 1 }, 1020 { .tag = 'K', .mult = 1 << 10 }, 1021 { .tag = 'M', .mult = 1 << 20 }, 1022 { .tag = 'G', .mult = 1 << 30 }, 1023 { .tag = 0 }, 1024 }; 1025 1026 if (str == NULL) 1027 return -EINVAL; 1028 1029 val = parse_tag_value(str, tags); 1030 if (val != (unsigned long) -1) { 1031 /* we got file size value */ 1032 pages = PERF_ALIGN(val, page_size) / page_size; 1033 } else { 1034 /* we got pages count value */ 1035 char *eptr; 1036 pages = strtoul(str, &eptr, 10); 1037 if (*eptr != '\0') 1038 return -EINVAL; 1039 } 1040 1041 if (pages == 0 && min == 0) { 1042 /* leave number of pages at 0 */ 1043 } else if (!is_power_of_2(pages)) { 1044 /* round pages up to next power of 2 */ 1045 pages = roundup_pow_of_two(pages); 1046 if (!pages) 1047 return -EINVAL; 1048 pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n", 1049 pages * page_size, pages); 1050 } 1051 1052 if (pages > max) 1053 return -EINVAL; 1054 1055 return pages; 1056 } 1057 1058 int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str) 1059 { 1060 unsigned long max = UINT_MAX; 1061 long pages; 1062 1063 if (max > SIZE_MAX / page_size) 1064 max = SIZE_MAX / page_size; 1065 1066 pages = parse_pages_arg(str, 1, max); 1067 if (pages < 0) { 1068 pr_err("Invalid argument for --mmap_pages/-m\n"); 1069 return -1; 1070 } 1071 1072 *mmap_pages = pages; 1073 return 0; 1074 } 1075 1076 int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, 1077 int unset __maybe_unused) 1078 { 1079 return __perf_evlist__parse_mmap_pages(opt->value, str); 1080 } 1081 1082 /** 1083 * perf_evlist__mmap_ex - Create mmaps to receive events. 1084 * @evlist: list of events 1085 * @pages: map length in pages 1086 * @overwrite: overwrite older events? 1087 * @auxtrace_pages - auxtrace map length in pages 1088 * @auxtrace_overwrite - overwrite older auxtrace data? 1089 * 1090 * If @overwrite is %false the user needs to signal event consumption using 1091 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this 1092 * automatically. 1093 * 1094 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data 1095 * consumption using auxtrace_mmap__write_tail(). 1096 * 1097 * Return: %0 on success, negative error code otherwise. 1098 */ 1099 int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, 1100 bool overwrite, unsigned int auxtrace_pages, 1101 bool auxtrace_overwrite) 1102 { 1103 struct perf_evsel *evsel; 1104 const struct cpu_map *cpus = evlist->cpus; 1105 const struct thread_map *threads = evlist->threads; 1106 struct mmap_params mp = { 1107 .prot = PROT_READ | (overwrite ? 
				  0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
}

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	struct cpu_map *cpus;
	struct thread_map *threads;

	threads = thread_map__new_str(target->pid, target->tid, target->uid);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = cpu_map__dummy_new();
	else
		cpus = cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_delete_threads:
	thread_map__put(threads);
	return -1;
}

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->cpus) {
		cpu_map__put(evlist->cpus);
		evlist->cpus = cpus;
	}

	if (threads != evlist->threads) {
		thread_map__put(evlist->threads);
		evlist->threads = threads;
	}

	perf_evlist__propagate_maps(evlist);
}
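/*
 * A minimal ownership sketch, kept under #if 0 so it is not compiled in;
 * the example_* helper below is hypothetical.  It illustrates the rule
 * documented in perf_evlist__set_maps(): brand new maps are handed over
 * together with their initial reference, so the caller only puts a map
 * it failed to hand over.
 */
#if 0
static int example_set_syswide_maps(struct perf_evlist *evlist)
{
	struct cpu_map *cpus = cpu_map__new(NULL);	/* all online cpus */
	struct thread_map *threads;

	if (!cpus)
		return -ENOMEM;

	threads = thread_map__new_dummy();
	if (!threads) {
		cpu_map__put(cpus);	/* not handed over, drop our reference */
		return -ENOMEM;
	}

	/* The evlist now owns both references, so no *__put() calls here. */
	perf_evlist__set_maps(evlist, cpus, threads);
	return 0;
}
#endif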
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * filters only work for tracepoint events, which don't have a
		 * cpu limit, so the evlist and evsel maps should always be the
		 * same.
		 */
		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = NULL;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}

int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
{
	return perf_evlist__set_filter_pids(evlist, 1, &pid);
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each(evlist, evsel)
		branch_type |= evsel->attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID.
*/ 1326 if ((sample_type & PERF_SAMPLE_READ) && 1327 !(read_format & PERF_FORMAT_ID)) { 1328 return false; 1329 } 1330 1331 return true; 1332 } 1333 1334 u64 perf_evlist__read_format(struct perf_evlist *evlist) 1335 { 1336 struct perf_evsel *first = perf_evlist__first(evlist); 1337 return first->attr.read_format; 1338 } 1339 1340 u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) 1341 { 1342 struct perf_evsel *first = perf_evlist__first(evlist); 1343 struct perf_sample *data; 1344 u64 sample_type; 1345 u16 size = 0; 1346 1347 if (!first->attr.sample_id_all) 1348 goto out; 1349 1350 sample_type = first->attr.sample_type; 1351 1352 if (sample_type & PERF_SAMPLE_TID) 1353 size += sizeof(data->tid) * 2; 1354 1355 if (sample_type & PERF_SAMPLE_TIME) 1356 size += sizeof(data->time); 1357 1358 if (sample_type & PERF_SAMPLE_ID) 1359 size += sizeof(data->id); 1360 1361 if (sample_type & PERF_SAMPLE_STREAM_ID) 1362 size += sizeof(data->stream_id); 1363 1364 if (sample_type & PERF_SAMPLE_CPU) 1365 size += sizeof(data->cpu) * 2; 1366 1367 if (sample_type & PERF_SAMPLE_IDENTIFIER) 1368 size += sizeof(data->id); 1369 out: 1370 return size; 1371 } 1372 1373 bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) 1374 { 1375 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; 1376 1377 evlist__for_each_continue(evlist, pos) { 1378 if (first->attr.sample_id_all != pos->attr.sample_id_all) 1379 return false; 1380 } 1381 1382 return true; 1383 } 1384 1385 bool perf_evlist__sample_id_all(struct perf_evlist *evlist) 1386 { 1387 struct perf_evsel *first = perf_evlist__first(evlist); 1388 return first->attr.sample_id_all; 1389 } 1390 1391 void perf_evlist__set_selected(struct perf_evlist *evlist, 1392 struct perf_evsel *evsel) 1393 { 1394 evlist->selected = evsel; 1395 } 1396 1397 void perf_evlist__close(struct perf_evlist *evlist) 1398 { 1399 struct perf_evsel *evsel; 1400 int ncpus = cpu_map__nr(evlist->cpus); 1401 int nthreads = thread_map__nr(evlist->threads); 1402 int n; 1403 1404 evlist__for_each_reverse(evlist, evsel) { 1405 n = evsel->cpus ? evsel->cpus->nr : ncpus; 1406 perf_evsel__close(evsel, n, nthreads); 1407 } 1408 } 1409 1410 static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist) 1411 { 1412 struct cpu_map *cpus; 1413 struct thread_map *threads; 1414 int err = -ENOMEM; 1415 1416 /* 1417 * Try reading /sys/devices/system/cpu/online to get 1418 * an all cpus map. 
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(evlist, cpus, threads);

	return 0;

out_put:
	cpu_map__put(cpus);
out:
	return err;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}
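/*
 * A minimal setup sketch, kept under #if 0 so it is not compiled in; the
 * example_* helper is hypothetical and assumes a "struct target" already
 * filled in by the caller.  It strings the pieces above together: create
 * the cpu/thread maps, open the counters, then mmap the ring buffers,
 * using the strerror helpers at the end of this file to report failures.
 */
#if 0
static struct perf_evlist *example_evlist_setup(struct target *target)
{
	char msg[512];
	struct perf_evlist *evlist = perf_evlist__new_default();

	if (evlist == NULL)
		return NULL;

	if (perf_evlist__create_maps(evlist, target) < 0)
		goto out_delete;

	if (perf_evlist__open(evlist) < 0) {
		perf_evlist__strerror_open(evlist, errno, msg, sizeof(msg));
		pr_err("%s\n", msg);
		goto out_delete;
	}

	/* 128 data pages per ring buffer, non-overwrite mode */
	if (perf_evlist__mmap(evlist, 128, false) < 0) {
		perf_evlist__strerror_mmap(evlist, errno, msg, sizeof(msg));
		pr_err("%s\n", msg);
		goto out_delete;
	}

	return evlist;

out_delete:
	perf_evlist__delete(evlist);
	return NULL;
}
#endif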
int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ?
", " : "", 1621 perf_evsel__name(evsel)); 1622 } 1623 1624 return printed + fprintf(fp, "\n"); 1625 } 1626 1627 int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused, 1628 int err, char *buf, size_t size) 1629 { 1630 int printed, value; 1631 char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf)); 1632 1633 switch (err) { 1634 case EACCES: 1635 case EPERM: 1636 printed = scnprintf(buf, size, 1637 "Error:\t%s.\n" 1638 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg); 1639 1640 value = perf_event_paranoid(); 1641 1642 printed += scnprintf(buf + printed, size - printed, "\nHint:\t"); 1643 1644 if (value >= 2) { 1645 printed += scnprintf(buf + printed, size - printed, 1646 "For your workloads it needs to be <= 1\nHint:\t"); 1647 } 1648 printed += scnprintf(buf + printed, size - printed, 1649 "For system wide tracing it needs to be set to -1.\n"); 1650 1651 printed += scnprintf(buf + printed, size - printed, 1652 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n" 1653 "Hint:\tThe current value is %d.", value); 1654 break; 1655 default: 1656 scnprintf(buf, size, "%s", emsg); 1657 break; 1658 } 1659 1660 return 0; 1661 } 1662 1663 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size) 1664 { 1665 char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf)); 1666 int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0; 1667 1668 switch (err) { 1669 case EPERM: 1670 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user); 1671 printed += scnprintf(buf + printed, size - printed, 1672 "Error:\t%s.\n" 1673 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n" 1674 "Hint:\tTried using %zd kB.\n", 1675 emsg, pages_max_per_user, pages_attempted); 1676 1677 if (pages_attempted >= pages_max_per_user) { 1678 printed += scnprintf(buf + printed, size - printed, 1679 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n", 1680 pages_max_per_user + pages_attempted); 1681 } 1682 1683 printed += scnprintf(buf + printed, size - printed, 1684 "Hint:\tTry using a smaller -m/--mmap-pages value."); 1685 break; 1686 default: 1687 scnprintf(buf, size, "%s", emsg); 1688 break; 1689 } 1690 1691 return 0; 1692 } 1693 1694 void perf_evlist__to_front(struct perf_evlist *evlist, 1695 struct perf_evsel *move_evsel) 1696 { 1697 struct perf_evsel *evsel, *n; 1698 LIST_HEAD(move); 1699 1700 if (move_evsel == perf_evlist__first(evlist)) 1701 return; 1702 1703 evlist__for_each_safe(evlist, n, evsel) { 1704 if (evsel->leader == move_evsel->leader) 1705 list_move_tail(&evsel->node, &move); 1706 } 1707 1708 list_splice(&move, &evlist->entries); 1709 } 1710 1711 void perf_evlist__set_tracking_event(struct perf_evlist *evlist, 1712 struct perf_evsel *tracking_evsel) 1713 { 1714 struct perf_evsel *evsel; 1715 1716 if (tracking_evsel->tracking) 1717 return; 1718 1719 evlist__for_each(evlist, evsel) { 1720 if (evsel != tracking_evsel) 1721 evsel->tracking = false; 1722 } 1723 1724 tracking_evsel->tracking = true; 1725 } 1726