/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
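
/*
 * Usage sketch (illustrative, not part of the original file): the typical
 * lifecycle of an evlist built with the default "cycles" event.  Error
 * handling is trimmed and the "target" variable is assumed to come from
 * the caller:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__create_maps(evlist, &target) < 0)
 *		goto out_delete;
 *	if (perf_evlist__open(evlist) < 0)
 *		goto out_delete;
 *	...
 * out_delete:
 *	perf_evlist__delete(evlist);
 */
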
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}
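
/*
 * Example of the leader bookkeeping above (illustrative): for an event
 * group such as "-e '{cycles,instructions}'" the two evsels sit on the
 * list in parse order with consecutive idx values, so
 * __perf_evlist__set_leader() makes "cycles" the leader of both and sets
 * leader->nr_members to instructions->idx - cycles->idx + 1 == 2.
 */
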
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (evsel == NULL)
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}
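
/*
 * Usage sketch for perf_evlist__add_newtp() (illustrative; the handler
 * name and its signature below are assumptions, not taken from this file):
 *
 *	static int process_sched_switch(struct perf_evsel *evsel,
 *					struct perf_sample *sample);
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch))
 *		return -1;
 *
 * The handler pointer is stashed in evsel->handler; a consumer can later
 * resolve the evsel for a sample (e.g. via perf_evlist__id2evsel()) and
 * invoke it.
 */
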
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}
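
/*
 * Layout of the read() data used by the legacy path above when all of
 * PERF_FORMAT_TOTAL_TIME_ENABLED, _RUNNING and _ID are requested (fields
 * that are not requested are simply absent, which is why id_idx starts at
 * 1 and is bumped once per time field before indexing the id):
 *
 *	read_data[0]	counter value
 *	read_data[1]	time enabled
 *	read_data[2]	time running
 *	read_data[3]	event id
 */
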
struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	if (!evlist->overwrite) {
		struct perf_mmap *md = &evlist->mmap[idx];
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}
}
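
/*
 * Usage sketch for the two functions above (illustrative): draining one
 * ring buffer after poll() on evlist->pollfd reported it readable.
 * "process()" is an assumed callback, not something defined in this file:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		process(evlist, event);
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */
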
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       int prot, int mask, int cpu, int thread,
				       int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, prot, mask,
						*output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;
		}

		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
			return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
				     int mask)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
							cpu, thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
					int mask)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
						thread, &output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
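
/*
 * Worked example for perf_evlist__mmap_size() (illustrative, assuming a
 * 4096 byte page_size): the UINT_MAX default maps 512 kiB of data, i.e.
 * 128 pages, and the extra "+ 1" page holds the perf_event_mmap_page
 * header, so mmap_len = (128 + 1) * 4096 = 528384 bytes.  The data area
 * itself stays a power of two, which is what lets perf_evlist__mmap()
 * derive the ring buffer index mask as mmap_len - page_size - 1.
 */
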
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = next_pow2_l(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_consume() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mask = evlist->mmap_len - page_size - 1;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (target__uses_dummy_map(target))
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}
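
/*
 * Example for perf_evlist__id_hdr_size() (illustrative): with
 * sample_id_all set and sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_IDENTIFIER, non-sample records carry a trailing id block of
 * 2 * sizeof(u32) + sizeof(u64) + sizeof(u64) = 24 bytes.
 */
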
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}
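
/*
 * Usage sketch for the workload helpers (illustrative): the counters are
 * set up while the forked child sits blocked on the "go" pipe, and only
 * then is the cork pulled so the exec'ed workload runs with everything in
 * place.  Error handling is omitted; "target" and "opts" are assumed
 * caller-provided structures:
 *
 *	if (perf_evlist__prepare_workload(evlist, &target, argv, false, NULL))
 *		return -1;
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */
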
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
			     int err, char *buf, size_t size)
{
	char sbuf[128];

	switch (err) {
	case ENOENT:
		scnprintf(buf, size, "%s",
			  "Error:\tUnable to find debugfs\n"
			  "Hint:\tWas your kernel compiled with debugfs support?\n"
			  "Hint:\tIs the debugfs filesystem mounted?\n"
			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
		break;
	case EACCES:
		scnprintf(buf, size,
			  "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
			  "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
			  debugfs_mountpoint, debugfs_mountpoint);
		break;
	default:
		scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
		break;
	}

	return 0;
}

int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1");

		printed += scnprintf(buf + printed, size - printed,
				     ".\nHint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}