/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}
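
/*
 * Example: a minimal sketch of how the functions above are typically driven
 * by a tool such as 'perf record' (see builtin-record.c, from which parts of
 * this file came).  Error handling is omitted, and 'target', 'argv' and
 * 'done' are placeholders supplied by the caller:
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	perf_evlist__add_default(evlist);		// count "cycles"
 *	perf_evlist__create_maps(evlist, &target);	// cpu + thread maps
 *	perf_evlist__open(evlist);			// one fd per evsel/cpu/thread
 *	perf_evlist__mmap(evlist, UINT_MAX, false);	// default sized ring buffers
 *	perf_evlist__enable(evlist);
 *
 *	while (!done) {
 *		union perf_event *event;
 *		int i;
 *
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *		for (i = 0; i < evlist->nr_mmaps; i++)
 *			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *				;			// consume the event
 *	}
 *
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete(evlist);
 */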