/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		for (thread = 0; thread < evlist->threads->nr; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

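			/*
			 * Track the event ID for this fd so that samples read
			 * from the ring buffer can be mapped back to this
			 * evsel (see perf_evlist__id2evsel()).
			 */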
			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

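	/*
	 * Unlike perf_evlist__apply_filters(), which uses each evsel's own
	 * filter, this applies the same filter string to every event.
	 */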
	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	list_for_each_entry(evsel, &evlist->entries, node)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

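	/* Close the per cpu/thread file descriptors in reverse creation order. */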
	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}
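/*
 * Rough usage sketch of the evlist API above, illustrative only: error
 * handling is omitted, "done" and consume() are placeholders rather than
 * symbols defined in perf, and a populated struct perf_target named
 * "target" is assumed.  Passing UINT_MAX pages to perf_evlist__mmap()
 * selects the 512 kiB default described there.
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *	int i;
 *
 *	perf_evlist__add_default(evlist);
 *	perf_evlist__create_maps(evlist, &target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *
 *	while (!done) {
 *		poll(evlist->pollfd, evlist->nr_fds, -1);
 *		for (i = 0; i < evlist->nr_mmaps; i++) {
 *			union perf_event *event;
 *
 *			while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *				consume(event);
 *		}
 *	}
 *
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__close(evlist);
 *	perf_evlist__delete_maps(evlist);
 *	perf_evlist__delete(evlist);
 */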