/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}

void perf_evlist__config(struct perf_evlist *evlist,
			 struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;
	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			perf_evsel__set_sample_id(evsel);
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

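/*
 * Leader bookkeeping, sketched for an evlist with three events added in
 * order (idx 0, 1 and 2): __perf_evlist__set_leader() takes the first
 * entry as the leader, sets leader->nr_members = 2 - 0 + 1 = 3 and points
 * every member's ->leader at it.  perf_evlist__set_leader() then counts
 * the whole list as a single group (nr_groups = 1), which is why
 * perf_evlist__config() has to establish these links before the per-evsel
 * attributes are filled in.
 */
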
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

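/*
 * Waiting for data is left to the caller.  A minimal sketch, once every
 * mmap'ed fd has been registered through perf_evlist__add_pollfd()
 * (error handling omitted):
 *
 *	if (poll(evlist->pollfd, evlist->nr_fds, -1) < 0)
 *		...
 *
 * The descriptors are switched to O_NONBLOCK and armed for POLLIN, so
 * poll() returns as soon as at least one ring buffer has data pending.
 */
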
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

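/*
 * The id_idx arithmetic above mirrors the non-group read format documented
 * in perf_event_open(2):
 *
 *	u64 value;		always present
 *	u64 time_enabled;	if PERF_FORMAT_TOTAL_TIME_ENABLED
 *	u64 time_running;	if PERF_FORMAT_TOTAL_TIME_RUNNING
 *	u64 id;			if PERF_FORMAT_ID
 *
 * hence the four u64 slots in read_data[] and an id index of 1, 2 or 3,
 * depending on which TOTAL_TIME_* bits are set.
 */
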
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

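/*
 * A minimal consumer sketch (error handling omitted): drain every map and
 * hand each event to perf_evlist__parse_sample(), defined at the end of
 * this file:
 *
 *	union perf_event *event;
 *	struct perf_sample sample;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++)
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			if (perf_evlist__parse_sample(evlist, event, &sample) == 0)
 *				...
 *
 * With overwrite == false, perf_evlist__mmap_read() advances the tail
 * itself, so the caller never needs to call perf_mmap__write_tail() here.
 */
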
void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__all(evlist->cpus))
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/**
 * perf_evlist__mmap - Create per cpu maps to receive events
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 *	struct perf_mmap *m = &evlist->mmap[cpu];
 *	unsigned int head = perf_mmap__read_head(m);
 *
 *	perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read() does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__all(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

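/*
 * Sizing sketch: with 4 KiB pages and the default pages = 128 (512 KiB of
 * data), mmap_len covers 129 pages because the kernel prepends one control
 * page (struct perf_event_mmap_page) holding the head and tail pointers,
 * which is why perf_evlist__mmap_read() starts the data area at
 * md->base + page_size.  mask = 128 * 4096 - 1 = 0x7ffff works as a wrap
 * mask only because the data size is a power of two, enforced for user
 * supplied values by the is_power_of_2() check above.
 */
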
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = evlist->threads->nr;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

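/*
 * Worked example: with sample_id_all set and sample_type containing
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID | PERF_SAMPLE_CPU,
 * the trailer appended to non-sample events is
 * (4 + 4) + 8 + 8 + (4 + 4) = 32 bytes: pid/tid, time, id and cpu/res.
 */
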
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err, ncpus, nthreads;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

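/*
 * Rough lifecycle sketch tying the pieces in this file together, in the
 * order a record style tool would call them.  Error handling is omitted
 * and the opts/target/argv setup is assumed to exist elsewhere:
 *
 *	evlist = perf_evlist__new(NULL, NULL);
 *	perf_evlist__add_default(evlist);
 *	perf_evlist__create_maps(evlist, &opts->target);
 *	perf_evlist__prepare_workload(evlist, opts, argv);
 *	perf_evlist__config(evlist, opts);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__start_workload(evlist);
 *	... poll() + perf_evlist__mmap_read() loop ...
 *	perf_evlist__munmap(evlist);
 *	perf_evlist__delete_maps(evlist);
 *	perf_evlist__delete(evlist);
 */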