#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "event-parse.h"
#include "perf_regs.h"
#include "unwind.h"
#include "vdso.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0)
			pr_err("incompatible file format (rerun with -v to learn more)");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non matching sample_type");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non matching sample_id_all");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	session->host_machine.id_hdr_size = id_hdr_size;
	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
	hists__init(&self->hists);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__set_id_hdr_size(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		rb_erase(&t->rb_node, &self->threads);
		nd = rb_next(nd);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
	vdso__exit();
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))

static void ip__resolve_ams(struct machine *self, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest, or hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries.
		 * Thus, we have to try consecutively until we find a match,
		 * or else the symbol is unknown.
		 */
		thread__find_addr_location(thread, self, m, MAP__FUNCTION,
					   ip, &al, NULL);
		if (al.sym)
			goto found;
	}
found:
	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct branch_info *machine__resolve_bstack(struct machine *self,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}

static int machine__resolve_callchain_sample(struct machine *machine,
					     struct thread *thread,
					     struct ip_callchain *chain,
					     struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, machine, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym);
}

int machine__resolve_callchain(struct machine *machine,
			       struct perf_evsel *evsel,
			       struct thread *thread,
			       struct perf_sample *sample,
			       struct symbol **parent)
{
	int ret;

	callchain_cursor_reset(&callchain_cursor);

	ret = machine__resolve_callchain_sample(machine, thread,
						sample->callchain, parent);
	if (ret)
		return ret;

	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
				   thread, evsel->attr.sample_regs_user,
				   sample);
}

static int process_event_synth_tracing_data_stub(union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;
	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

struct sample_queue {
	u64 timestamp;
	u64 file_offset;
	union perf_event *event;
	struct list_head list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

static int flush_sample_queue(struct perf_session *s,
			      struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;

	return 0;
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	int ret = flush_sample_queue(session, tool);
	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return ret;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this one.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample, u64 mask)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = perf_evlist__sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample, evsel->attr.sample_regs_user);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);
}

static struct machine *
	perf_session__find_machine_for_cpumode(struct perf_session *session,
					       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

		return perf_session__findnew_machine(session, pid);
	}

	return perf_session__find_host_machine(session);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now, though.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID.
		 * In the future probably it'll be a good idea to restrict
		 * event processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->hists.stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->hists.stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->hists.stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size == 0)
		size = 8;

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong. Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
			  struct perf_sample *sample, struct machine *machine,
			  int print_sym, int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, evsel, al.thread,
					       sample, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (1) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
			/*
			 * Adding a handler for an event not in the session,
			 * just ignore it.
			 */
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;

out_free:
	free(tracepoint);
	goto out;
}
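
/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * consumer that reads an existing perf.data file through this API.  The
 * zero-initialized callbacks in "tool" are filled in with the stub
 * handlers by perf_tool__fill_defaults() during processing; only the
 * ordered_samples flag is set here, which routes samples through the
 * time-ordering queue above.  Error handling is trimmed for brevity and
 * the filename is just an assumption for the example.
 *
 *	struct perf_tool tool = {
 *		.ordered_samples = true,
 *	};
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false, &tool);
 *	if (session == NULL)
 *		return -ENOMEM;
 *
 *	perf_session__process_events(session, &tool);
 *	perf_session__fprintf_nr_events(session, stdout);
 *	perf_session__delete(session);
 */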