1 #define _FILE_OFFSET_BITS 64 2 3 #include <linux/kernel.h> 4 5 #include <byteswap.h> 6 #include <unistd.h> 7 #include <sys/types.h> 8 #include <sys/mman.h> 9 10 #include "evlist.h" 11 #include "evsel.h" 12 #include "session.h" 13 #include "tool.h" 14 #include "sort.h" 15 #include "util.h" 16 #include "cpumap.h" 17 #include "event-parse.h" 18 19 int perf_session__parse_sample(struct perf_session *session, 20 const union perf_event *event, 21 struct perf_sample *sample) 22 { 23 struct perf_evsel *first; 24 first = list_entry(session->evlist->entries.next, struct perf_evsel, node); 25 26 return perf_event__parse_sample(event, session->sample_type, 27 first->sample_size, 28 session->sample_id_all, sample, 29 session->header.needs_swap); 30 } 31 32 static int perf_session__open(struct perf_session *self, bool force) 33 { 34 struct stat input_stat; 35 36 if (!strcmp(self->filename, "-")) { 37 self->fd_pipe = true; 38 self->fd = STDIN_FILENO; 39 40 if (perf_session__read_header(self, self->fd) < 0) 41 pr_err("incompatible file format (rerun with -v to learn more)"); 42 43 return 0; 44 } 45 46 self->fd = open(self->filename, O_RDONLY); 47 if (self->fd < 0) { 48 int err = errno; 49 50 pr_err("failed to open %s: %s", self->filename, strerror(err)); 51 if (err == ENOENT && !strcmp(self->filename, "perf.data")) 52 pr_err(" (try 'perf record' first)"); 53 pr_err("\n"); 54 return -errno; 55 } 56 57 if (fstat(self->fd, &input_stat) < 0) 58 goto out_close; 59 60 if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { 61 pr_err("file %s not owned by current user or root\n", 62 self->filename); 63 goto out_close; 64 } 65 66 if (!input_stat.st_size) { 67 pr_info("zero-sized file (%s), nothing to do!\n", 68 self->filename); 69 goto out_close; 70 } 71 72 if (perf_session__read_header(self, self->fd) < 0) { 73 pr_err("incompatible file format (rerun with -v to learn more)"); 74 goto out_close; 75 } 76 77 if (!perf_evlist__valid_sample_type(self->evlist)) { 78 pr_err("non matching sample_type"); 79 goto out_close; 80 } 81 82 if (!perf_evlist__valid_sample_id_all(self->evlist)) { 83 pr_err("non matching sample_id_all"); 84 goto out_close; 85 } 86 87 self->size = input_stat.st_size; 88 return 0; 89 90 out_close: 91 close(self->fd); 92 self->fd = -1; 93 return -1; 94 } 95 96 void perf_session__update_sample_type(struct perf_session *self) 97 { 98 self->sample_type = perf_evlist__sample_type(self->evlist); 99 self->sample_id_all = perf_evlist__sample_id_all(self->evlist); 100 self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist); 101 self->host_machine.id_hdr_size = self->id_hdr_size; 102 machines__set_id_hdr_size(&self->machines, self->id_hdr_size); 103 } 104 105 int perf_session__create_kernel_maps(struct perf_session *self) 106 { 107 int ret = machine__create_kernel_maps(&self->host_machine); 108 109 if (ret >= 0) 110 ret = machines__create_guest_kernel_maps(&self->machines); 111 return ret; 112 } 113 114 static void perf_session__destroy_kernel_maps(struct perf_session *self) 115 { 116 machine__destroy_kernel_maps(&self->host_machine); 117 machines__destroy_guest_kernel_maps(&self->machines); 118 } 119 120 struct perf_session *perf_session__new(const char *filename, int mode, 121 bool force, bool repipe, 122 struct perf_tool *tool) 123 { 124 struct perf_session *self; 125 struct stat st; 126 size_t len; 127 128 if (!filename || !strlen(filename)) { 129 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 130 filename = "-"; 131 else 132 filename = "perf.data"; 133 } 134 135 len = strlen(filename); 136 self = zalloc(sizeof(*self) + len); 137 138 if (self == NULL) 139 goto out; 140 141 memcpy(self->filename, filename, len); 142 /* 143 * On 64bit we can mmap the data file in one go. No need for tiny mmap 144 * slices. On 32bit we use 32MB. 145 */ 146 #if BITS_PER_LONG == 64 147 self->mmap_window = ULLONG_MAX; 148 #else 149 self->mmap_window = 32 * 1024 * 1024ULL; 150 #endif 151 self->machines = RB_ROOT; 152 self->repipe = repipe; 153 INIT_LIST_HEAD(&self->ordered_samples.samples); 154 INIT_LIST_HEAD(&self->ordered_samples.sample_cache); 155 INIT_LIST_HEAD(&self->ordered_samples.to_free); 156 machine__init(&self->host_machine, "", HOST_KERNEL_ID); 157 hists__init(&self->hists); 158 159 if (mode == O_RDONLY) { 160 if (perf_session__open(self, force) < 0) 161 goto out_delete; 162 perf_session__update_sample_type(self); 163 } else if (mode == O_WRONLY) { 164 /* 165 * In O_RDONLY mode this will be performed when reading the 166 * kernel MMAP event, in perf_event__process_mmap(). 167 */ 168 if (perf_session__create_kernel_maps(self) < 0) 169 goto out_delete; 170 } 171 172 if (tool && tool->ordering_requires_timestamps && 173 tool->ordered_samples && !self->sample_id_all) { 174 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 175 tool->ordered_samples = false; 176 } 177 178 out: 179 return self; 180 out_delete: 181 perf_session__delete(self); 182 return NULL; 183 } 184 185 static void machine__delete_dead_threads(struct machine *machine) 186 { 187 struct thread *n, *t; 188 189 list_for_each_entry_safe(t, n, &machine->dead_threads, node) { 190 list_del(&t->node); 191 thread__delete(t); 192 } 193 } 194 195 static void perf_session__delete_dead_threads(struct perf_session *session) 196 { 197 machine__delete_dead_threads(&session->host_machine); 198 } 199 200 static void machine__delete_threads(struct machine *self) 201 { 202 struct rb_node *nd = rb_first(&self->threads); 203 204 while (nd) { 205 struct thread *t = rb_entry(nd, struct thread, rb_node); 206 207 rb_erase(&t->rb_node, &self->threads); 208 nd = rb_next(nd); 209 thread__delete(t); 210 } 211 } 212 213 static void perf_session__delete_threads(struct perf_session *session) 214 { 215 machine__delete_threads(&session->host_machine); 216 } 217 218 void perf_session__delete(struct perf_session *self) 219 { 220 perf_session__destroy_kernel_maps(self); 221 perf_session__delete_dead_threads(self); 222 perf_session__delete_threads(self); 223 machine__exit(&self->host_machine); 224 close(self->fd); 225 free(self); 226 } 227 228 void machine__remove_thread(struct machine *self, struct thread *th) 229 { 230 self->last_match = NULL; 231 rb_erase(&th->rb_node, &self->threads); 232 /* 233 * We may have references to this thread, for instance in some hist_entry 234 * instances, so just move them to a separate list. 235 */ 236 list_add_tail(&th->node, &self->dead_threads); 237 } 238 239 static bool symbol__match_parent_regex(struct symbol *sym) 240 { 241 if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) 242 return 1; 243 244 return 0; 245 } 246 247 static const u8 cpumodes[] = { 248 PERF_RECORD_MISC_USER, 249 PERF_RECORD_MISC_KERNEL, 250 PERF_RECORD_MISC_GUEST_USER, 251 PERF_RECORD_MISC_GUEST_KERNEL 252 }; 253 #define NCPUMODES (sizeof(cpumodes)/sizeof(u8)) 254 255 static void ip__resolve_ams(struct machine *self, struct thread *thread, 256 struct addr_map_symbol *ams, 257 u64 ip) 258 { 259 struct addr_location al; 260 size_t i; 261 u8 m; 262 263 memset(&al, 0, sizeof(al)); 264 265 for (i = 0; i < NCPUMODES; i++) { 266 m = cpumodes[i]; 267 /* 268 * We cannot use the header.misc hint to determine whether a 269 * branch stack address is user, kernel, guest, hypervisor. 270 * Branches may straddle the kernel/user/hypervisor boundaries. 271 * Thus, we have to try consecutively until we find a match 272 * or else, the symbol is unknown 273 */ 274 thread__find_addr_location(thread, self, m, MAP__FUNCTION, 275 ip, &al, NULL); 276 if (al.sym) 277 goto found; 278 } 279 found: 280 ams->addr = ip; 281 ams->al_addr = al.addr; 282 ams->sym = al.sym; 283 ams->map = al.map; 284 } 285 286 struct branch_info *machine__resolve_bstack(struct machine *self, 287 struct thread *thr, 288 struct branch_stack *bs) 289 { 290 struct branch_info *bi; 291 unsigned int i; 292 293 bi = calloc(bs->nr, sizeof(struct branch_info)); 294 if (!bi) 295 return NULL; 296 297 for (i = 0; i < bs->nr; i++) { 298 ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to); 299 ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from); 300 bi[i].flags = bs->entries[i].flags; 301 } 302 return bi; 303 } 304 305 int machine__resolve_callchain(struct machine *self, 306 struct thread *thread, 307 struct ip_callchain *chain, 308 struct symbol **parent) 309 { 310 u8 cpumode = PERF_RECORD_MISC_USER; 311 unsigned int i; 312 int err; 313 314 callchain_cursor_reset(&callchain_cursor); 315 316 if (chain->nr > PERF_MAX_STACK_DEPTH) { 317 pr_warning("corrupted callchain. skipping...\n"); 318 return 0; 319 } 320 321 for (i = 0; i < chain->nr; i++) { 322 u64 ip; 323 struct addr_location al; 324 325 if (callchain_param.order == ORDER_CALLEE) 326 ip = chain->ips[i]; 327 else 328 ip = chain->ips[chain->nr - i - 1]; 329 330 if (ip >= PERF_CONTEXT_MAX) { 331 switch (ip) { 332 case PERF_CONTEXT_HV: 333 cpumode = PERF_RECORD_MISC_HYPERVISOR; break; 334 case PERF_CONTEXT_KERNEL: 335 cpumode = PERF_RECORD_MISC_KERNEL; break; 336 case PERF_CONTEXT_USER: 337 cpumode = PERF_RECORD_MISC_USER; break; 338 default: 339 pr_debug("invalid callchain context: " 340 "%"PRId64"\n", (s64) ip); 341 /* 342 * It seems the callchain is corrupted. 343 * Discard all. 344 */ 345 callchain_cursor_reset(&callchain_cursor); 346 return 0; 347 } 348 continue; 349 } 350 351 al.filtered = false; 352 thread__find_addr_location(thread, self, cpumode, 353 MAP__FUNCTION, ip, &al, NULL); 354 if (al.sym != NULL) { 355 if (sort__has_parent && !*parent && 356 symbol__match_parent_regex(al.sym)) 357 *parent = al.sym; 358 if (!symbol_conf.use_callchain) 359 break; 360 } 361 362 err = callchain_cursor_append(&callchain_cursor, 363 ip, al.map, al.sym); 364 if (err) 365 return err; 366 } 367 368 return 0; 369 } 370 371 static int process_event_synth_tracing_data_stub(union perf_event *event __used, 372 struct perf_session *session __used) 373 { 374 dump_printf(": unhandled!\n"); 375 return 0; 376 } 377 378 static int process_event_synth_attr_stub(union perf_event *event __used, 379 struct perf_evlist **pevlist __used) 380 { 381 dump_printf(": unhandled!\n"); 382 return 0; 383 } 384 385 static int process_event_sample_stub(struct perf_tool *tool __used, 386 union perf_event *event __used, 387 struct perf_sample *sample __used, 388 struct perf_evsel *evsel __used, 389 struct machine *machine __used) 390 { 391 dump_printf(": unhandled!\n"); 392 return 0; 393 } 394 395 static int process_event_stub(struct perf_tool *tool __used, 396 union perf_event *event __used, 397 struct perf_sample *sample __used, 398 struct machine *machine __used) 399 { 400 dump_printf(": unhandled!\n"); 401 return 0; 402 } 403 404 static int process_finished_round_stub(struct perf_tool *tool __used, 405 union perf_event *event __used, 406 struct perf_session *perf_session __used) 407 { 408 dump_printf(": unhandled!\n"); 409 return 0; 410 } 411 412 static int process_event_type_stub(struct perf_tool *tool __used, 413 union perf_event *event __used) 414 { 415 dump_printf(": unhandled!\n"); 416 return 0; 417 } 418 419 static int process_finished_round(struct perf_tool *tool, 420 union perf_event *event, 421 struct perf_session *session); 422 423 static void perf_tool__fill_defaults(struct perf_tool *tool) 424 { 425 if (tool->sample == NULL) 426 tool->sample = process_event_sample_stub; 427 if (tool->mmap == NULL) 428 tool->mmap = process_event_stub; 429 if (tool->comm == NULL) 430 tool->comm = process_event_stub; 431 if (tool->fork == NULL) 432 tool->fork = process_event_stub; 433 if (tool->exit == NULL) 434 tool->exit = process_event_stub; 435 if (tool->lost == NULL) 436 tool->lost = perf_event__process_lost; 437 if (tool->read == NULL) 438 tool->read = process_event_sample_stub; 439 if (tool->throttle == NULL) 440 tool->throttle = process_event_stub; 441 if (tool->unthrottle == NULL) 442 tool->unthrottle = process_event_stub; 443 if (tool->attr == NULL) 444 tool->attr = process_event_synth_attr_stub; 445 if (tool->event_type == NULL) 446 tool->event_type = process_event_type_stub; 447 if (tool->tracing_data == NULL) 448 tool->tracing_data = process_event_synth_tracing_data_stub; 449 if (tool->build_id == NULL) 450 tool->build_id = process_finished_round_stub; 451 if (tool->finished_round == NULL) { 452 if (tool->ordered_samples) 453 tool->finished_round = process_finished_round; 454 else 455 tool->finished_round = process_finished_round_stub; 456 } 457 } 458 459 void mem_bswap_32(void *src, int byte_size) 460 { 461 u32 *m = src; 462 while (byte_size > 0) { 463 *m = bswap_32(*m); 464 byte_size -= sizeof(u32); 465 ++m; 466 } 467 } 468 469 void mem_bswap_64(void *src, int byte_size) 470 { 471 u64 *m = src; 472 473 while (byte_size > 0) { 474 *m = bswap_64(*m); 475 byte_size -= sizeof(u64); 476 ++m; 477 } 478 } 479 480 static void swap_sample_id_all(union perf_event *event, void *data) 481 { 482 void *end = (void *) event + event->header.size; 483 int size = end - data; 484 485 BUG_ON(size % sizeof(u64)); 486 mem_bswap_64(data, size); 487 } 488 489 static void perf_event__all64_swap(union perf_event *event, 490 bool sample_id_all __used) 491 { 492 struct perf_event_header *hdr = &event->header; 493 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 494 } 495 496 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) 497 { 498 event->comm.pid = bswap_32(event->comm.pid); 499 event->comm.tid = bswap_32(event->comm.tid); 500 501 if (sample_id_all) { 502 void *data = &event->comm.comm; 503 504 data += ALIGN(strlen(data) + 1, sizeof(u64)); 505 swap_sample_id_all(event, data); 506 } 507 } 508 509 static void perf_event__mmap_swap(union perf_event *event, 510 bool sample_id_all) 511 { 512 event->mmap.pid = bswap_32(event->mmap.pid); 513 event->mmap.tid = bswap_32(event->mmap.tid); 514 event->mmap.start = bswap_64(event->mmap.start); 515 event->mmap.len = bswap_64(event->mmap.len); 516 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 517 518 if (sample_id_all) { 519 void *data = &event->mmap.filename; 520 521 data += ALIGN(strlen(data) + 1, sizeof(u64)); 522 swap_sample_id_all(event, data); 523 } 524 } 525 526 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 527 { 528 event->fork.pid = bswap_32(event->fork.pid); 529 event->fork.tid = bswap_32(event->fork.tid); 530 event->fork.ppid = bswap_32(event->fork.ppid); 531 event->fork.ptid = bswap_32(event->fork.ptid); 532 event->fork.time = bswap_64(event->fork.time); 533 534 if (sample_id_all) 535 swap_sample_id_all(event, &event->fork + 1); 536 } 537 538 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 539 { 540 event->read.pid = bswap_32(event->read.pid); 541 event->read.tid = bswap_32(event->read.tid); 542 event->read.value = bswap_64(event->read.value); 543 event->read.time_enabled = bswap_64(event->read.time_enabled); 544 event->read.time_running = bswap_64(event->read.time_running); 545 event->read.id = bswap_64(event->read.id); 546 547 if (sample_id_all) 548 swap_sample_id_all(event, &event->read + 1); 549 } 550 551 static u8 revbyte(u8 b) 552 { 553 int rev = (b >> 4) | ((b & 0xf) << 4); 554 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 555 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 556 return (u8) rev; 557 } 558 559 /* 560 * XXX this is hack in attempt to carry flags bitfield 561 * throught endian village. ABI says: 562 * 563 * Bit-fields are allocated from right to left (least to most significant) 564 * on little-endian implementations and from left to right (most to least 565 * significant) on big-endian implementations. 566 * 567 * The above seems to be byte specific, so we need to reverse each 568 * byte of the bitfield. 'Internet' also says this might be implementation 569 * specific and we probably need proper fix and carry perf_event_attr 570 * bitfield flags in separate data file FEAT_ section. Thought this seems 571 * to work for now. 572 */ 573 static void swap_bitfield(u8 *p, unsigned len) 574 { 575 unsigned i; 576 577 for (i = 0; i < len; i++) { 578 *p = revbyte(*p); 579 p++; 580 } 581 } 582 583 /* exported for swapping attributes in file header */ 584 void perf_event__attr_swap(struct perf_event_attr *attr) 585 { 586 attr->type = bswap_32(attr->type); 587 attr->size = bswap_32(attr->size); 588 attr->config = bswap_64(attr->config); 589 attr->sample_period = bswap_64(attr->sample_period); 590 attr->sample_type = bswap_64(attr->sample_type); 591 attr->read_format = bswap_64(attr->read_format); 592 attr->wakeup_events = bswap_32(attr->wakeup_events); 593 attr->bp_type = bswap_32(attr->bp_type); 594 attr->bp_addr = bswap_64(attr->bp_addr); 595 attr->bp_len = bswap_64(attr->bp_len); 596 597 swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); 598 } 599 600 static void perf_event__hdr_attr_swap(union perf_event *event, 601 bool sample_id_all __used) 602 { 603 size_t size; 604 605 perf_event__attr_swap(&event->attr.attr); 606 607 size = event->header.size; 608 size -= (void *)&event->attr.id - (void *)event; 609 mem_bswap_64(event->attr.id, size); 610 } 611 612 static void perf_event__event_type_swap(union perf_event *event, 613 bool sample_id_all __used) 614 { 615 event->event_type.event_type.event_id = 616 bswap_64(event->event_type.event_type.event_id); 617 } 618 619 static void perf_event__tracing_data_swap(union perf_event *event, 620 bool sample_id_all __used) 621 { 622 event->tracing_data.size = bswap_32(event->tracing_data.size); 623 } 624 625 typedef void (*perf_event__swap_op)(union perf_event *event, 626 bool sample_id_all); 627 628 static perf_event__swap_op perf_event__swap_ops[] = { 629 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 630 [PERF_RECORD_COMM] = perf_event__comm_swap, 631 [PERF_RECORD_FORK] = perf_event__task_swap, 632 [PERF_RECORD_EXIT] = perf_event__task_swap, 633 [PERF_RECORD_LOST] = perf_event__all64_swap, 634 [PERF_RECORD_READ] = perf_event__read_swap, 635 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 636 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 637 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 638 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 639 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 640 [PERF_RECORD_HEADER_MAX] = NULL, 641 }; 642 643 struct sample_queue { 644 u64 timestamp; 645 u64 file_offset; 646 union perf_event *event; 647 struct list_head list; 648 }; 649 650 static void perf_session_free_sample_buffers(struct perf_session *session) 651 { 652 struct ordered_samples *os = &session->ordered_samples; 653 654 while (!list_empty(&os->to_free)) { 655 struct sample_queue *sq; 656 657 sq = list_entry(os->to_free.next, struct sample_queue, list); 658 list_del(&sq->list); 659 free(sq); 660 } 661 } 662 663 static int perf_session_deliver_event(struct perf_session *session, 664 union perf_event *event, 665 struct perf_sample *sample, 666 struct perf_tool *tool, 667 u64 file_offset); 668 669 static void flush_sample_queue(struct perf_session *s, 670 struct perf_tool *tool) 671 { 672 struct ordered_samples *os = &s->ordered_samples; 673 struct list_head *head = &os->samples; 674 struct sample_queue *tmp, *iter; 675 struct perf_sample sample; 676 u64 limit = os->next_flush; 677 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; 678 unsigned idx = 0, progress_next = os->nr_samples / 16; 679 int ret; 680 681 if (!tool->ordered_samples || !limit) 682 return; 683 684 list_for_each_entry_safe(iter, tmp, head, list) { 685 if (iter->timestamp > limit) 686 break; 687 688 ret = perf_session__parse_sample(s, iter->event, &sample); 689 if (ret) 690 pr_err("Can't parse sample, err = %d\n", ret); 691 else 692 perf_session_deliver_event(s, iter->event, &sample, tool, 693 iter->file_offset); 694 695 os->last_flush = iter->timestamp; 696 list_del(&iter->list); 697 list_add(&iter->list, &os->sample_cache); 698 if (++idx >= progress_next) { 699 progress_next += os->nr_samples / 16; 700 ui_progress__update(idx, os->nr_samples, 701 "Processing time ordered events..."); 702 } 703 } 704 705 if (list_empty(head)) { 706 os->last_sample = NULL; 707 } else if (last_ts <= limit) { 708 os->last_sample = 709 list_entry(head->prev, struct sample_queue, list); 710 } 711 712 os->nr_samples = 0; 713 } 714 715 /* 716 * When perf record finishes a pass on every buffers, it records this pseudo 717 * event. 718 * We record the max timestamp t found in the pass n. 719 * Assuming these timestamps are monotonic across cpus, we know that if 720 * a buffer still has events with timestamps below t, they will be all 721 * available and then read in the pass n + 1. 722 * Hence when we start to read the pass n + 2, we can safely flush every 723 * events with timestamps below t. 724 * 725 * ============ PASS n ================= 726 * CPU 0 | CPU 1 727 * | 728 * cnt1 timestamps | cnt2 timestamps 729 * 1 | 2 730 * 2 | 3 731 * - | 4 <--- max recorded 732 * 733 * ============ PASS n + 1 ============== 734 * CPU 0 | CPU 1 735 * | 736 * cnt1 timestamps | cnt2 timestamps 737 * 3 | 5 738 * 4 | 6 739 * 5 | 7 <---- max recorded 740 * 741 * Flush every events below timestamp 4 742 * 743 * ============ PASS n + 2 ============== 744 * CPU 0 | CPU 1 745 * | 746 * cnt1 timestamps | cnt2 timestamps 747 * 6 | 8 748 * 7 | 9 749 * - | 10 750 * 751 * Flush every events below timestamp 7 752 * etc... 753 */ 754 static int process_finished_round(struct perf_tool *tool, 755 union perf_event *event __used, 756 struct perf_session *session) 757 { 758 flush_sample_queue(session, tool); 759 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; 760 761 return 0; 762 } 763 764 /* The queue is ordered by time */ 765 static void __queue_event(struct sample_queue *new, struct perf_session *s) 766 { 767 struct ordered_samples *os = &s->ordered_samples; 768 struct sample_queue *sample = os->last_sample; 769 u64 timestamp = new->timestamp; 770 struct list_head *p; 771 772 ++os->nr_samples; 773 os->last_sample = new; 774 775 if (!sample) { 776 list_add(&new->list, &os->samples); 777 os->max_timestamp = timestamp; 778 return; 779 } 780 781 /* 782 * last_sample might point to some random place in the list as it's 783 * the last queued event. We expect that the new event is close to 784 * this. 785 */ 786 if (sample->timestamp <= timestamp) { 787 while (sample->timestamp <= timestamp) { 788 p = sample->list.next; 789 if (p == &os->samples) { 790 list_add_tail(&new->list, &os->samples); 791 os->max_timestamp = timestamp; 792 return; 793 } 794 sample = list_entry(p, struct sample_queue, list); 795 } 796 list_add_tail(&new->list, &sample->list); 797 } else { 798 while (sample->timestamp > timestamp) { 799 p = sample->list.prev; 800 if (p == &os->samples) { 801 list_add(&new->list, &os->samples); 802 return; 803 } 804 sample = list_entry(p, struct sample_queue, list); 805 } 806 list_add(&new->list, &sample->list); 807 } 808 } 809 810 #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) 811 812 static int perf_session_queue_event(struct perf_session *s, union perf_event *event, 813 struct perf_sample *sample, u64 file_offset) 814 { 815 struct ordered_samples *os = &s->ordered_samples; 816 struct list_head *sc = &os->sample_cache; 817 u64 timestamp = sample->time; 818 struct sample_queue *new; 819 820 if (!timestamp || timestamp == ~0ULL) 821 return -ETIME; 822 823 if (timestamp < s->ordered_samples.last_flush) { 824 printf("Warning: Timestamp below last timeslice flush\n"); 825 return -EINVAL; 826 } 827 828 if (!list_empty(sc)) { 829 new = list_entry(sc->next, struct sample_queue, list); 830 list_del(&new->list); 831 } else if (os->sample_buffer) { 832 new = os->sample_buffer + os->sample_buffer_idx; 833 if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) 834 os->sample_buffer = NULL; 835 } else { 836 os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); 837 if (!os->sample_buffer) 838 return -ENOMEM; 839 list_add(&os->sample_buffer->list, &os->to_free); 840 os->sample_buffer_idx = 2; 841 new = os->sample_buffer + 1; 842 } 843 844 new->timestamp = timestamp; 845 new->file_offset = file_offset; 846 new->event = event; 847 848 __queue_event(new, s); 849 850 return 0; 851 } 852 853 static void callchain__printf(struct perf_sample *sample) 854 { 855 unsigned int i; 856 857 printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); 858 859 for (i = 0; i < sample->callchain->nr; i++) 860 printf("..... %2d: %016" PRIx64 "\n", 861 i, sample->callchain->ips[i]); 862 } 863 864 static void branch_stack__printf(struct perf_sample *sample) 865 { 866 uint64_t i; 867 868 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); 869 870 for (i = 0; i < sample->branch_stack->nr; i++) 871 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", 872 i, sample->branch_stack->entries[i].from, 873 sample->branch_stack->entries[i].to); 874 } 875 876 static void perf_session__print_tstamp(struct perf_session *session, 877 union perf_event *event, 878 struct perf_sample *sample) 879 { 880 if (event->header.type != PERF_RECORD_SAMPLE && 881 !session->sample_id_all) { 882 fputs("-1 -1 ", stdout); 883 return; 884 } 885 886 if ((session->sample_type & PERF_SAMPLE_CPU)) 887 printf("%u ", sample->cpu); 888 889 if (session->sample_type & PERF_SAMPLE_TIME) 890 printf("%" PRIu64 " ", sample->time); 891 } 892 893 static void dump_event(struct perf_session *session, union perf_event *event, 894 u64 file_offset, struct perf_sample *sample) 895 { 896 if (!dump_trace) 897 return; 898 899 printf("\n%#" PRIx64 " [%#x]: event: %d\n", 900 file_offset, event->header.size, event->header.type); 901 902 trace_event(event); 903 904 if (sample) 905 perf_session__print_tstamp(session, event, sample); 906 907 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 908 event->header.size, perf_event__name(event->header.type)); 909 } 910 911 static void dump_sample(struct perf_session *session, union perf_event *event, 912 struct perf_sample *sample) 913 { 914 if (!dump_trace) 915 return; 916 917 printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", 918 event->header.misc, sample->pid, sample->tid, sample->ip, 919 sample->period, sample->addr); 920 921 if (session->sample_type & PERF_SAMPLE_CALLCHAIN) 922 callchain__printf(sample); 923 924 if (session->sample_type & PERF_SAMPLE_BRANCH_STACK) 925 branch_stack__printf(sample); 926 } 927 928 static struct machine * 929 perf_session__find_machine_for_cpumode(struct perf_session *session, 930 union perf_event *event) 931 { 932 const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 933 934 if (perf_guest && 935 ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || 936 (cpumode == PERF_RECORD_MISC_GUEST_USER))) { 937 u32 pid; 938 939 if (event->header.type == PERF_RECORD_MMAP) 940 pid = event->mmap.pid; 941 else 942 pid = event->ip.pid; 943 944 return perf_session__findnew_machine(session, pid); 945 } 946 947 return perf_session__find_host_machine(session); 948 } 949 950 static int perf_session_deliver_event(struct perf_session *session, 951 union perf_event *event, 952 struct perf_sample *sample, 953 struct perf_tool *tool, 954 u64 file_offset) 955 { 956 struct perf_evsel *evsel; 957 struct machine *machine; 958 959 dump_event(session, event, file_offset, sample); 960 961 evsel = perf_evlist__id2evsel(session->evlist, sample->id); 962 if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) { 963 /* 964 * XXX We're leaving PERF_RECORD_SAMPLE unnacounted here 965 * because the tools right now may apply filters, discarding 966 * some of the samples. For consistency, in the future we 967 * should have something like nr_filtered_samples and remove 968 * the sample->period from total_sample_period, etc, KISS for 969 * now tho. 970 * 971 * Also testing against NULL allows us to handle files without 972 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the 973 * future probably it'll be a good idea to restrict event 974 * processing via perf_session to files with both set. 975 */ 976 hists__inc_nr_events(&evsel->hists, event->header.type); 977 } 978 979 machine = perf_session__find_machine_for_cpumode(session, event); 980 981 switch (event->header.type) { 982 case PERF_RECORD_SAMPLE: 983 dump_sample(session, event, sample); 984 if (evsel == NULL) { 985 ++session->hists.stats.nr_unknown_id; 986 return 0; 987 } 988 if (machine == NULL) { 989 ++session->hists.stats.nr_unprocessable_samples; 990 return 0; 991 } 992 return tool->sample(tool, event, sample, evsel, machine); 993 case PERF_RECORD_MMAP: 994 return tool->mmap(tool, event, sample, machine); 995 case PERF_RECORD_COMM: 996 return tool->comm(tool, event, sample, machine); 997 case PERF_RECORD_FORK: 998 return tool->fork(tool, event, sample, machine); 999 case PERF_RECORD_EXIT: 1000 return tool->exit(tool, event, sample, machine); 1001 case PERF_RECORD_LOST: 1002 if (tool->lost == perf_event__process_lost) 1003 session->hists.stats.total_lost += event->lost.lost; 1004 return tool->lost(tool, event, sample, machine); 1005 case PERF_RECORD_READ: 1006 return tool->read(tool, event, sample, evsel, machine); 1007 case PERF_RECORD_THROTTLE: 1008 return tool->throttle(tool, event, sample, machine); 1009 case PERF_RECORD_UNTHROTTLE: 1010 return tool->unthrottle(tool, event, sample, machine); 1011 default: 1012 ++session->hists.stats.nr_unknown_events; 1013 return -1; 1014 } 1015 } 1016 1017 static int perf_session__preprocess_sample(struct perf_session *session, 1018 union perf_event *event, struct perf_sample *sample) 1019 { 1020 if (event->header.type != PERF_RECORD_SAMPLE || 1021 !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) 1022 return 0; 1023 1024 if (!ip_callchain__valid(sample->callchain, event)) { 1025 pr_debug("call-chain problem with event, skipping it.\n"); 1026 ++session->hists.stats.nr_invalid_chains; 1027 session->hists.stats.total_invalid_chains += sample->period; 1028 return -EINVAL; 1029 } 1030 return 0; 1031 } 1032 1033 static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, 1034 struct perf_tool *tool, u64 file_offset) 1035 { 1036 int err; 1037 1038 dump_event(session, event, file_offset, NULL); 1039 1040 /* These events are processed right away */ 1041 switch (event->header.type) { 1042 case PERF_RECORD_HEADER_ATTR: 1043 err = tool->attr(event, &session->evlist); 1044 if (err == 0) 1045 perf_session__update_sample_type(session); 1046 return err; 1047 case PERF_RECORD_HEADER_EVENT_TYPE: 1048 return tool->event_type(tool, event); 1049 case PERF_RECORD_HEADER_TRACING_DATA: 1050 /* setup for reading amidst mmap */ 1051 lseek(session->fd, file_offset, SEEK_SET); 1052 return tool->tracing_data(event, session); 1053 case PERF_RECORD_HEADER_BUILD_ID: 1054 return tool->build_id(tool, event, session); 1055 case PERF_RECORD_FINISHED_ROUND: 1056 return tool->finished_round(tool, event, session); 1057 default: 1058 return -EINVAL; 1059 } 1060 } 1061 1062 static void event_swap(union perf_event *event, bool sample_id_all) 1063 { 1064 perf_event__swap_op swap; 1065 1066 swap = perf_event__swap_ops[event->header.type]; 1067 if (swap) 1068 swap(event, sample_id_all); 1069 } 1070 1071 static int perf_session__process_event(struct perf_session *session, 1072 union perf_event *event, 1073 struct perf_tool *tool, 1074 u64 file_offset) 1075 { 1076 struct perf_sample sample; 1077 int ret; 1078 1079 if (session->header.needs_swap) 1080 event_swap(event, session->sample_id_all); 1081 1082 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1083 return -EINVAL; 1084 1085 hists__inc_nr_events(&session->hists, event->header.type); 1086 1087 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1088 return perf_session__process_user_event(session, event, tool, file_offset); 1089 1090 /* 1091 * For all kernel events we get the sample data 1092 */ 1093 ret = perf_session__parse_sample(session, event, &sample); 1094 if (ret) 1095 return ret; 1096 1097 /* Preprocess sample records - precheck callchains */ 1098 if (perf_session__preprocess_sample(session, event, &sample)) 1099 return 0; 1100 1101 if (tool->ordered_samples) { 1102 ret = perf_session_queue_event(session, event, &sample, 1103 file_offset); 1104 if (ret != -ETIME) 1105 return ret; 1106 } 1107 1108 return perf_session_deliver_event(session, event, &sample, tool, 1109 file_offset); 1110 } 1111 1112 void perf_event_header__bswap(struct perf_event_header *self) 1113 { 1114 self->type = bswap_32(self->type); 1115 self->misc = bswap_16(self->misc); 1116 self->size = bswap_16(self->size); 1117 } 1118 1119 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 1120 { 1121 return machine__findnew_thread(&session->host_machine, pid); 1122 } 1123 1124 static struct thread *perf_session__register_idle_thread(struct perf_session *self) 1125 { 1126 struct thread *thread = perf_session__findnew(self, 0); 1127 1128 if (thread == NULL || thread__set_comm(thread, "swapper")) { 1129 pr_err("problem inserting idle task.\n"); 1130 thread = NULL; 1131 } 1132 1133 return thread; 1134 } 1135 1136 static void perf_session__warn_about_errors(const struct perf_session *session, 1137 const struct perf_tool *tool) 1138 { 1139 if (tool->lost == perf_event__process_lost && 1140 session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) { 1141 ui__warning("Processed %d events and lost %d chunks!\n\n" 1142 "Check IO/CPU overload!\n\n", 1143 session->hists.stats.nr_events[0], 1144 session->hists.stats.nr_events[PERF_RECORD_LOST]); 1145 } 1146 1147 if (session->hists.stats.nr_unknown_events != 0) { 1148 ui__warning("Found %u unknown events!\n\n" 1149 "Is this an older tool processing a perf.data " 1150 "file generated by a more recent tool?\n\n" 1151 "If that is not the case, consider " 1152 "reporting to linux-kernel@vger.kernel.org.\n\n", 1153 session->hists.stats.nr_unknown_events); 1154 } 1155 1156 if (session->hists.stats.nr_unknown_id != 0) { 1157 ui__warning("%u samples with id not present in the header\n", 1158 session->hists.stats.nr_unknown_id); 1159 } 1160 1161 if (session->hists.stats.nr_invalid_chains != 0) { 1162 ui__warning("Found invalid callchains!\n\n" 1163 "%u out of %u events were discarded for this reason.\n\n" 1164 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1165 session->hists.stats.nr_invalid_chains, 1166 session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); 1167 } 1168 1169 if (session->hists.stats.nr_unprocessable_samples != 0) { 1170 ui__warning("%u unprocessable samples recorded.\n" 1171 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1172 session->hists.stats.nr_unprocessable_samples); 1173 } 1174 } 1175 1176 #define session_done() (*(volatile int *)(&session_done)) 1177 volatile int session_done; 1178 1179 static int __perf_session__process_pipe_events(struct perf_session *self, 1180 struct perf_tool *tool) 1181 { 1182 union perf_event *event; 1183 uint32_t size, cur_size = 0; 1184 void *buf = NULL; 1185 int skip = 0; 1186 u64 head; 1187 int err; 1188 void *p; 1189 1190 perf_tool__fill_defaults(tool); 1191 1192 head = 0; 1193 cur_size = sizeof(union perf_event); 1194 1195 buf = malloc(cur_size); 1196 if (!buf) 1197 return -errno; 1198 more: 1199 event = buf; 1200 err = readn(self->fd, event, sizeof(struct perf_event_header)); 1201 if (err <= 0) { 1202 if (err == 0) 1203 goto done; 1204 1205 pr_err("failed to read event header\n"); 1206 goto out_err; 1207 } 1208 1209 if (self->header.needs_swap) 1210 perf_event_header__bswap(&event->header); 1211 1212 size = event->header.size; 1213 if (size == 0) 1214 size = 8; 1215 1216 if (size > cur_size) { 1217 void *new = realloc(buf, size); 1218 if (!new) { 1219 pr_err("failed to allocate memory to read event\n"); 1220 goto out_err; 1221 } 1222 buf = new; 1223 cur_size = size; 1224 event = buf; 1225 } 1226 p = event; 1227 p += sizeof(struct perf_event_header); 1228 1229 if (size - sizeof(struct perf_event_header)) { 1230 err = readn(self->fd, p, size - sizeof(struct perf_event_header)); 1231 if (err <= 0) { 1232 if (err == 0) { 1233 pr_err("unexpected end of event stream\n"); 1234 goto done; 1235 } 1236 1237 pr_err("failed to read event data\n"); 1238 goto out_err; 1239 } 1240 } 1241 1242 if ((skip = perf_session__process_event(self, event, tool, head)) < 0) { 1243 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1244 head, event->header.size, event->header.type); 1245 err = -EINVAL; 1246 goto out_err; 1247 } 1248 1249 head += size; 1250 1251 if (skip > 0) 1252 head += skip; 1253 1254 if (!session_done()) 1255 goto more; 1256 done: 1257 err = 0; 1258 out_err: 1259 free(buf); 1260 perf_session__warn_about_errors(self, tool); 1261 perf_session_free_sample_buffers(self); 1262 return err; 1263 } 1264 1265 static union perf_event * 1266 fetch_mmaped_event(struct perf_session *session, 1267 u64 head, size_t mmap_size, char *buf) 1268 { 1269 union perf_event *event; 1270 1271 /* 1272 * Ensure we have enough space remaining to read 1273 * the size of the event in the headers. 1274 */ 1275 if (head + sizeof(event->header) > mmap_size) 1276 return NULL; 1277 1278 event = (union perf_event *)(buf + head); 1279 1280 if (session->header.needs_swap) 1281 perf_event_header__bswap(&event->header); 1282 1283 if (head + event->header.size > mmap_size) 1284 return NULL; 1285 1286 return event; 1287 } 1288 1289 int __perf_session__process_events(struct perf_session *session, 1290 u64 data_offset, u64 data_size, 1291 u64 file_size, struct perf_tool *tool) 1292 { 1293 u64 head, page_offset, file_offset, file_pos, progress_next; 1294 int err, mmap_prot, mmap_flags, map_idx = 0; 1295 size_t page_size, mmap_size; 1296 char *buf, *mmaps[8]; 1297 union perf_event *event; 1298 uint32_t size; 1299 1300 perf_tool__fill_defaults(tool); 1301 1302 page_size = sysconf(_SC_PAGESIZE); 1303 1304 page_offset = page_size * (data_offset / page_size); 1305 file_offset = page_offset; 1306 head = data_offset - page_offset; 1307 1308 if (data_offset + data_size < file_size) 1309 file_size = data_offset + data_size; 1310 1311 progress_next = file_size / 16; 1312 1313 mmap_size = session->mmap_window; 1314 if (mmap_size > file_size) 1315 mmap_size = file_size; 1316 1317 memset(mmaps, 0, sizeof(mmaps)); 1318 1319 mmap_prot = PROT_READ; 1320 mmap_flags = MAP_SHARED; 1321 1322 if (session->header.needs_swap) { 1323 mmap_prot |= PROT_WRITE; 1324 mmap_flags = MAP_PRIVATE; 1325 } 1326 remap: 1327 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd, 1328 file_offset); 1329 if (buf == MAP_FAILED) { 1330 pr_err("failed to mmap file\n"); 1331 err = -errno; 1332 goto out_err; 1333 } 1334 mmaps[map_idx] = buf; 1335 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 1336 file_pos = file_offset + head; 1337 1338 more: 1339 event = fetch_mmaped_event(session, head, mmap_size, buf); 1340 if (!event) { 1341 if (mmaps[map_idx]) { 1342 munmap(mmaps[map_idx], mmap_size); 1343 mmaps[map_idx] = NULL; 1344 } 1345 1346 page_offset = page_size * (head / page_size); 1347 file_offset += page_offset; 1348 head -= page_offset; 1349 goto remap; 1350 } 1351 1352 size = event->header.size; 1353 1354 if (size == 0 || 1355 perf_session__process_event(session, event, tool, file_pos) < 0) { 1356 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1357 file_offset + head, event->header.size, 1358 event->header.type); 1359 err = -EINVAL; 1360 goto out_err; 1361 } 1362 1363 head += size; 1364 file_pos += size; 1365 1366 if (file_pos >= progress_next) { 1367 progress_next += file_size / 16; 1368 ui_progress__update(file_pos, file_size, 1369 "Processing events..."); 1370 } 1371 1372 if (file_pos < file_size) 1373 goto more; 1374 1375 err = 0; 1376 /* do the final flush for ordered samples */ 1377 session->ordered_samples.next_flush = ULLONG_MAX; 1378 flush_sample_queue(session, tool); 1379 out_err: 1380 perf_session__warn_about_errors(session, tool); 1381 perf_session_free_sample_buffers(session); 1382 return err; 1383 } 1384 1385 int perf_session__process_events(struct perf_session *self, 1386 struct perf_tool *tool) 1387 { 1388 int err; 1389 1390 if (perf_session__register_idle_thread(self) == NULL) 1391 return -ENOMEM; 1392 1393 if (!self->fd_pipe) 1394 err = __perf_session__process_events(self, 1395 self->header.data_offset, 1396 self->header.data_size, 1397 self->size, tool); 1398 else 1399 err = __perf_session__process_pipe_events(self, tool); 1400 1401 return err; 1402 } 1403 1404 bool perf_session__has_traces(struct perf_session *self, const char *msg) 1405 { 1406 if (!(self->sample_type & PERF_SAMPLE_RAW)) { 1407 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 1408 return false; 1409 } 1410 1411 return true; 1412 } 1413 1414 int maps__set_kallsyms_ref_reloc_sym(struct map **maps, 1415 const char *symbol_name, u64 addr) 1416 { 1417 char *bracket; 1418 enum map_type i; 1419 struct ref_reloc_sym *ref; 1420 1421 ref = zalloc(sizeof(struct ref_reloc_sym)); 1422 if (ref == NULL) 1423 return -ENOMEM; 1424 1425 ref->name = strdup(symbol_name); 1426 if (ref->name == NULL) { 1427 free(ref); 1428 return -ENOMEM; 1429 } 1430 1431 bracket = strchr(ref->name, ']'); 1432 if (bracket) 1433 *bracket = '\0'; 1434 1435 ref->addr = addr; 1436 1437 for (i = 0; i < MAP__NR_TYPES; ++i) { 1438 struct kmap *kmap = map__kmap(maps[i]); 1439 kmap->ref_reloc_sym = ref; 1440 } 1441 1442 return 0; 1443 } 1444 1445 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) 1446 { 1447 return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) + 1448 __dsos__fprintf(&self->host_machine.user_dsos, fp) + 1449 machines__fprintf_dsos(&self->machines, fp); 1450 } 1451 1452 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, 1453 bool with_hits) 1454 { 1455 size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); 1456 return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits); 1457 } 1458 1459 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 1460 { 1461 struct perf_evsel *pos; 1462 size_t ret = fprintf(fp, "Aggregated stats:\n"); 1463 1464 ret += hists__fprintf_nr_events(&session->hists, fp); 1465 1466 list_for_each_entry(pos, &session->evlist->entries, node) { 1467 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); 1468 ret += hists__fprintf_nr_events(&pos->hists, fp); 1469 } 1470 1471 return ret; 1472 } 1473 1474 size_t perf_session__fprintf(struct perf_session *session, FILE *fp) 1475 { 1476 /* 1477 * FIXME: Here we have to actually print all the machines in this 1478 * session, not just the host... 1479 */ 1480 return machine__fprintf(&session->host_machine, fp); 1481 } 1482 1483 void perf_session__remove_thread(struct perf_session *session, 1484 struct thread *th) 1485 { 1486 /* 1487 * FIXME: This one makes no sense, we need to remove the thread from 1488 * the machine it belongs to, perf_session can have many machines, so 1489 * doing it always on ->host_machine is wrong. Fix when auditing all 1490 * the 'perf kvm' code. 1491 */ 1492 machine__remove_thread(&session->host_machine, th); 1493 } 1494 1495 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 1496 unsigned int type) 1497 { 1498 struct perf_evsel *pos; 1499 1500 list_for_each_entry(pos, &session->evlist->entries, node) { 1501 if (pos->attr.type == type) 1502 return pos; 1503 } 1504 return NULL; 1505 } 1506 1507 void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, 1508 struct machine *machine, int print_sym, 1509 int print_dso, int print_symoffset) 1510 { 1511 struct addr_location al; 1512 struct callchain_cursor_node *node; 1513 1514 if (perf_event__preprocess_sample(event, machine, &al, sample, 1515 NULL) < 0) { 1516 error("problem processing %d event, skipping it.\n", 1517 event->header.type); 1518 return; 1519 } 1520 1521 if (symbol_conf.use_callchain && sample->callchain) { 1522 1523 if (machine__resolve_callchain(machine, al.thread, 1524 sample->callchain, NULL) != 0) { 1525 if (verbose) 1526 error("Failed to resolve callchain. Skipping\n"); 1527 return; 1528 } 1529 callchain_cursor_commit(&callchain_cursor); 1530 1531 while (1) { 1532 node = callchain_cursor_current(&callchain_cursor); 1533 if (!node) 1534 break; 1535 1536 printf("\t%16" PRIx64, node->ip); 1537 if (print_sym) { 1538 printf(" "); 1539 symbol__fprintf_symname(node->sym, stdout); 1540 } 1541 if (print_dso) { 1542 printf(" ("); 1543 map__fprintf_dsoname(node->map, stdout); 1544 printf(")"); 1545 } 1546 printf("\n"); 1547 1548 callchain_cursor_advance(&callchain_cursor); 1549 } 1550 1551 } else { 1552 printf("%16" PRIx64, sample->ip); 1553 if (print_sym) { 1554 printf(" "); 1555 if (print_symoffset) 1556 symbol__fprintf_symname_offs(al.sym, &al, 1557 stdout); 1558 else 1559 symbol__fprintf_symname(al.sym, stdout); 1560 } 1561 1562 if (print_dso) { 1563 printf(" ("); 1564 map__fprintf_dsoname(al.map, stdout); 1565 printf(")"); 1566 } 1567 } 1568 } 1569 1570 int perf_session__cpu_bitmap(struct perf_session *session, 1571 const char *cpu_list, unsigned long *cpu_bitmap) 1572 { 1573 int i; 1574 struct cpu_map *map; 1575 1576 for (i = 0; i < PERF_TYPE_MAX; ++i) { 1577 struct perf_evsel *evsel; 1578 1579 evsel = perf_session__find_first_evtype(session, i); 1580 if (!evsel) 1581 continue; 1582 1583 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) { 1584 pr_err("File does not contain CPU events. " 1585 "Remove -c option to proceed.\n"); 1586 return -1; 1587 } 1588 } 1589 1590 map = cpu_map__new(cpu_list); 1591 if (map == NULL) { 1592 pr_err("Invalid cpu_list\n"); 1593 return -1; 1594 } 1595 1596 for (i = 0; i < map->nr; i++) { 1597 int cpu = map->map[i]; 1598 1599 if (cpu >= MAX_NR_CPUS) { 1600 pr_err("Requested CPU %d too large. " 1601 "Consider raising MAX_NR_CPUS\n", cpu); 1602 return -1; 1603 } 1604 1605 set_bit(cpu, cpu_bitmap); 1606 } 1607 1608 return 0; 1609 } 1610 1611 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 1612 bool full) 1613 { 1614 struct stat st; 1615 int ret; 1616 1617 if (session == NULL || fp == NULL) 1618 return; 1619 1620 ret = fstat(session->fd, &st); 1621 if (ret == -1) 1622 return; 1623 1624 fprintf(fp, "# ========\n"); 1625 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime)); 1626 perf_header__fprintf_info(session, fp, full); 1627 fprintf(fp, "# ========\n#\n"); 1628 } 1629 1630 1631 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 1632 const struct perf_evsel_str_handler *assocs, 1633 size_t nr_assocs) 1634 { 1635 struct perf_evlist *evlist = session->evlist; 1636 struct event_format *format; 1637 struct perf_evsel *evsel; 1638 char *tracepoint, *name; 1639 size_t i; 1640 int err; 1641 1642 for (i = 0; i < nr_assocs; i++) { 1643 err = -ENOMEM; 1644 tracepoint = strdup(assocs[i].name); 1645 if (tracepoint == NULL) 1646 goto out; 1647 1648 err = -ENOENT; 1649 name = strchr(tracepoint, ':'); 1650 if (name == NULL) 1651 goto out_free; 1652 1653 *name++ = '\0'; 1654 format = pevent_find_event_by_name(session->pevent, 1655 tracepoint, name); 1656 if (format == NULL) { 1657 /* 1658 * Adding a handler for an event not in the session, 1659 * just ignore it. 1660 */ 1661 goto next; 1662 } 1663 1664 evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id); 1665 if (evsel == NULL) 1666 goto next; 1667 1668 err = -EEXIST; 1669 if (evsel->handler.func != NULL) 1670 goto out_free; 1671 evsel->handler.func = assocs[i].handler; 1672 next: 1673 free(tracepoint); 1674 } 1675 1676 err = 0; 1677 out: 1678 return err; 1679 1680 out_free: 1681 free(tracepoint); 1682 goto out; 1683 } 1684