#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "event-parse.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_session__read_header(self, self->fd) < 0) {
			pr_err("incompatible file format (rerun with -v to learn more)\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_session__read_header(self, self->fd) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_type(self->evlist)) {
		pr_err("non-matching sample_type\n");
		goto out_close;
	}

	if (!perf_evlist__valid_sample_id_all(self->evlist)) {
		pr_err("non-matching sample_id_all\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_evlist__sample_type(self->evlist);
	self->sample_size = __perf_evsel__sample_size(self->sample_type);
	self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
	self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist);
	self->host_machine.id_hdr_size = self->id_hdr_size;
	machines__set_id_hdr_size(&self->machines, self->id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_tool *tool)
{
	struct perf_session *self;
	struct stat st;
	size_t len;

	if (!filename || !strlen(filename)) {
		if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
			filename = "-";
		else
			filename = "perf.data";
	}

	len = strlen(filename);
	self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	memcpy(self->filename, filename, len);
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);
	hists__init(&self->hists);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
		perf_session__update_sample_type(self);
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

out:
	return self;
out_delete:
	perf_session__delete(self);
	return NULL;
}
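/*
 * Illustrative lifecycle (a sketch, not code built in this file): a typical
 * read-mode caller pairs perf_session__new() with
 * perf_session__process_events() and perf_session__delete().  "report_tool"
 * stands in for a hypothetical, fully set up struct perf_tool.
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false,
 *				    &report_tool);
 *	if (session == NULL)
 *		return -ENOMEM;
 *
 *	err = perf_session__process_events(session, &report_tool);
 *	perf_session__delete(session);
 */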
static void machine__delete_dead_threads(struct machine *machine)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->host_machine);
}

static void machine__delete_threads(struct machine *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/*
		 * Grab the next node before erasing this one: rb_next()
		 * must not be called on a node that was already removed
		 * from the tree.
		 */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->host_machine);
}

void perf_session__delete(struct perf_session *self)
{
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void machine__remove_thread(struct machine *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

static const u8 cpumodes[] = {
	PERF_RECORD_MISC_USER,
	PERF_RECORD_MISC_KERNEL,
	PERF_RECORD_MISC_GUEST_USER,
	PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))

static void ip__resolve_ams(struct machine *self, struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;
	size_t i;
	u8 m;

	memset(&al, 0, sizeof(al));

	for (i = 0; i < NCPUMODES; i++) {
		m = cpumodes[i];
		/*
		 * We cannot use the header.misc hint to determine whether a
		 * branch stack address is user, kernel, guest or hypervisor.
		 * Branches may straddle the kernel/user/hypervisor boundaries.
		 * Thus, we have to try consecutively until we find a match
		 * or else the symbol is unknown.
		 */
		thread__find_addr_location(thread, self, m, MAP__FUNCTION,
					   ip, &al, NULL);
		if (al.sym)
			break;
	}

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
}

struct branch_info *machine__resolve_bstack(struct machine *self,
					    struct thread *thr,
					    struct branch_stack *bs)
{
	struct branch_info *bi;
	unsigned int i;

	bi = calloc(bs->nr, sizeof(struct branch_info));
	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}
	return bi;
}
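/*
 * Example (sketch): a consumer resolving the branch stack attached to a
 * sample; the result is a calloc'ed array that the caller owns and must
 * free.
 *
 *	struct branch_info *bi;
 *
 *	bi = machine__resolve_bstack(machine, al.thread,
 *				     sample->branch_stack);
 *	if (bi != NULL) {
 *		... use bi[0] .. bi[sample->branch_stack->nr - 1] ...
 *		free(bi);
 *	}
 */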
int machine__resolve_callchain(struct machine *self,
			       struct thread *thread,
			       struct ip_callchain *chain,
			       struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&callchain_cursor);

	if (chain->nr > PERF_MAX_STACK_DEPTH) {
		pr_warning("corrupted callchain. skipping...\n");
		return 0;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;
		struct addr_location al;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(&callchain_cursor);
				return 0;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}

static int process_event_synth_tracing_data_stub(union perf_event *event __used,
						 struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(union perf_event *event __used,
					 struct perf_evlist **pevlist __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __used,
				     union perf_event *event __used,
				     struct perf_sample *sample __used,
				     struct perf_evsel *evsel __used,
				     struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __used,
			      union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct machine *machine __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __used,
				       union perf_event *event __used,
				       struct perf_session *perf_session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_type_stub(struct perf_tool *tool __used,
				   union perf_event *event __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

static void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_type == NULL)
		tool->event_type = process_event_type_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}
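/*
 * Example (sketch): thanks to the defaults above, a tool only declares the
 * callbacks it actually implements.  The handler name is hypothetical.
 *
 *	static struct perf_tool mytool = {
 *		.sample		 = process_sample_event,
 *		.ordered_samples = true,
 *	};
 *
 * Everything left NULL is filled in by perf_tool__fill_defaults() before
 * event processing starts.
 */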
void mem_bswap_32(void *src, int byte_size)
{
	u32 *m = src;

	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(u32);
		++m;
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __used)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);

	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}
/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ data file section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
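/*
 * Worked example (illustrative): revbyte(0x80) == 0x01 and
 * revbyte(0x01) == 0x80.  A flag that the writer stored in the first
 * bit-field slot of a byte therefore lands in the first slot for the
 * reader again once swap_bitfield() has mirrored every byte.
 */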
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __used)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __used)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __used)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
			       struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	unsigned idx = 0, progress_next = os->nr_samples / 16;
	int ret;

	if (!tool->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ret = perf_session__parse_sample(s, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else
			perf_session_deliver_event(s, iter->event, &sample, tool,
						   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		if (++idx >= progress_next) {
			progress_next += os->nr_samples / 16;
			ui_progress__update(idx, os->nr_samples,
					    "Processing time ordered events...");
		}
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __used,
				  struct perf_session *session)
{
	flush_sample_queue(session, tool);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}
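/*
 * Illustrative stream (a sketch): the first FINISHED_ROUND only arms
 * next_flush, so samples queued before it are delivered at the second one.
 *
 *	SAMPLE t=1, SAMPLE t=2	(queued)
 *	FINISHED_ROUND		(next_flush was 0: nothing flushed,
 *				 next_flush becomes 2)
 *	SAMPLE t=3		(queued)
 *	FINISHED_ROUND		(delivers t=1 and t=2, next_flush becomes 3)
 */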
/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		/* Reuse an entry recycled by flush_sample_queue(). */
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		/*
		 * Allocate a new chunk; slot 0 is used only to link the
		 * chunk into the to_free list, so hand out slot 1 first.
		 */
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}
static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (session->sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);
}

static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
				       union perf_event *event)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP)
			pid = event->mmap.pid;
		else
			pid = event->ip.pid;

		return perf_session__findnew_machine(session, pid);
	}

	return perf_session__find_host_machine(session);
}
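/*
 * E.g. (a sketch of the mapping above): an event tagged
 * PERF_RECORD_MISC_GUEST_KERNEL carries the host-side pid of the
 * hypervisor process (e.g. qemu), which is also the id the guest machine
 * was registered under, so the event is attributed to that guest rather
 * than to the host machine.
 */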
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc, KISS for
		 * now tho.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future probably it'll be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		if (evsel == NULL) {
			++session->hists.stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->hists.stats.nr_unprocessable_samples;
			return 0;
		}
		return tool->sample(tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->hists.stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(event, &session->evlist);
		if (err == 0)
			perf_session__update_sample_type(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return tool->event_type(tool, event);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(session->fd, file_offset, SEEK_SET);
		return tool->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}
static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, session->sample_id_all);

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_session__parse_sample(session, event, &sample);
	if (ret)
		return ret;

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		/*
		 * -ETIME means the event carries no usable timestamp, so
		 * fall through and deliver it immediately instead.
		 */
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->host_machine, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.nr_events[0],
			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->hists.stats.nr_unknown_id);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->hists.stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->hists.stats.nr_unprocessable_samples);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;
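/*
 * session_done is typically set from a signal handler in the calling tool
 * (e.g. on SIGINT) so the pipe processing loop below can terminate cleanly;
 * the volatile access keeps the check from being hoisted out of the loop.
 */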
static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_tool *tool)
{
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(self->fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	/* a zero size is bogus; assume a bare header (8 bytes) */
	if (size == 0)
		size = 8;

	if (size > cur_size) {
		void *new = realloc(buf, size);

		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(self, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	free(buf);
	perf_session__warn_about_errors(self, tool);
	perf_session_free_sample_buffers(self);
	return err;
}
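/*
 * Worked example for the mmap windowing below (illustrative): with a
 * 4096-byte page size and data_offset == 4200,
 * __perf_session__process_events() computes page_offset == 4096 and
 * head == 104, i.e. the window is mapped from the page boundary and the
 * first event is read at buf + 104, so the data offset itself never has
 * to be page aligned.
 */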
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size)
		return NULL;

	return event;
}

int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_tool__fill_defaults(tool);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(file_pos, file_size,
				    "Processing events...");
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, tool);
out_err:
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}
int perf_session__process_events(struct perf_session *self,
				 struct perf_tool *tool)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, tool);
	else
		err = __perf_session__process_pipe_events(self, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
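/*
 * Example (sketch): callers usually install the kernel reference symbol
 * used for relocation checking on the vmlinux maps, e.g.
 *
 *	maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
 *					 "_text", addr);
 *
 * where "_text" (or "_stext") is the symbol whose kallsyms address is
 * later compared against the one found at report time.
 */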
size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += hists__fprintf_nr_events(&session->hists, fp);

	list_for_each_entry(pos, &session->evlist->entries, node) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += hists__fprintf_nr_events(&pos->hists, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->host_machine, fp);
}

void perf_session__remove_thread(struct perf_session *session,
				 struct thread *th)
{
	/*
	 * FIXME: This one makes no sense, we need to remove the thread from
	 * the machine it belongs to, perf_session can have many machines, so
	 * doing it always on ->host_machine is wrong.  Fix when auditing all
	 * the 'perf kvm' code.
	 */
	machine__remove_thread(&session->host_machine, th);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	list_for_each_entry(pos, &session->evlist->entries, node) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
			  struct machine *machine, int print_sym,
			  int print_dso, int print_symoffset)
{
	struct addr_location al;
	struct callchain_cursor_node *node;

	if (perf_event__preprocess_sample(event, machine, &al, sample,
					  NULL) < 0) {
		error("problem processing %d event, skipping it.\n",
		      event->header.type);
		return;
	}

	if (symbol_conf.use_callchain && sample->callchain) {

		if (machine__resolve_callchain(machine, al.thread,
					       sample->callchain, NULL) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		while (1) {
			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			printf("\t%16" PRIx64, node->ip);
			if (print_sym) {
				printf(" ");
				symbol__fprintf_symname(node->sym, stdout);
			}
			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}
			printf("\n");

			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		printf("%16" PRIx64, sample->ip);
		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al.sym, &al,
							     stdout);
			else
				symbol__fprintf_symname(al.sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al.map, stdout);
			printf(")");
		}
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			return -1;
		}

		set_bit(cpu, cpu_bitmap);
	}

	return 0;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int ret;

	if (session == NULL || fp == NULL)
		return;

	ret = fstat(session->fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evlist *evlist = session->evlist;
	struct event_format *format;
	struct perf_evsel *evsel;
	char *tracepoint, *name;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		err = -ENOMEM;
		tracepoint = strdup(assocs[i].name);
		if (tracepoint == NULL)
			goto out;

		err = -ENOENT;
		name = strchr(tracepoint, ':');
		if (name == NULL)
			goto out_free;

		*name++ = '\0';
		format = pevent_find_event_by_name(session->pevent,
						   tracepoint, name);
		if (format == NULL) {
			/*
			 * Adding a handler for an event not in the session,
			 * just ignore it.
			 */
			goto next;
		}

		evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
		if (evsel == NULL)
			goto next;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out_free;
		evsel->handler.func = assocs[i].handler;
next:
		free(tracepoint);
	}

	err = 0;
out:
	return err;

out_free:
	free(tracepoint);
	goto out;
}
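/*
 * Example (sketch): a tool maps tracepoint names to handlers before
 * processing; the handler functions here are hypothetical.
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch_event },
 *		{ "sched:sched_wakeup", process_sched_wakeup_event },
 *	};
 *
 *	err = __perf_session__set_tracepoints_handlers(session, handlers,
 *						       ARRAY_SIZE(handlers));
 *
 * Tracepoints not present in the session are silently skipped, per the
 * comment above.
 */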