1 #define _FILE_OFFSET_BITS 64 2 3 #include <linux/kernel.h> 4 5 #include <byteswap.h> 6 #include <unistd.h> 7 #include <sys/types.h> 8 #include <sys/mman.h> 9 10 #include "evlist.h" 11 #include "evsel.h" 12 #include "session.h" 13 #include "sort.h" 14 #include "util.h" 15 16 static int perf_session__open(struct perf_session *self, bool force) 17 { 18 struct stat input_stat; 19 20 if (!strcmp(self->filename, "-")) { 21 self->fd_pipe = true; 22 self->fd = STDIN_FILENO; 23 24 if (perf_session__read_header(self, self->fd) < 0) 25 pr_err("incompatible file format"); 26 27 return 0; 28 } 29 30 self->fd = open(self->filename, O_RDONLY); 31 if (self->fd < 0) { 32 int err = errno; 33 34 pr_err("failed to open %s: %s", self->filename, strerror(err)); 35 if (err == ENOENT && !strcmp(self->filename, "perf.data")) 36 pr_err(" (try 'perf record' first)"); 37 pr_err("\n"); 38 return -errno; 39 } 40 41 if (fstat(self->fd, &input_stat) < 0) 42 goto out_close; 43 44 if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { 45 pr_err("file %s not owned by current user or root\n", 46 self->filename); 47 goto out_close; 48 } 49 50 if (!input_stat.st_size) { 51 pr_info("zero-sized file (%s), nothing to do!\n", 52 self->filename); 53 goto out_close; 54 } 55 56 if (perf_session__read_header(self, self->fd) < 0) { 57 pr_err("incompatible file format"); 58 goto out_close; 59 } 60 61 self->size = input_stat.st_size; 62 return 0; 63 64 out_close: 65 close(self->fd); 66 self->fd = -1; 67 return -1; 68 } 69 70 static void perf_session__id_header_size(struct perf_session *session) 71 { 72 struct perf_sample *data; 73 u64 sample_type = session->sample_type; 74 u16 size = 0; 75 76 if (!session->sample_id_all) 77 goto out; 78 79 if (sample_type & PERF_SAMPLE_TID) 80 size += sizeof(data->tid) * 2; 81 82 if (sample_type & PERF_SAMPLE_TIME) 83 size += sizeof(data->time); 84 85 if (sample_type & PERF_SAMPLE_ID) 86 size += sizeof(data->id); 87 88 if (sample_type & PERF_SAMPLE_STREAM_ID) 89 size += sizeof(data->stream_id); 90 91 if (sample_type & PERF_SAMPLE_CPU) 92 size += sizeof(data->cpu) * 2; 93 out: 94 session->id_hdr_size = size; 95 } 96 97 void perf_session__update_sample_type(struct perf_session *self) 98 { 99 self->sample_type = perf_evlist__sample_type(self->evlist); 100 self->sample_id_all = perf_evlist__sample_id_all(self->evlist); 101 perf_session__id_header_size(self); 102 } 103 104 int perf_session__create_kernel_maps(struct perf_session *self) 105 { 106 int ret = machine__create_kernel_maps(&self->host_machine); 107 108 if (ret >= 0) 109 ret = machines__create_guest_kernel_maps(&self->machines); 110 return ret; 111 } 112 113 static void perf_session__destroy_kernel_maps(struct perf_session *self) 114 { 115 machine__destroy_kernel_maps(&self->host_machine); 116 machines__destroy_guest_kernel_maps(&self->machines); 117 } 118 119 struct perf_session *perf_session__new(const char *filename, int mode, 120 bool force, bool repipe, 121 struct perf_event_ops *ops) 122 { 123 size_t len = filename ? strlen(filename) + 1 : 0; 124 struct perf_session *self = zalloc(sizeof(*self) + len); 125 126 if (self == NULL) 127 goto out; 128 129 memcpy(self->filename, filename, len); 130 self->threads = RB_ROOT; 131 INIT_LIST_HEAD(&self->dead_threads); 132 self->last_match = NULL; 133 /* 134 * On 64bit we can mmap the data file in one go. No need for tiny mmap 135 * slices. On 32bit we use 32MB. 136 */ 137 #if BITS_PER_LONG == 64 138 self->mmap_window = ULLONG_MAX; 139 #else 140 self->mmap_window = 32 * 1024 * 1024ULL; 141 #endif 142 self->machines = RB_ROOT; 143 self->repipe = repipe; 144 INIT_LIST_HEAD(&self->ordered_samples.samples); 145 INIT_LIST_HEAD(&self->ordered_samples.sample_cache); 146 INIT_LIST_HEAD(&self->ordered_samples.to_free); 147 machine__init(&self->host_machine, "", HOST_KERNEL_ID); 148 149 if (mode == O_RDONLY) { 150 if (perf_session__open(self, force) < 0) 151 goto out_delete; 152 perf_session__update_sample_type(self); 153 } else if (mode == O_WRONLY) { 154 /* 155 * In O_RDONLY mode this will be performed when reading the 156 * kernel MMAP event, in perf_event__process_mmap(). 157 */ 158 if (perf_session__create_kernel_maps(self) < 0) 159 goto out_delete; 160 } 161 162 if (ops && ops->ordering_requires_timestamps && 163 ops->ordered_samples && !self->sample_id_all) { 164 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 165 ops->ordered_samples = false; 166 } 167 168 out: 169 return self; 170 out_delete: 171 perf_session__delete(self); 172 return NULL; 173 } 174 175 static void perf_session__delete_dead_threads(struct perf_session *self) 176 { 177 struct thread *n, *t; 178 179 list_for_each_entry_safe(t, n, &self->dead_threads, node) { 180 list_del(&t->node); 181 thread__delete(t); 182 } 183 } 184 185 static void perf_session__delete_threads(struct perf_session *self) 186 { 187 struct rb_node *nd = rb_first(&self->threads); 188 189 while (nd) { 190 struct thread *t = rb_entry(nd, struct thread, rb_node); 191 192 rb_erase(&t->rb_node, &self->threads); 193 nd = rb_next(nd); 194 thread__delete(t); 195 } 196 } 197 198 void perf_session__delete(struct perf_session *self) 199 { 200 perf_session__destroy_kernel_maps(self); 201 perf_session__delete_dead_threads(self); 202 perf_session__delete_threads(self); 203 machine__exit(&self->host_machine); 204 close(self->fd); 205 free(self); 206 } 207 208 void perf_session__remove_thread(struct perf_session *self, struct thread *th) 209 { 210 self->last_match = NULL; 211 rb_erase(&th->rb_node, &self->threads); 212 /* 213 * We may have references to this thread, for instance in some hist_entry 214 * instances, so just move them to a separate list. 215 */ 216 list_add_tail(&th->node, &self->dead_threads); 217 } 218 219 static bool symbol__match_parent_regex(struct symbol *sym) 220 { 221 if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0)) 222 return 1; 223 224 return 0; 225 } 226 227 int perf_session__resolve_callchain(struct perf_session *self, 228 struct thread *thread, 229 struct ip_callchain *chain, 230 struct symbol **parent) 231 { 232 u8 cpumode = PERF_RECORD_MISC_USER; 233 unsigned int i; 234 int err; 235 236 callchain_cursor_reset(&self->callchain_cursor); 237 238 for (i = 0; i < chain->nr; i++) { 239 u64 ip = chain->ips[i]; 240 struct addr_location al; 241 242 if (ip >= PERF_CONTEXT_MAX) { 243 switch (ip) { 244 case PERF_CONTEXT_HV: 245 cpumode = PERF_RECORD_MISC_HYPERVISOR; break; 246 case PERF_CONTEXT_KERNEL: 247 cpumode = PERF_RECORD_MISC_KERNEL; break; 248 case PERF_CONTEXT_USER: 249 cpumode = PERF_RECORD_MISC_USER; break; 250 default: 251 break; 252 } 253 continue; 254 } 255 256 al.filtered = false; 257 thread__find_addr_location(thread, self, cpumode, 258 MAP__FUNCTION, thread->pid, ip, &al, NULL); 259 if (al.sym != NULL) { 260 if (sort__has_parent && !*parent && 261 symbol__match_parent_regex(al.sym)) 262 *parent = al.sym; 263 if (!symbol_conf.use_callchain) 264 break; 265 } 266 267 err = callchain_cursor_append(&self->callchain_cursor, 268 ip, al.map, al.sym); 269 if (err) 270 return err; 271 } 272 273 return 0; 274 } 275 276 static int process_event_synth_stub(union perf_event *event __used, 277 struct perf_session *session __used) 278 { 279 dump_printf(": unhandled!\n"); 280 return 0; 281 } 282 283 static int process_event_stub(union perf_event *event __used, 284 struct perf_sample *sample __used, 285 struct perf_session *session __used) 286 { 287 dump_printf(": unhandled!\n"); 288 return 0; 289 } 290 291 static int process_finished_round_stub(union perf_event *event __used, 292 struct perf_session *session __used, 293 struct perf_event_ops *ops __used) 294 { 295 dump_printf(": unhandled!\n"); 296 return 0; 297 } 298 299 static int process_finished_round(union perf_event *event, 300 struct perf_session *session, 301 struct perf_event_ops *ops); 302 303 static void perf_event_ops__fill_defaults(struct perf_event_ops *handler) 304 { 305 if (handler->sample == NULL) 306 handler->sample = process_event_stub; 307 if (handler->mmap == NULL) 308 handler->mmap = process_event_stub; 309 if (handler->comm == NULL) 310 handler->comm = process_event_stub; 311 if (handler->fork == NULL) 312 handler->fork = process_event_stub; 313 if (handler->exit == NULL) 314 handler->exit = process_event_stub; 315 if (handler->lost == NULL) 316 handler->lost = perf_event__process_lost; 317 if (handler->read == NULL) 318 handler->read = process_event_stub; 319 if (handler->throttle == NULL) 320 handler->throttle = process_event_stub; 321 if (handler->unthrottle == NULL) 322 handler->unthrottle = process_event_stub; 323 if (handler->attr == NULL) 324 handler->attr = process_event_synth_stub; 325 if (handler->event_type == NULL) 326 handler->event_type = process_event_synth_stub; 327 if (handler->tracing_data == NULL) 328 handler->tracing_data = process_event_synth_stub; 329 if (handler->build_id == NULL) 330 handler->build_id = process_event_synth_stub; 331 if (handler->finished_round == NULL) { 332 if (handler->ordered_samples) 333 handler->finished_round = process_finished_round; 334 else 335 handler->finished_round = process_finished_round_stub; 336 } 337 } 338 339 void mem_bswap_64(void *src, int byte_size) 340 { 341 u64 *m = src; 342 343 while (byte_size > 0) { 344 *m = bswap_64(*m); 345 byte_size -= sizeof(u64); 346 ++m; 347 } 348 } 349 350 static void perf_event__all64_swap(union perf_event *event) 351 { 352 struct perf_event_header *hdr = &event->header; 353 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 354 } 355 356 static void perf_event__comm_swap(union perf_event *event) 357 { 358 event->comm.pid = bswap_32(event->comm.pid); 359 event->comm.tid = bswap_32(event->comm.tid); 360 } 361 362 static void perf_event__mmap_swap(union perf_event *event) 363 { 364 event->mmap.pid = bswap_32(event->mmap.pid); 365 event->mmap.tid = bswap_32(event->mmap.tid); 366 event->mmap.start = bswap_64(event->mmap.start); 367 event->mmap.len = bswap_64(event->mmap.len); 368 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 369 } 370 371 static void perf_event__task_swap(union perf_event *event) 372 { 373 event->fork.pid = bswap_32(event->fork.pid); 374 event->fork.tid = bswap_32(event->fork.tid); 375 event->fork.ppid = bswap_32(event->fork.ppid); 376 event->fork.ptid = bswap_32(event->fork.ptid); 377 event->fork.time = bswap_64(event->fork.time); 378 } 379 380 static void perf_event__read_swap(union perf_event *event) 381 { 382 event->read.pid = bswap_32(event->read.pid); 383 event->read.tid = bswap_32(event->read.tid); 384 event->read.value = bswap_64(event->read.value); 385 event->read.time_enabled = bswap_64(event->read.time_enabled); 386 event->read.time_running = bswap_64(event->read.time_running); 387 event->read.id = bswap_64(event->read.id); 388 } 389 390 static void perf_event__attr_swap(union perf_event *event) 391 { 392 size_t size; 393 394 event->attr.attr.type = bswap_32(event->attr.attr.type); 395 event->attr.attr.size = bswap_32(event->attr.attr.size); 396 event->attr.attr.config = bswap_64(event->attr.attr.config); 397 event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period); 398 event->attr.attr.sample_type = bswap_64(event->attr.attr.sample_type); 399 event->attr.attr.read_format = bswap_64(event->attr.attr.read_format); 400 event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events); 401 event->attr.attr.bp_type = bswap_32(event->attr.attr.bp_type); 402 event->attr.attr.bp_addr = bswap_64(event->attr.attr.bp_addr); 403 event->attr.attr.bp_len = bswap_64(event->attr.attr.bp_len); 404 405 size = event->header.size; 406 size -= (void *)&event->attr.id - (void *)event; 407 mem_bswap_64(event->attr.id, size); 408 } 409 410 static void perf_event__event_type_swap(union perf_event *event) 411 { 412 event->event_type.event_type.event_id = 413 bswap_64(event->event_type.event_type.event_id); 414 } 415 416 static void perf_event__tracing_data_swap(union perf_event *event) 417 { 418 event->tracing_data.size = bswap_32(event->tracing_data.size); 419 } 420 421 typedef void (*perf_event__swap_op)(union perf_event *event); 422 423 static perf_event__swap_op perf_event__swap_ops[] = { 424 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 425 [PERF_RECORD_COMM] = perf_event__comm_swap, 426 [PERF_RECORD_FORK] = perf_event__task_swap, 427 [PERF_RECORD_EXIT] = perf_event__task_swap, 428 [PERF_RECORD_LOST] = perf_event__all64_swap, 429 [PERF_RECORD_READ] = perf_event__read_swap, 430 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 431 [PERF_RECORD_HEADER_ATTR] = perf_event__attr_swap, 432 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 433 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 434 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 435 [PERF_RECORD_HEADER_MAX] = NULL, 436 }; 437 438 struct sample_queue { 439 u64 timestamp; 440 u64 file_offset; 441 union perf_event *event; 442 struct list_head list; 443 }; 444 445 static void perf_session_free_sample_buffers(struct perf_session *session) 446 { 447 struct ordered_samples *os = &session->ordered_samples; 448 449 while (!list_empty(&os->to_free)) { 450 struct sample_queue *sq; 451 452 sq = list_entry(os->to_free.next, struct sample_queue, list); 453 list_del(&sq->list); 454 free(sq); 455 } 456 } 457 458 static int perf_session_deliver_event(struct perf_session *session, 459 union perf_event *event, 460 struct perf_sample *sample, 461 struct perf_event_ops *ops, 462 u64 file_offset); 463 464 static void flush_sample_queue(struct perf_session *s, 465 struct perf_event_ops *ops) 466 { 467 struct ordered_samples *os = &s->ordered_samples; 468 struct list_head *head = &os->samples; 469 struct sample_queue *tmp, *iter; 470 struct perf_sample sample; 471 u64 limit = os->next_flush; 472 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; 473 474 if (!ops->ordered_samples || !limit) 475 return; 476 477 list_for_each_entry_safe(iter, tmp, head, list) { 478 if (iter->timestamp > limit) 479 break; 480 481 perf_session__parse_sample(s, iter->event, &sample); 482 perf_session_deliver_event(s, iter->event, &sample, ops, 483 iter->file_offset); 484 485 os->last_flush = iter->timestamp; 486 list_del(&iter->list); 487 list_add(&iter->list, &os->sample_cache); 488 } 489 490 if (list_empty(head)) { 491 os->last_sample = NULL; 492 } else if (last_ts <= limit) { 493 os->last_sample = 494 list_entry(head->prev, struct sample_queue, list); 495 } 496 } 497 498 /* 499 * When perf record finishes a pass on every buffers, it records this pseudo 500 * event. 501 * We record the max timestamp t found in the pass n. 502 * Assuming these timestamps are monotonic across cpus, we know that if 503 * a buffer still has events with timestamps below t, they will be all 504 * available and then read in the pass n + 1. 505 * Hence when we start to read the pass n + 2, we can safely flush every 506 * events with timestamps below t. 507 * 508 * ============ PASS n ================= 509 * CPU 0 | CPU 1 510 * | 511 * cnt1 timestamps | cnt2 timestamps 512 * 1 | 2 513 * 2 | 3 514 * - | 4 <--- max recorded 515 * 516 * ============ PASS n + 1 ============== 517 * CPU 0 | CPU 1 518 * | 519 * cnt1 timestamps | cnt2 timestamps 520 * 3 | 5 521 * 4 | 6 522 * 5 | 7 <---- max recorded 523 * 524 * Flush every events below timestamp 4 525 * 526 * ============ PASS n + 2 ============== 527 * CPU 0 | CPU 1 528 * | 529 * cnt1 timestamps | cnt2 timestamps 530 * 6 | 8 531 * 7 | 9 532 * - | 10 533 * 534 * Flush every events below timestamp 7 535 * etc... 536 */ 537 static int process_finished_round(union perf_event *event __used, 538 struct perf_session *session, 539 struct perf_event_ops *ops) 540 { 541 flush_sample_queue(session, ops); 542 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; 543 544 return 0; 545 } 546 547 /* The queue is ordered by time */ 548 static void __queue_event(struct sample_queue *new, struct perf_session *s) 549 { 550 struct ordered_samples *os = &s->ordered_samples; 551 struct sample_queue *sample = os->last_sample; 552 u64 timestamp = new->timestamp; 553 struct list_head *p; 554 555 os->last_sample = new; 556 557 if (!sample) { 558 list_add(&new->list, &os->samples); 559 os->max_timestamp = timestamp; 560 return; 561 } 562 563 /* 564 * last_sample might point to some random place in the list as it's 565 * the last queued event. We expect that the new event is close to 566 * this. 567 */ 568 if (sample->timestamp <= timestamp) { 569 while (sample->timestamp <= timestamp) { 570 p = sample->list.next; 571 if (p == &os->samples) { 572 list_add_tail(&new->list, &os->samples); 573 os->max_timestamp = timestamp; 574 return; 575 } 576 sample = list_entry(p, struct sample_queue, list); 577 } 578 list_add_tail(&new->list, &sample->list); 579 } else { 580 while (sample->timestamp > timestamp) { 581 p = sample->list.prev; 582 if (p == &os->samples) { 583 list_add(&new->list, &os->samples); 584 return; 585 } 586 sample = list_entry(p, struct sample_queue, list); 587 } 588 list_add(&new->list, &sample->list); 589 } 590 } 591 592 #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) 593 594 static int perf_session_queue_event(struct perf_session *s, union perf_event *event, 595 struct perf_sample *sample, u64 file_offset) 596 { 597 struct ordered_samples *os = &s->ordered_samples; 598 struct list_head *sc = &os->sample_cache; 599 u64 timestamp = sample->time; 600 struct sample_queue *new; 601 602 if (!timestamp || timestamp == ~0ULL) 603 return -ETIME; 604 605 if (timestamp < s->ordered_samples.last_flush) { 606 printf("Warning: Timestamp below last timeslice flush\n"); 607 return -EINVAL; 608 } 609 610 if (!list_empty(sc)) { 611 new = list_entry(sc->next, struct sample_queue, list); 612 list_del(&new->list); 613 } else if (os->sample_buffer) { 614 new = os->sample_buffer + os->sample_buffer_idx; 615 if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) 616 os->sample_buffer = NULL; 617 } else { 618 os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); 619 if (!os->sample_buffer) 620 return -ENOMEM; 621 list_add(&os->sample_buffer->list, &os->to_free); 622 os->sample_buffer_idx = 2; 623 new = os->sample_buffer + 1; 624 } 625 626 new->timestamp = timestamp; 627 new->file_offset = file_offset; 628 new->event = event; 629 630 __queue_event(new, s); 631 632 return 0; 633 } 634 635 static void callchain__printf(struct perf_sample *sample) 636 { 637 unsigned int i; 638 639 printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); 640 641 for (i = 0; i < sample->callchain->nr; i++) 642 printf("..... %2d: %016" PRIx64 "\n", 643 i, sample->callchain->ips[i]); 644 } 645 646 static void perf_session__print_tstamp(struct perf_session *session, 647 union perf_event *event, 648 struct perf_sample *sample) 649 { 650 if (event->header.type != PERF_RECORD_SAMPLE && 651 !session->sample_id_all) { 652 fputs("-1 -1 ", stdout); 653 return; 654 } 655 656 if ((session->sample_type & PERF_SAMPLE_CPU)) 657 printf("%u ", sample->cpu); 658 659 if (session->sample_type & PERF_SAMPLE_TIME) 660 printf("%" PRIu64 " ", sample->time); 661 } 662 663 static void dump_event(struct perf_session *session, union perf_event *event, 664 u64 file_offset, struct perf_sample *sample) 665 { 666 if (!dump_trace) 667 return; 668 669 printf("\n%#" PRIx64 " [%#x]: event: %d\n", 670 file_offset, event->header.size, event->header.type); 671 672 trace_event(event); 673 674 if (sample) 675 perf_session__print_tstamp(session, event, sample); 676 677 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 678 event->header.size, perf_event__name(event->header.type)); 679 } 680 681 static void dump_sample(struct perf_session *session, union perf_event *event, 682 struct perf_sample *sample) 683 { 684 if (!dump_trace) 685 return; 686 687 printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n", 688 event->header.misc, sample->pid, sample->tid, sample->ip, 689 sample->period); 690 691 if (session->sample_type & PERF_SAMPLE_CALLCHAIN) 692 callchain__printf(sample); 693 } 694 695 static int perf_session_deliver_event(struct perf_session *session, 696 union perf_event *event, 697 struct perf_sample *sample, 698 struct perf_event_ops *ops, 699 u64 file_offset) 700 { 701 dump_event(session, event, file_offset, sample); 702 703 switch (event->header.type) { 704 case PERF_RECORD_SAMPLE: 705 dump_sample(session, event, sample); 706 return ops->sample(event, sample, session); 707 case PERF_RECORD_MMAP: 708 return ops->mmap(event, sample, session); 709 case PERF_RECORD_COMM: 710 return ops->comm(event, sample, session); 711 case PERF_RECORD_FORK: 712 return ops->fork(event, sample, session); 713 case PERF_RECORD_EXIT: 714 return ops->exit(event, sample, session); 715 case PERF_RECORD_LOST: 716 return ops->lost(event, sample, session); 717 case PERF_RECORD_READ: 718 return ops->read(event, sample, session); 719 case PERF_RECORD_THROTTLE: 720 return ops->throttle(event, sample, session); 721 case PERF_RECORD_UNTHROTTLE: 722 return ops->unthrottle(event, sample, session); 723 default: 724 ++session->hists.stats.nr_unknown_events; 725 return -1; 726 } 727 } 728 729 static int perf_session__preprocess_sample(struct perf_session *session, 730 union perf_event *event, struct perf_sample *sample) 731 { 732 if (event->header.type != PERF_RECORD_SAMPLE || 733 !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) 734 return 0; 735 736 if (!ip_callchain__valid(sample->callchain, event)) { 737 pr_debug("call-chain problem with event, skipping it.\n"); 738 ++session->hists.stats.nr_invalid_chains; 739 session->hists.stats.total_invalid_chains += sample->period; 740 return -EINVAL; 741 } 742 return 0; 743 } 744 745 static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, 746 struct perf_event_ops *ops, u64 file_offset) 747 { 748 dump_event(session, event, file_offset, NULL); 749 750 /* These events are processed right away */ 751 switch (event->header.type) { 752 case PERF_RECORD_HEADER_ATTR: 753 return ops->attr(event, session); 754 case PERF_RECORD_HEADER_EVENT_TYPE: 755 return ops->event_type(event, session); 756 case PERF_RECORD_HEADER_TRACING_DATA: 757 /* setup for reading amidst mmap */ 758 lseek(session->fd, file_offset, SEEK_SET); 759 return ops->tracing_data(event, session); 760 case PERF_RECORD_HEADER_BUILD_ID: 761 return ops->build_id(event, session); 762 case PERF_RECORD_FINISHED_ROUND: 763 return ops->finished_round(event, session, ops); 764 default: 765 return -EINVAL; 766 } 767 } 768 769 static int perf_session__process_event(struct perf_session *session, 770 union perf_event *event, 771 struct perf_event_ops *ops, 772 u64 file_offset) 773 { 774 struct perf_sample sample; 775 int ret; 776 777 if (session->header.needs_swap && 778 perf_event__swap_ops[event->header.type]) 779 perf_event__swap_ops[event->header.type](event); 780 781 if (event->header.type >= PERF_RECORD_HEADER_MAX) 782 return -EINVAL; 783 784 hists__inc_nr_events(&session->hists, event->header.type); 785 786 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 787 return perf_session__process_user_event(session, event, ops, file_offset); 788 789 /* 790 * For all kernel events we get the sample data 791 */ 792 perf_session__parse_sample(session, event, &sample); 793 794 /* Preprocess sample records - precheck callchains */ 795 if (perf_session__preprocess_sample(session, event, &sample)) 796 return 0; 797 798 if (ops->ordered_samples) { 799 ret = perf_session_queue_event(session, event, &sample, 800 file_offset); 801 if (ret != -ETIME) 802 return ret; 803 } 804 805 return perf_session_deliver_event(session, event, &sample, ops, 806 file_offset); 807 } 808 809 void perf_event_header__bswap(struct perf_event_header *self) 810 { 811 self->type = bswap_32(self->type); 812 self->misc = bswap_16(self->misc); 813 self->size = bswap_16(self->size); 814 } 815 816 static struct thread *perf_session__register_idle_thread(struct perf_session *self) 817 { 818 struct thread *thread = perf_session__findnew(self, 0); 819 820 if (thread == NULL || thread__set_comm(thread, "swapper")) { 821 pr_err("problem inserting idle task.\n"); 822 thread = NULL; 823 } 824 825 return thread; 826 } 827 828 static void perf_session__warn_about_errors(const struct perf_session *session, 829 const struct perf_event_ops *ops) 830 { 831 if (ops->lost == perf_event__process_lost && 832 session->hists.stats.total_lost != 0) { 833 ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64 834 "!\n\nCheck IO/CPU overload!\n\n", 835 session->hists.stats.total_period, 836 session->hists.stats.total_lost); 837 } 838 839 if (session->hists.stats.nr_unknown_events != 0) { 840 ui__warning("Found %u unknown events!\n\n" 841 "Is this an older tool processing a perf.data " 842 "file generated by a more recent tool?\n\n" 843 "If that is not the case, consider " 844 "reporting to linux-kernel@vger.kernel.org.\n\n", 845 session->hists.stats.nr_unknown_events); 846 } 847 848 if (session->hists.stats.nr_invalid_chains != 0) { 849 ui__warning("Found invalid callchains!\n\n" 850 "%u out of %u events were discarded for this reason.\n\n" 851 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 852 session->hists.stats.nr_invalid_chains, 853 session->hists.stats.nr_events[PERF_RECORD_SAMPLE]); 854 } 855 } 856 857 #define session_done() (*(volatile int *)(&session_done)) 858 volatile int session_done; 859 860 static int __perf_session__process_pipe_events(struct perf_session *self, 861 struct perf_event_ops *ops) 862 { 863 union perf_event event; 864 uint32_t size; 865 int skip = 0; 866 u64 head; 867 int err; 868 void *p; 869 870 perf_event_ops__fill_defaults(ops); 871 872 head = 0; 873 more: 874 err = readn(self->fd, &event, sizeof(struct perf_event_header)); 875 if (err <= 0) { 876 if (err == 0) 877 goto done; 878 879 pr_err("failed to read event header\n"); 880 goto out_err; 881 } 882 883 if (self->header.needs_swap) 884 perf_event_header__bswap(&event.header); 885 886 size = event.header.size; 887 if (size == 0) 888 size = 8; 889 890 p = &event; 891 p += sizeof(struct perf_event_header); 892 893 if (size - sizeof(struct perf_event_header)) { 894 err = readn(self->fd, p, size - sizeof(struct perf_event_header)); 895 if (err <= 0) { 896 if (err == 0) { 897 pr_err("unexpected end of event stream\n"); 898 goto done; 899 } 900 901 pr_err("failed to read event data\n"); 902 goto out_err; 903 } 904 } 905 906 if (size == 0 || 907 (skip = perf_session__process_event(self, &event, ops, head)) < 0) { 908 dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", 909 head, event.header.size, event.header.type); 910 /* 911 * assume we lost track of the stream, check alignment, and 912 * increment a single u64 in the hope to catch on again 'soon'. 913 */ 914 if (unlikely(head & 7)) 915 head &= ~7ULL; 916 917 size = 8; 918 } 919 920 head += size; 921 922 if (skip > 0) 923 head += skip; 924 925 if (!session_done()) 926 goto more; 927 done: 928 err = 0; 929 out_err: 930 perf_session__warn_about_errors(self, ops); 931 perf_session_free_sample_buffers(self); 932 return err; 933 } 934 935 int __perf_session__process_events(struct perf_session *session, 936 u64 data_offset, u64 data_size, 937 u64 file_size, struct perf_event_ops *ops) 938 { 939 u64 head, page_offset, file_offset, file_pos, progress_next; 940 int err, mmap_prot, mmap_flags, map_idx = 0; 941 struct ui_progress *progress; 942 size_t page_size, mmap_size; 943 char *buf, *mmaps[8]; 944 union perf_event *event; 945 uint32_t size; 946 947 perf_event_ops__fill_defaults(ops); 948 949 page_size = sysconf(_SC_PAGESIZE); 950 951 page_offset = page_size * (data_offset / page_size); 952 file_offset = page_offset; 953 head = data_offset - page_offset; 954 955 if (data_offset + data_size < file_size) 956 file_size = data_offset + data_size; 957 958 progress_next = file_size / 16; 959 progress = ui_progress__new("Processing events...", file_size); 960 if (progress == NULL) 961 return -1; 962 963 mmap_size = session->mmap_window; 964 if (mmap_size > file_size) 965 mmap_size = file_size; 966 967 memset(mmaps, 0, sizeof(mmaps)); 968 969 mmap_prot = PROT_READ; 970 mmap_flags = MAP_SHARED; 971 972 if (session->header.needs_swap) { 973 mmap_prot |= PROT_WRITE; 974 mmap_flags = MAP_PRIVATE; 975 } 976 remap: 977 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd, 978 file_offset); 979 if (buf == MAP_FAILED) { 980 pr_err("failed to mmap file\n"); 981 err = -errno; 982 goto out_err; 983 } 984 mmaps[map_idx] = buf; 985 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 986 file_pos = file_offset + head; 987 988 more: 989 event = (union perf_event *)(buf + head); 990 991 if (session->header.needs_swap) 992 perf_event_header__bswap(&event->header); 993 size = event->header.size; 994 if (size == 0) 995 size = 8; 996 997 if (head + event->header.size > mmap_size) { 998 if (mmaps[map_idx]) { 999 munmap(mmaps[map_idx], mmap_size); 1000 mmaps[map_idx] = NULL; 1001 } 1002 1003 page_offset = page_size * (head / page_size); 1004 file_offset += page_offset; 1005 head -= page_offset; 1006 goto remap; 1007 } 1008 1009 size = event->header.size; 1010 1011 if (size == 0 || 1012 perf_session__process_event(session, event, ops, file_pos) < 0) { 1013 dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n", 1014 file_offset + head, event->header.size, 1015 event->header.type); 1016 /* 1017 * assume we lost track of the stream, check alignment, and 1018 * increment a single u64 in the hope to catch on again 'soon'. 1019 */ 1020 if (unlikely(head & 7)) 1021 head &= ~7ULL; 1022 1023 size = 8; 1024 } 1025 1026 head += size; 1027 file_pos += size; 1028 1029 if (file_pos >= progress_next) { 1030 progress_next += file_size / 16; 1031 ui_progress__update(progress, file_pos); 1032 } 1033 1034 if (file_pos < file_size) 1035 goto more; 1036 1037 err = 0; 1038 /* do the final flush for ordered samples */ 1039 session->ordered_samples.next_flush = ULLONG_MAX; 1040 flush_sample_queue(session, ops); 1041 out_err: 1042 ui_progress__delete(progress); 1043 perf_session__warn_about_errors(session, ops); 1044 perf_session_free_sample_buffers(session); 1045 return err; 1046 } 1047 1048 int perf_session__process_events(struct perf_session *self, 1049 struct perf_event_ops *ops) 1050 { 1051 int err; 1052 1053 if (perf_session__register_idle_thread(self) == NULL) 1054 return -ENOMEM; 1055 1056 if (!self->fd_pipe) 1057 err = __perf_session__process_events(self, 1058 self->header.data_offset, 1059 self->header.data_size, 1060 self->size, ops); 1061 else 1062 err = __perf_session__process_pipe_events(self, ops); 1063 1064 return err; 1065 } 1066 1067 bool perf_session__has_traces(struct perf_session *self, const char *msg) 1068 { 1069 if (!(self->sample_type & PERF_SAMPLE_RAW)) { 1070 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 1071 return false; 1072 } 1073 1074 return true; 1075 } 1076 1077 int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps, 1078 const char *symbol_name, 1079 u64 addr) 1080 { 1081 char *bracket; 1082 enum map_type i; 1083 struct ref_reloc_sym *ref; 1084 1085 ref = zalloc(sizeof(struct ref_reloc_sym)); 1086 if (ref == NULL) 1087 return -ENOMEM; 1088 1089 ref->name = strdup(symbol_name); 1090 if (ref->name == NULL) { 1091 free(ref); 1092 return -ENOMEM; 1093 } 1094 1095 bracket = strchr(ref->name, ']'); 1096 if (bracket) 1097 *bracket = '\0'; 1098 1099 ref->addr = addr; 1100 1101 for (i = 0; i < MAP__NR_TYPES; ++i) { 1102 struct kmap *kmap = map__kmap(maps[i]); 1103 kmap->ref_reloc_sym = ref; 1104 } 1105 1106 return 0; 1107 } 1108 1109 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) 1110 { 1111 return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) + 1112 __dsos__fprintf(&self->host_machine.user_dsos, fp) + 1113 machines__fprintf_dsos(&self->machines, fp); 1114 } 1115 1116 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, 1117 bool with_hits) 1118 { 1119 size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits); 1120 return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits); 1121 } 1122 1123 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 1124 { 1125 struct perf_evsel *pos; 1126 size_t ret = fprintf(fp, "Aggregated stats:\n"); 1127 1128 ret += hists__fprintf_nr_events(&session->hists, fp); 1129 1130 list_for_each_entry(pos, &session->evlist->entries, node) { 1131 ret += fprintf(fp, "%s stats:\n", event_name(pos)); 1132 ret += hists__fprintf_nr_events(&pos->hists, fp); 1133 } 1134 1135 return ret; 1136 } 1137