#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0) {
			pr_err("incompatible file format\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

/*
 * Size of the optional trailer appended to non-sample events when
 * sample_id_all is set: one slot per selected PERF_SAMPLE_* bit.
 */
static void perf_session__id_header_size(struct perf_session *session)
{
	struct perf_sample *data;
	u64 sample_type = session->sample_type;
	u16 size = 0;

	if (!session->sample_id_all)
		goto out;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	session->id_hdr_size = size;
}
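
/*
 * Worked example (illustrative, not a fixed layout): with sample_id_all
 * set and sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_CPU, every non-sample event carries a trailer of
 *
 *	{ u32 pid, tid; }	 8 bytes
 *	{ u64 time;	}	 8 bytes
 *	{ u32 cpu, res;	}	 8 bytes
 *
 * so id_hdr_size comes out to 24.
 */
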
void perf_session__set_sample_id_all(struct perf_session *session, bool value)
{
	session->sample_id_all = value;
	perf_session__id_header_size(session);
}

void perf_session__set_sample_type(struct perf_session *session, u64 type)
{
	session->sample_type = type;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
	self->sample_id_all = perf_header__sample_id_all(&self->header);
	perf_session__id_header_size(self);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode,
				       bool force, bool repipe,
				       struct perf_event_ops *ops)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	if (filename)
		memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&self->ordered_samples.to_free);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);

	if (ops && ops->ordering_requires_timestamps &&
	    ops->ordered_samples && !self->sample_id_all) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		ops->ordered_samples = false;
	}

out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);
		/* Fetch the successor before the node is erased. */
		struct rb_node *next = rb_next(nd);

		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
		nd = next;
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}
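
/*
 * Sketch of the intended lifecycle, as a hypothetical caller would use
 * it (error handling trimmed; "ops" stands for the caller's
 * perf_event_ops):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false, &ops);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	perf_session__process_events(session, &ops);
 *	perf_session__delete(session);
 */
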
void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may still have references to this thread, for instance in some
	 * hist_entry instances, so just move it to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

int perf_session__resolve_callchain(struct perf_session *self,
				    struct thread *thread,
				    struct ip_callchain *chain,
				    struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	int err;

	callchain_cursor_reset(&self->callchain_cursor);

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			/* Context markers switch the cpumode for what follows. */
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
		}

		err = callchain_cursor_append(&self->callchain_cursor,
					      ip, al.map, al.sym);
		if (err)
			return err;
	}

	return 0;
}
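
/*
 * Illustrative chain (addresses hypothetical): the context words select
 * the cpumode used to resolve the entries that follow them, e.g.
 *
 *	PERF_CONTEXT_KERNEL, 0xffffffff8100a3c2, PERF_CONTEXT_USER, 0x400124
 *
 * resolves the first ip as kernel text and the second as user text; the
 * marker words themselves never reach the callchain cursor.
 */
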
static int process_event_synth_stub(union perf_event *event __used,
				    struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(union perf_event *event __used,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(union perf_event *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(union perf_event *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = perf_event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_synth_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_synth_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_synth_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_synth_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}
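
/*
 * A consumer typically fills in only the callbacks it cares about and
 * relies on the stubs above for the rest. Minimal sketch (the sample
 * handler name is hypothetical):
 *
 *	static struct perf_event_ops my_ops = {
 *		.sample		 = my_process_sample,
 *		.mmap		 = perf_event__process_mmap,
 *		.ordered_samples = true,
 *	};
 */
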
/* Swap an array of u64s in place; byte_size must be a multiple of 8. */
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void perf_event__all64_swap(union perf_event *event)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);
}

static void perf_event__mmap_swap(union perf_event *event)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);
}

static void perf_event__task_swap(union perf_event *event)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);
}

static void perf_event__read_swap(union perf_event *event)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);
}

static void perf_event__attr_swap(union perf_event *event)
{
	size_t size;

	event->attr.attr.type = bswap_32(event->attr.attr.type);
	event->attr.attr.size = bswap_32(event->attr.attr.size);
	event->attr.attr.config = bswap_64(event->attr.attr.config);
	event->attr.attr.sample_period = bswap_64(event->attr.attr.sample_period);
	event->attr.attr.sample_type = bswap_64(event->attr.attr.sample_type);
	event->attr.attr.read_format = bswap_64(event->attr.attr.read_format);
	event->attr.attr.wakeup_events = bswap_32(event->attr.attr.wakeup_events);
	event->attr.attr.bp_type = bswap_32(event->attr.attr.bp_type);
	event->attr.attr.bp_addr = bswap_64(event->attr.attr.bp_addr);
	event->attr.attr.bp_len = bswap_64(event->attr.attr.bp_len);

	/* The trailing ids array is whatever is left of the event. */
	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
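
/*
 * These swaps only run when the file and host byte order differ
 * (session->header.needs_swap), e.g. a perf.data recorded on a
 * big-endian machine and read on a little-endian one. Worked example:
 * bswap_32(0x12345678) == 0x78563412.
 */
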
struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}
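
/*
 * Sketch of the queue storage flow: entries are carved out of 64KB
 * slabs (see MAX_SAMPLE_BUFFER below); flushed entries are parked on
 * os->sample_cache for reuse, while each slab itself stays chained on
 * os->to_free and is only returned to the allocator here, once the
 * session is done with it.
 */
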
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset);

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		perf_session__parse_sample(s, iter->event, &sample);
		perf_session_deliver_event(s, iter->event, &sample, ops,
					   iter->file_offset);

		os->last_flush = iter->timestamp;
		list_move(&iter->list, &os->sample_cache);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}

/*
 * When perf record finishes a pass over all of the buffers, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7  <--- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(union perf_event *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this, so we search linearly from there in either direction.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
				    struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		/* Reuse a previously flushed entry. */
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		/*
		 * Entry 0 of a fresh buffer is sacrificed to chain the
		 * buffer itself on the to_free list; entries are handed
		 * out starting at index 1.
		 */
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}
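
/*
 * Worked example (hypothetical timestamps): samples queued in arrival
 * order 100, 103, 101 end up on os->samples as 100, 101, 103; once a
 * later FINISHED_ROUND raises next_flush to at least 103,
 * flush_sample_queue() delivers them in that sorted order.
 */
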
static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2u: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !session->sample_id_all) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((session->sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (session->sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_session *session, union perf_event *event,
			struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period);

	if (session->sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);
}
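
/*
 * With dump_trace set (perf report -D style output), the two helpers
 * above produce lines roughly like the following (offsets and values
 * hypothetical):
 *
 *	0x4568 [0x38]: PERF_RECORD_SAMPLE
 *	(IP, 2): 1234/1234: 0xffffffff8100a3c2 period: 1
 */
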
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_event_ops *ops,
				      u64 file_offset)
{
	dump_event(session, event, file_offset, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(session, event, sample);
		return ops->sample(event, sample, session);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, sample, session);
	case PERF_RECORD_COMM:
		return ops->comm(event, sample, session);
	case PERF_RECORD_FORK:
		return ops->fork(event, sample, session);
	case PERF_RECORD_EXIT:
		return ops->exit(event, sample, session);
	case PERF_RECORD_LOST:
		return ops->lost(event, sample, session);
	case PERF_RECORD_READ:
		return ops->read(event, sample, session);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, sample, session);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, sample, session);
	default:
		++session->hists.stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__preprocess_sample(struct perf_session *session,
					   union perf_event *event, struct perf_sample *sample)
{
	if (event->header.type != PERF_RECORD_SAMPLE ||
	    !(session->sample_type & PERF_SAMPLE_CALLCHAIN))
		return 0;

	if (!ip_callchain__valid(sample->callchain, event)) {
		pr_debug("call-chain problem with event, skipping it.\n");
		++session->hists.stats.nr_invalid_chains;
		session->hists.stats.total_invalid_chains += sample->period;
		return -EINVAL;
	}
	return 0;
}

static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_event_ops *ops, u64 file_offset)
{
	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, session);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, session);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/*
		 * Tracing data is read from the fd directly, not from the
		 * mmap, so reposition the file to this event first.
		 */
		lseek(session->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, session, ops);
	default:
		return -EINVAL;
	}
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_event_ops *ops,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	/* Validate the type before using it to index the swap table. */
	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	if (session->header.needs_swap &&
	    perf_event__swap_ops[event->header.type])
		perf_event__swap_ops[event->header.type](event);

	hists__inc_nr_events(&session->hists, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, ops, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	perf_session__parse_sample(session, event, &sample);

	/* Preprocess sample records - precheck callchains */
	if (perf_session__preprocess_sample(session, event, &sample))
		return 0;

	if (ops->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	/* -ETIME means the event has no usable timestamp: deliver it now. */
	return perf_session_deliver_event(session, event, &sample, ops,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_event_ops *ops)
{
	if (ops->lost == perf_event__process_lost &&
	    session->hists.stats.total_lost != 0) {
		ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
			    "!\n\nCheck IO/CPU overload!\n\n",
			    session->hists.stats.total_period,
			    session->hists.stats.total_lost);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	if (session->hists.stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_invalid_chains,
			    session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
	}
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	union perf_event event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = readn(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(self->fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session__warn_about_errors(self, ops);
	perf_session_free_sample_buffers(self);
	return err;
}

int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	struct ui_progress *progress;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	union perf_event *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;
	progress = ui_progress__new("Processing events...", file_size);
	if (progress == NULL)
		return -1;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		/* Byte swapping modifies the buffer, so map it privately. */
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#" PRIx64 " [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(progress, file_pos);
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	ui_progress__delete(progress);
	perf_session__warn_about_errors(session, ops);
	perf_session_free_sample_buffers(session);
	return err;
}
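
/*
 * Remap arithmetic in the function above, worked through (hypothetical
 * 32-bit numbers): with mmap_window = 32MB, an event whose end crosses
 * head = 33MB exceeds mmap_size, so page_offset rounds head down to a
 * page boundary, file_offset advances by that amount, head shrinks to
 * the small remainder, and the window is mmap'ed again at the new
 * file_offset.
 */
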
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace samples to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

/* See the usage sketch at the end of this file. */
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}
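
/*
 * Usage sketch for perf_session__set_kallsyms_ref_reloc_sym() above
 * (illustrative; the symbol name and maps array depend on the caller):
 *
 *	err = perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
 *						       "_text", addr);
 *
 * where "_text" is the reference symbol later used to detect kernel
 * relocation.
 */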