#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0) {
			pr_err("incompatible file format\n");
			return -1;
		}

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		/* return the saved errno: the pr_err calls may clobber it */
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *self)
{
	machine__destroy_kernel_maps(&self->host_machine);
	machines__destroy_guest_kernel_maps(&self->machines);
}

struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	if (filename)
		memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	/*
	 * On 64bit we can mmap the data file in one go. No need for tiny mmap
	 * slices. On 32bit we use 32MB.
	 */
#if BITS_PER_LONG == 64
	self->mmap_window = ULLONG_MAX;
#else
	self->mmap_window = 32 * 1024 * 1024ULL;
#endif
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples);
	INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}
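/*
 * A typical caller lifecycle, as a rough sketch (error handling elided;
 * "event_ops" stands in for whatever perf_event_ops the caller wires up):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new("perf.data", O_RDONLY, false, false);
 *	if (session != NULL) {
 *		perf_session__process_events(session, &event_ops);
 *		perf_session__delete(session);
 *	}
 */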
static void perf_session__delete_dead_threads(struct perf_session *self)
{
	struct thread *n, *t;

	list_for_each_entry_safe(t, n, &self->dead_threads, node) {
		list_del(&t->node);
		thread__delete(t);
	}
}

static void perf_session__delete_threads(struct perf_session *self)
{
	struct rb_node *nd = rb_first(&self->threads);

	while (nd) {
		struct thread *t = rb_entry(nd, struct thread, rb_node);

		/* fetch the successor before the erase invalidates @nd */
		nd = rb_next(nd);
		rb_erase(&t->rb_node, &self->threads);
		thread__delete(t);
	}
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	perf_session__destroy_kernel_maps(self);
	perf_session__delete_dead_threads(self);
	perf_session__delete_threads(self);
	machine__exit(&self->host_machine);
	close(self->fd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	self->last_match = NULL;
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some hist_entry
	 * instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}
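/*
 * Entries in a callchain whose value is >= PERF_CONTEXT_MAX are not
 * addresses but context markers: they switch the cpumode used to resolve
 * the addresses that follow them.
 */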
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = event__process_lost;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}
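/*
 * Byte-swap a region of 64-bit words in place; @byte_size is expected to
 * be a multiple of sizeof(u64).
 */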
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;

	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type = bswap_32(self->attr.attr.type);
	self->attr.attr.size = bswap_32(self->attr.attr.size);
	self->attr.attr.config = bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = event__mmap_swap,
	[PERF_RECORD_COMM]		  = event__comm_swap,
	[PERF_RECORD_FORK]		  = event__task_swap,
	[PERF_RECORD_EXIT]		  = event__task_swap,
	[PERF_RECORD_LOST]		  = event__all64_swap,
	[PERF_RECORD_READ]		  = event__read_swap,
	[PERF_RECORD_SAMPLE]		  = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
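/*
 * Samples can arrive out of order across the per-cpu buffers, so they are
 * queued on a time-ordered list and only flushed up to a timestamp that is
 * known to be safe; see the pass diagram above process_finished_round()
 * below for the flushing rule.
 */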
struct sample_queue {
	u64			timestamp;
	event_t			*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->sample_cache)) {
		struct sample_queue *sq;

		sq = list_entry(os->sample_cache.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			break;

		ops->sample(iter->event, s);

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}
}

/*
 * When perf record finishes a pass on all buffers, it records this pseudo
 * event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	struct list_head *sc = &s->ordered_samples.sample_cache;
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		pr_warning("Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else {
		new = malloc(sizeof(*new));
		if (!new)
			return -ENOMEM;
	}

	new->timestamp = timestamp;
	new->event = event;

	__queue_sample_event(new, s);

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	return queue_sample_event(event, &data, s);
}
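/*
 * Central dispatch for one event: account it, byte-swap the payload if the
 * file was recorded on a machine of the opposite endianness (the header
 * itself is swapped by the readers), then hand it to the matching op.
 */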
static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 file_offset)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    file_offset, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	if (self->header.needs_swap &&
	    event->header.type < PERF_RECORD_HEADER_MAX &&
	    event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, file_offset, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
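/*
 * Read exactly @size bytes, looping over short reads. Returns @size on
 * success, or read()'s 0/-1 on EOF/error; any bytes consumed before such
 * a failure are lost to the caller.
 */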
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	/*
	 * Events are read into this stack buffer and handed to the ops by
	 * pointer, so handlers that keep events around (e.g. the ordered
	 * samples queue) must copy what they need before returning.
	 */
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	perf_session_free_sample_buffers(self);
	return err;
}
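/*
 * The on-disk data area is walked through a sliding mmap window: on 64-bit
 * the whole area fits in a single mapping (mmap_window is ULLONG_MAX),
 * while on 32-bit we remap in mmap_window-sized slices whenever an event
 * would cross the end of the current one.
 */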
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	u64 head, page_offset, file_offset, file_pos, progress_next;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	struct ui_progress *progress;
	size_t page_size, mmap_size;
	char *buf, *mmaps[8];
	event_t *event;
	uint32_t size;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	progress_next = file_size / 16;
	progress = ui_progress__new("Processing events...", file_size);
	if (progress == NULL)
		return -1;

	mmap_size = session->mmap_window;
	if (mmap_size > file_size)
		mmap_size = file_size;

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		/*
		 * Events are byte-swapped in place, so map writable and
		 * private: the file stays untouched and the swapped contents
		 * are simply discarded on munmap.
		 */
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;

more:
	event = (event_t *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size >= mmap_size) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    file_pos, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(session, event, ops, file_pos) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    file_offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;
	file_pos += size;

	if (file_pos >= progress_next) {
		progress_next += file_size / 16;
		ui_progress__update(progress, file_pos);
	}

	if (file_pos < file_size)
		goto more;

	err = 0;
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(session, ops);
out_err:
	ui_progress__delete(progress);

	if (ops->lost == event__process_lost &&
	    session->hists.stats.total_lost != 0) {
		ui__warning("Processed %Lu events and LOST %Lu!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->hists.stats.total_period,
			    session->hists.stats.total_lost);
	}

	if (session->hists.stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->hists.stats.nr_unknown_events);
	}

	perf_session_free_sample_buffers(session);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);

	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);

	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}