1 #include <linux/kernel.h> 2 #include <traceevent/event-parse.h> 3 4 #include <byteswap.h> 5 #include <unistd.h> 6 #include <sys/types.h> 7 #include <sys/mman.h> 8 9 #include "evlist.h" 10 #include "evsel.h" 11 #include "session.h" 12 #include "tool.h" 13 #include "sort.h" 14 #include "util.h" 15 #include "cpumap.h" 16 #include "perf_regs.h" 17 #include "vdso.h" 18 19 static int perf_session__open(struct perf_session *self, bool force) 20 { 21 struct stat input_stat; 22 23 if (!strcmp(self->filename, "-")) { 24 self->fd_pipe = true; 25 self->fd = STDIN_FILENO; 26 27 if (perf_session__read_header(self) < 0) 28 pr_err("incompatible file format (rerun with -v to learn more)"); 29 30 return 0; 31 } 32 33 self->fd = open(self->filename, O_RDONLY); 34 if (self->fd < 0) { 35 int err = errno; 36 37 pr_err("failed to open %s: %s", self->filename, strerror(err)); 38 if (err == ENOENT && !strcmp(self->filename, "perf.data")) 39 pr_err(" (try 'perf record' first)"); 40 pr_err("\n"); 41 return -errno; 42 } 43 44 if (fstat(self->fd, &input_stat) < 0) 45 goto out_close; 46 47 if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { 48 pr_err("file %s not owned by current user or root\n", 49 self->filename); 50 goto out_close; 51 } 52 53 if (!input_stat.st_size) { 54 pr_info("zero-sized file (%s), nothing to do!\n", 55 self->filename); 56 goto out_close; 57 } 58 59 if (perf_session__read_header(self) < 0) { 60 pr_err("incompatible file format (rerun with -v to learn more)"); 61 goto out_close; 62 } 63 64 if (!perf_evlist__valid_sample_type(self->evlist)) { 65 pr_err("non matching sample_type"); 66 goto out_close; 67 } 68 69 if (!perf_evlist__valid_sample_id_all(self->evlist)) { 70 pr_err("non matching sample_id_all"); 71 goto out_close; 72 } 73 74 if (!perf_evlist__valid_read_format(self->evlist)) { 75 pr_err("non matching read_format"); 76 goto out_close; 77 } 78 79 self->size = input_stat.st_size; 80 return 0; 81 82 out_close: 83 close(self->fd); 84 self->fd = -1; 85 return -1; 86 } 87 88 void perf_session__set_id_hdr_size(struct perf_session *session) 89 { 90 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 91 92 machines__set_id_hdr_size(&session->machines, id_hdr_size); 93 } 94 95 int perf_session__create_kernel_maps(struct perf_session *self) 96 { 97 int ret = machine__create_kernel_maps(&self->machines.host); 98 99 if (ret >= 0) 100 ret = machines__create_guest_kernel_maps(&self->machines); 101 return ret; 102 } 103 104 static void perf_session__destroy_kernel_maps(struct perf_session *self) 105 { 106 machines__destroy_kernel_maps(&self->machines); 107 } 108 109 struct perf_session *perf_session__new(const char *filename, int mode, 110 bool force, bool repipe, 111 struct perf_tool *tool) 112 { 113 struct perf_session *self; 114 struct stat st; 115 size_t len; 116 117 if (!filename || !strlen(filename)) { 118 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 119 filename = "-"; 120 else 121 filename = "perf.data"; 122 } 123 124 len = strlen(filename); 125 self = zalloc(sizeof(*self) + len); 126 127 if (self == NULL) 128 goto out; 129 130 memcpy(self->filename, filename, len); 131 self->repipe = repipe; 132 INIT_LIST_HEAD(&self->ordered_samples.samples); 133 INIT_LIST_HEAD(&self->ordered_samples.sample_cache); 134 INIT_LIST_HEAD(&self->ordered_samples.to_free); 135 machines__init(&self->machines); 136 137 if (mode == O_RDONLY) { 138 if (perf_session__open(self, force) < 0) 139 goto out_delete; 140 perf_session__set_id_hdr_size(self); 141 } else if (mode == O_WRONLY) { 142 /* 143 * In O_RDONLY mode this will be performed when reading the 144 * kernel MMAP event, in perf_event__process_mmap(). 145 */ 146 if (perf_session__create_kernel_maps(self) < 0) 147 goto out_delete; 148 } 149 150 if (tool && tool->ordering_requires_timestamps && 151 tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) { 152 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 153 tool->ordered_samples = false; 154 } 155 156 out: 157 return self; 158 out_delete: 159 perf_session__delete(self); 160 return NULL; 161 } 162 163 static void perf_session__delete_dead_threads(struct perf_session *session) 164 { 165 machine__delete_dead_threads(&session->machines.host); 166 } 167 168 static void perf_session__delete_threads(struct perf_session *session) 169 { 170 machine__delete_threads(&session->machines.host); 171 } 172 173 static void perf_session_env__delete(struct perf_session_env *env) 174 { 175 free(env->hostname); 176 free(env->os_release); 177 free(env->version); 178 free(env->arch); 179 free(env->cpu_desc); 180 free(env->cpuid); 181 182 free(env->cmdline); 183 free(env->sibling_cores); 184 free(env->sibling_threads); 185 free(env->numa_nodes); 186 free(env->pmu_mappings); 187 } 188 189 void perf_session__delete(struct perf_session *self) 190 { 191 perf_session__destroy_kernel_maps(self); 192 perf_session__delete_dead_threads(self); 193 perf_session__delete_threads(self); 194 perf_session_env__delete(&self->header.env); 195 machines__exit(&self->machines); 196 close(self->fd); 197 free(self); 198 vdso__exit(); 199 } 200 201 static int process_event_synth_tracing_data_stub(struct perf_tool *tool 202 __maybe_unused, 203 union perf_event *event 204 __maybe_unused, 205 struct perf_session *session 206 __maybe_unused) 207 { 208 dump_printf(": unhandled!\n"); 209 return 0; 210 } 211 212 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused, 213 union perf_event *event __maybe_unused, 214 struct perf_evlist **pevlist 215 __maybe_unused) 216 { 217 dump_printf(": unhandled!\n"); 218 return 0; 219 } 220 221 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, 222 union perf_event *event __maybe_unused, 223 struct perf_sample *sample __maybe_unused, 224 struct perf_evsel *evsel __maybe_unused, 225 struct machine *machine __maybe_unused) 226 { 227 dump_printf(": unhandled!\n"); 228 return 0; 229 } 230 231 static int process_event_stub(struct perf_tool *tool __maybe_unused, 232 union perf_event *event __maybe_unused, 233 struct perf_sample *sample __maybe_unused, 234 struct machine *machine __maybe_unused) 235 { 236 dump_printf(": unhandled!\n"); 237 return 0; 238 } 239 240 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, 241 union perf_event *event __maybe_unused, 242 struct perf_session *perf_session 243 __maybe_unused) 244 { 245 dump_printf(": unhandled!\n"); 246 return 0; 247 } 248 249 static int process_finished_round(struct perf_tool *tool, 250 union perf_event *event, 251 struct perf_session *session); 252 253 void perf_tool__fill_defaults(struct perf_tool *tool) 254 { 255 if (tool->sample == NULL) 256 tool->sample = process_event_sample_stub; 257 if (tool->mmap == NULL) 258 tool->mmap = process_event_stub; 259 if (tool->comm == NULL) 260 tool->comm = process_event_stub; 261 if (tool->fork == NULL) 262 tool->fork = process_event_stub; 263 if (tool->exit == NULL) 264 tool->exit = process_event_stub; 265 if (tool->lost == NULL) 266 tool->lost = perf_event__process_lost; 267 if (tool->read == NULL) 268 tool->read = process_event_sample_stub; 269 if (tool->throttle == NULL) 270 tool->throttle = process_event_stub; 271 if (tool->unthrottle == NULL) 272 tool->unthrottle = process_event_stub; 273 if (tool->attr == NULL) 274 tool->attr = process_event_synth_attr_stub; 275 if (tool->tracing_data == NULL) 276 tool->tracing_data = process_event_synth_tracing_data_stub; 277 if (tool->build_id == NULL) 278 tool->build_id = process_finished_round_stub; 279 if (tool->finished_round == NULL) { 280 if (tool->ordered_samples) 281 tool->finished_round = process_finished_round; 282 else 283 tool->finished_round = process_finished_round_stub; 284 } 285 } 286 287 void mem_bswap_32(void *src, int byte_size) 288 { 289 u32 *m = src; 290 while (byte_size > 0) { 291 *m = bswap_32(*m); 292 byte_size -= sizeof(u32); 293 ++m; 294 } 295 } 296 297 void mem_bswap_64(void *src, int byte_size) 298 { 299 u64 *m = src; 300 301 while (byte_size > 0) { 302 *m = bswap_64(*m); 303 byte_size -= sizeof(u64); 304 ++m; 305 } 306 } 307 308 static void swap_sample_id_all(union perf_event *event, void *data) 309 { 310 void *end = (void *) event + event->header.size; 311 int size = end - data; 312 313 BUG_ON(size % sizeof(u64)); 314 mem_bswap_64(data, size); 315 } 316 317 static void perf_event__all64_swap(union perf_event *event, 318 bool sample_id_all __maybe_unused) 319 { 320 struct perf_event_header *hdr = &event->header; 321 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 322 } 323 324 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) 325 { 326 event->comm.pid = bswap_32(event->comm.pid); 327 event->comm.tid = bswap_32(event->comm.tid); 328 329 if (sample_id_all) { 330 void *data = &event->comm.comm; 331 332 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 333 swap_sample_id_all(event, data); 334 } 335 } 336 337 static void perf_event__mmap_swap(union perf_event *event, 338 bool sample_id_all) 339 { 340 event->mmap.pid = bswap_32(event->mmap.pid); 341 event->mmap.tid = bswap_32(event->mmap.tid); 342 event->mmap.start = bswap_64(event->mmap.start); 343 event->mmap.len = bswap_64(event->mmap.len); 344 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 345 346 if (sample_id_all) { 347 void *data = &event->mmap.filename; 348 349 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 350 swap_sample_id_all(event, data); 351 } 352 } 353 354 static void perf_event__mmap2_swap(union perf_event *event, 355 bool sample_id_all) 356 { 357 event->mmap2.pid = bswap_32(event->mmap2.pid); 358 event->mmap2.tid = bswap_32(event->mmap2.tid); 359 event->mmap2.start = bswap_64(event->mmap2.start); 360 event->mmap2.len = bswap_64(event->mmap2.len); 361 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); 362 event->mmap2.maj = bswap_32(event->mmap2.maj); 363 event->mmap2.min = bswap_32(event->mmap2.min); 364 event->mmap2.ino = bswap_64(event->mmap2.ino); 365 366 if (sample_id_all) { 367 void *data = &event->mmap2.filename; 368 369 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 370 swap_sample_id_all(event, data); 371 } 372 } 373 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 374 { 375 event->fork.pid = bswap_32(event->fork.pid); 376 event->fork.tid = bswap_32(event->fork.tid); 377 event->fork.ppid = bswap_32(event->fork.ppid); 378 event->fork.ptid = bswap_32(event->fork.ptid); 379 event->fork.time = bswap_64(event->fork.time); 380 381 if (sample_id_all) 382 swap_sample_id_all(event, &event->fork + 1); 383 } 384 385 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 386 { 387 event->read.pid = bswap_32(event->read.pid); 388 event->read.tid = bswap_32(event->read.tid); 389 event->read.value = bswap_64(event->read.value); 390 event->read.time_enabled = bswap_64(event->read.time_enabled); 391 event->read.time_running = bswap_64(event->read.time_running); 392 event->read.id = bswap_64(event->read.id); 393 394 if (sample_id_all) 395 swap_sample_id_all(event, &event->read + 1); 396 } 397 398 static u8 revbyte(u8 b) 399 { 400 int rev = (b >> 4) | ((b & 0xf) << 4); 401 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 402 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 403 return (u8) rev; 404 } 405 406 /* 407 * XXX this is hack in attempt to carry flags bitfield 408 * throught endian village. ABI says: 409 * 410 * Bit-fields are allocated from right to left (least to most significant) 411 * on little-endian implementations and from left to right (most to least 412 * significant) on big-endian implementations. 413 * 414 * The above seems to be byte specific, so we need to reverse each 415 * byte of the bitfield. 'Internet' also says this might be implementation 416 * specific and we probably need proper fix and carry perf_event_attr 417 * bitfield flags in separate data file FEAT_ section. Thought this seems 418 * to work for now. 419 */ 420 static void swap_bitfield(u8 *p, unsigned len) 421 { 422 unsigned i; 423 424 for (i = 0; i < len; i++) { 425 *p = revbyte(*p); 426 p++; 427 } 428 } 429 430 /* exported for swapping attributes in file header */ 431 void perf_event__attr_swap(struct perf_event_attr *attr) 432 { 433 attr->type = bswap_32(attr->type); 434 attr->size = bswap_32(attr->size); 435 attr->config = bswap_64(attr->config); 436 attr->sample_period = bswap_64(attr->sample_period); 437 attr->sample_type = bswap_64(attr->sample_type); 438 attr->read_format = bswap_64(attr->read_format); 439 attr->wakeup_events = bswap_32(attr->wakeup_events); 440 attr->bp_type = bswap_32(attr->bp_type); 441 attr->bp_addr = bswap_64(attr->bp_addr); 442 attr->bp_len = bswap_64(attr->bp_len); 443 444 swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); 445 } 446 447 static void perf_event__hdr_attr_swap(union perf_event *event, 448 bool sample_id_all __maybe_unused) 449 { 450 size_t size; 451 452 perf_event__attr_swap(&event->attr.attr); 453 454 size = event->header.size; 455 size -= (void *)&event->attr.id - (void *)event; 456 mem_bswap_64(event->attr.id, size); 457 } 458 459 static void perf_event__event_type_swap(union perf_event *event, 460 bool sample_id_all __maybe_unused) 461 { 462 event->event_type.event_type.event_id = 463 bswap_64(event->event_type.event_type.event_id); 464 } 465 466 static void perf_event__tracing_data_swap(union perf_event *event, 467 bool sample_id_all __maybe_unused) 468 { 469 event->tracing_data.size = bswap_32(event->tracing_data.size); 470 } 471 472 typedef void (*perf_event__swap_op)(union perf_event *event, 473 bool sample_id_all); 474 475 static perf_event__swap_op perf_event__swap_ops[] = { 476 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 477 [PERF_RECORD_MMAP2] = perf_event__mmap2_swap, 478 [PERF_RECORD_COMM] = perf_event__comm_swap, 479 [PERF_RECORD_FORK] = perf_event__task_swap, 480 [PERF_RECORD_EXIT] = perf_event__task_swap, 481 [PERF_RECORD_LOST] = perf_event__all64_swap, 482 [PERF_RECORD_READ] = perf_event__read_swap, 483 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 484 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 485 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 486 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 487 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 488 [PERF_RECORD_HEADER_MAX] = NULL, 489 }; 490 491 struct sample_queue { 492 u64 timestamp; 493 u64 file_offset; 494 union perf_event *event; 495 struct list_head list; 496 }; 497 498 static void perf_session_free_sample_buffers(struct perf_session *session) 499 { 500 struct ordered_samples *os = &session->ordered_samples; 501 502 while (!list_empty(&os->to_free)) { 503 struct sample_queue *sq; 504 505 sq = list_entry(os->to_free.next, struct sample_queue, list); 506 list_del(&sq->list); 507 free(sq); 508 } 509 } 510 511 static int perf_session_deliver_event(struct perf_session *session, 512 union perf_event *event, 513 struct perf_sample *sample, 514 struct perf_tool *tool, 515 u64 file_offset); 516 517 static int flush_sample_queue(struct perf_session *s, 518 struct perf_tool *tool) 519 { 520 struct ordered_samples *os = &s->ordered_samples; 521 struct list_head *head = &os->samples; 522 struct sample_queue *tmp, *iter; 523 struct perf_sample sample; 524 u64 limit = os->next_flush; 525 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; 526 unsigned idx = 0, progress_next = os->nr_samples / 16; 527 bool show_progress = limit == ULLONG_MAX; 528 int ret; 529 530 if (!tool->ordered_samples || !limit) 531 return 0; 532 533 list_for_each_entry_safe(iter, tmp, head, list) { 534 if (iter->timestamp > limit) 535 break; 536 537 ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample); 538 if (ret) 539 pr_err("Can't parse sample, err = %d\n", ret); 540 else { 541 ret = perf_session_deliver_event(s, iter->event, &sample, tool, 542 iter->file_offset); 543 if (ret) 544 return ret; 545 } 546 547 os->last_flush = iter->timestamp; 548 list_del(&iter->list); 549 list_add(&iter->list, &os->sample_cache); 550 if (show_progress && (++idx >= progress_next)) { 551 progress_next += os->nr_samples / 16; 552 ui_progress__update(idx, os->nr_samples, 553 "Processing time ordered events..."); 554 } 555 } 556 557 if (list_empty(head)) { 558 os->last_sample = NULL; 559 } else if (last_ts <= limit) { 560 os->last_sample = 561 list_entry(head->prev, struct sample_queue, list); 562 } 563 564 os->nr_samples = 0; 565 566 return 0; 567 } 568 569 /* 570 * When perf record finishes a pass on every buffers, it records this pseudo 571 * event. 572 * We record the max timestamp t found in the pass n. 573 * Assuming these timestamps are monotonic across cpus, we know that if 574 * a buffer still has events with timestamps below t, they will be all 575 * available and then read in the pass n + 1. 576 * Hence when we start to read the pass n + 2, we can safely flush every 577 * events with timestamps below t. 578 * 579 * ============ PASS n ================= 580 * CPU 0 | CPU 1 581 * | 582 * cnt1 timestamps | cnt2 timestamps 583 * 1 | 2 584 * 2 | 3 585 * - | 4 <--- max recorded 586 * 587 * ============ PASS n + 1 ============== 588 * CPU 0 | CPU 1 589 * | 590 * cnt1 timestamps | cnt2 timestamps 591 * 3 | 5 592 * 4 | 6 593 * 5 | 7 <---- max recorded 594 * 595 * Flush every events below timestamp 4 596 * 597 * ============ PASS n + 2 ============== 598 * CPU 0 | CPU 1 599 * | 600 * cnt1 timestamps | cnt2 timestamps 601 * 6 | 8 602 * 7 | 9 603 * - | 10 604 * 605 * Flush every events below timestamp 7 606 * etc... 607 */ 608 static int process_finished_round(struct perf_tool *tool, 609 union perf_event *event __maybe_unused, 610 struct perf_session *session) 611 { 612 int ret = flush_sample_queue(session, tool); 613 if (!ret) 614 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; 615 616 return ret; 617 } 618 619 /* The queue is ordered by time */ 620 static void __queue_event(struct sample_queue *new, struct perf_session *s) 621 { 622 struct ordered_samples *os = &s->ordered_samples; 623 struct sample_queue *sample = os->last_sample; 624 u64 timestamp = new->timestamp; 625 struct list_head *p; 626 627 ++os->nr_samples; 628 os->last_sample = new; 629 630 if (!sample) { 631 list_add(&new->list, &os->samples); 632 os->max_timestamp = timestamp; 633 return; 634 } 635 636 /* 637 * last_sample might point to some random place in the list as it's 638 * the last queued event. We expect that the new event is close to 639 * this. 640 */ 641 if (sample->timestamp <= timestamp) { 642 while (sample->timestamp <= timestamp) { 643 p = sample->list.next; 644 if (p == &os->samples) { 645 list_add_tail(&new->list, &os->samples); 646 os->max_timestamp = timestamp; 647 return; 648 } 649 sample = list_entry(p, struct sample_queue, list); 650 } 651 list_add_tail(&new->list, &sample->list); 652 } else { 653 while (sample->timestamp > timestamp) { 654 p = sample->list.prev; 655 if (p == &os->samples) { 656 list_add(&new->list, &os->samples); 657 return; 658 } 659 sample = list_entry(p, struct sample_queue, list); 660 } 661 list_add(&new->list, &sample->list); 662 } 663 } 664 665 #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) 666 667 int perf_session_queue_event(struct perf_session *s, union perf_event *event, 668 struct perf_sample *sample, u64 file_offset) 669 { 670 struct ordered_samples *os = &s->ordered_samples; 671 struct list_head *sc = &os->sample_cache; 672 u64 timestamp = sample->time; 673 struct sample_queue *new; 674 675 if (!timestamp || timestamp == ~0ULL) 676 return -ETIME; 677 678 if (timestamp < s->ordered_samples.last_flush) { 679 printf("Warning: Timestamp below last timeslice flush\n"); 680 return -EINVAL; 681 } 682 683 if (!list_empty(sc)) { 684 new = list_entry(sc->next, struct sample_queue, list); 685 list_del(&new->list); 686 } else if (os->sample_buffer) { 687 new = os->sample_buffer + os->sample_buffer_idx; 688 if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) 689 os->sample_buffer = NULL; 690 } else { 691 os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); 692 if (!os->sample_buffer) 693 return -ENOMEM; 694 list_add(&os->sample_buffer->list, &os->to_free); 695 os->sample_buffer_idx = 2; 696 new = os->sample_buffer + 1; 697 } 698 699 new->timestamp = timestamp; 700 new->file_offset = file_offset; 701 new->event = event; 702 703 __queue_event(new, s); 704 705 return 0; 706 } 707 708 static void callchain__printf(struct perf_sample *sample) 709 { 710 unsigned int i; 711 712 printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); 713 714 for (i = 0; i < sample->callchain->nr; i++) 715 printf("..... %2d: %016" PRIx64 "\n", 716 i, sample->callchain->ips[i]); 717 } 718 719 static void branch_stack__printf(struct perf_sample *sample) 720 { 721 uint64_t i; 722 723 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); 724 725 for (i = 0; i < sample->branch_stack->nr; i++) 726 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", 727 i, sample->branch_stack->entries[i].from, 728 sample->branch_stack->entries[i].to); 729 } 730 731 static void regs_dump__printf(u64 mask, u64 *regs) 732 { 733 unsigned rid, i = 0; 734 735 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { 736 u64 val = regs[i++]; 737 738 printf(".... %-5s 0x%" PRIx64 "\n", 739 perf_reg_name(rid), val); 740 } 741 } 742 743 static void regs_user__printf(struct perf_sample *sample, u64 mask) 744 { 745 struct regs_dump *user_regs = &sample->user_regs; 746 747 if (user_regs->regs) { 748 printf("... user regs: mask 0x%" PRIx64 "\n", mask); 749 regs_dump__printf(mask, user_regs->regs); 750 } 751 } 752 753 static void stack_user__printf(struct stack_dump *dump) 754 { 755 printf("... ustack: size %" PRIu64 ", offset 0x%x\n", 756 dump->size, dump->offset); 757 } 758 759 static void perf_session__print_tstamp(struct perf_session *session, 760 union perf_event *event, 761 struct perf_sample *sample) 762 { 763 u64 sample_type = __perf_evlist__combined_sample_type(session->evlist); 764 765 if (event->header.type != PERF_RECORD_SAMPLE && 766 !perf_evlist__sample_id_all(session->evlist)) { 767 fputs("-1 -1 ", stdout); 768 return; 769 } 770 771 if ((sample_type & PERF_SAMPLE_CPU)) 772 printf("%u ", sample->cpu); 773 774 if (sample_type & PERF_SAMPLE_TIME) 775 printf("%" PRIu64 " ", sample->time); 776 } 777 778 static void sample_read__printf(struct perf_sample *sample, u64 read_format) 779 { 780 printf("... sample_read:\n"); 781 782 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 783 printf("...... time enabled %016" PRIx64 "\n", 784 sample->read.time_enabled); 785 786 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 787 printf("...... time running %016" PRIx64 "\n", 788 sample->read.time_running); 789 790 if (read_format & PERF_FORMAT_GROUP) { 791 u64 i; 792 793 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); 794 795 for (i = 0; i < sample->read.group.nr; i++) { 796 struct sample_read_value *value; 797 798 value = &sample->read.group.values[i]; 799 printf("..... id %016" PRIx64 800 ", value %016" PRIx64 "\n", 801 value->id, value->value); 802 } 803 } else 804 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n", 805 sample->read.one.id, sample->read.one.value); 806 } 807 808 static void dump_event(struct perf_session *session, union perf_event *event, 809 u64 file_offset, struct perf_sample *sample) 810 { 811 if (!dump_trace) 812 return; 813 814 printf("\n%#" PRIx64 " [%#x]: event: %d\n", 815 file_offset, event->header.size, event->header.type); 816 817 trace_event(event); 818 819 if (sample) 820 perf_session__print_tstamp(session, event, sample); 821 822 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 823 event->header.size, perf_event__name(event->header.type)); 824 } 825 826 static void dump_sample(struct perf_evsel *evsel, union perf_event *event, 827 struct perf_sample *sample) 828 { 829 u64 sample_type; 830 831 if (!dump_trace) 832 return; 833 834 printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", 835 event->header.misc, sample->pid, sample->tid, sample->ip, 836 sample->period, sample->addr); 837 838 sample_type = evsel->attr.sample_type; 839 840 if (sample_type & PERF_SAMPLE_CALLCHAIN) 841 callchain__printf(sample); 842 843 if (sample_type & PERF_SAMPLE_BRANCH_STACK) 844 branch_stack__printf(sample); 845 846 if (sample_type & PERF_SAMPLE_REGS_USER) 847 regs_user__printf(sample, evsel->attr.sample_regs_user); 848 849 if (sample_type & PERF_SAMPLE_STACK_USER) 850 stack_user__printf(&sample->user_stack); 851 852 if (sample_type & PERF_SAMPLE_WEIGHT) 853 printf("... weight: %" PRIu64 "\n", sample->weight); 854 855 if (sample_type & PERF_SAMPLE_DATA_SRC) 856 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); 857 858 if (sample_type & PERF_SAMPLE_READ) 859 sample_read__printf(sample, evsel->attr.read_format); 860 } 861 862 static struct machine * 863 perf_session__find_machine_for_cpumode(struct perf_session *session, 864 union perf_event *event, 865 struct perf_sample *sample) 866 { 867 const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 868 869 if (perf_guest && 870 ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || 871 (cpumode == PERF_RECORD_MISC_GUEST_USER))) { 872 u32 pid; 873 874 if (event->header.type == PERF_RECORD_MMAP 875 || event->header.type == PERF_RECORD_MMAP2) 876 pid = event->mmap.pid; 877 else 878 pid = sample->pid; 879 880 return perf_session__findnew_machine(session, pid); 881 } 882 883 return &session->machines.host; 884 } 885 886 static int deliver_sample_value(struct perf_session *session, 887 struct perf_tool *tool, 888 union perf_event *event, 889 struct perf_sample *sample, 890 struct sample_read_value *v, 891 struct machine *machine) 892 { 893 struct perf_sample_id *sid; 894 895 sid = perf_evlist__id2sid(session->evlist, v->id); 896 if (sid) { 897 sample->id = v->id; 898 sample->period = v->value - sid->period; 899 sid->period = v->value; 900 } 901 902 if (!sid || sid->evsel == NULL) { 903 ++session->stats.nr_unknown_id; 904 return 0; 905 } 906 907 return tool->sample(tool, event, sample, sid->evsel, machine); 908 } 909 910 static int deliver_sample_group(struct perf_session *session, 911 struct perf_tool *tool, 912 union perf_event *event, 913 struct perf_sample *sample, 914 struct machine *machine) 915 { 916 int ret = -EINVAL; 917 u64 i; 918 919 for (i = 0; i < sample->read.group.nr; i++) { 920 ret = deliver_sample_value(session, tool, event, sample, 921 &sample->read.group.values[i], 922 machine); 923 if (ret) 924 break; 925 } 926 927 return ret; 928 } 929 930 static int 931 perf_session__deliver_sample(struct perf_session *session, 932 struct perf_tool *tool, 933 union perf_event *event, 934 struct perf_sample *sample, 935 struct perf_evsel *evsel, 936 struct machine *machine) 937 { 938 /* We know evsel != NULL. */ 939 u64 sample_type = evsel->attr.sample_type; 940 u64 read_format = evsel->attr.read_format; 941 942 /* Standard sample delievery. */ 943 if (!(sample_type & PERF_SAMPLE_READ)) 944 return tool->sample(tool, event, sample, evsel, machine); 945 946 /* For PERF_SAMPLE_READ we have either single or group mode. */ 947 if (read_format & PERF_FORMAT_GROUP) 948 return deliver_sample_group(session, tool, event, sample, 949 machine); 950 else 951 return deliver_sample_value(session, tool, event, sample, 952 &sample->read.one, machine); 953 } 954 955 static int perf_session_deliver_event(struct perf_session *session, 956 union perf_event *event, 957 struct perf_sample *sample, 958 struct perf_tool *tool, 959 u64 file_offset) 960 { 961 struct perf_evsel *evsel; 962 struct machine *machine; 963 964 dump_event(session, event, file_offset, sample); 965 966 evsel = perf_evlist__id2evsel(session->evlist, sample->id); 967 if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) { 968 /* 969 * XXX We're leaving PERF_RECORD_SAMPLE unnacounted here 970 * because the tools right now may apply filters, discarding 971 * some of the samples. For consistency, in the future we 972 * should have something like nr_filtered_samples and remove 973 * the sample->period from total_sample_period, etc, KISS for 974 * now tho. 975 * 976 * Also testing against NULL allows us to handle files without 977 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the 978 * future probably it'll be a good idea to restrict event 979 * processing via perf_session to files with both set. 980 */ 981 hists__inc_nr_events(&evsel->hists, event->header.type); 982 } 983 984 machine = perf_session__find_machine_for_cpumode(session, event, 985 sample); 986 987 switch (event->header.type) { 988 case PERF_RECORD_SAMPLE: 989 dump_sample(evsel, event, sample); 990 if (evsel == NULL) { 991 ++session->stats.nr_unknown_id; 992 return 0; 993 } 994 if (machine == NULL) { 995 ++session->stats.nr_unprocessable_samples; 996 return 0; 997 } 998 return perf_session__deliver_sample(session, tool, event, 999 sample, evsel, machine); 1000 case PERF_RECORD_MMAP: 1001 return tool->mmap(tool, event, sample, machine); 1002 case PERF_RECORD_MMAP2: 1003 return tool->mmap2(tool, event, sample, machine); 1004 case PERF_RECORD_COMM: 1005 return tool->comm(tool, event, sample, machine); 1006 case PERF_RECORD_FORK: 1007 return tool->fork(tool, event, sample, machine); 1008 case PERF_RECORD_EXIT: 1009 return tool->exit(tool, event, sample, machine); 1010 case PERF_RECORD_LOST: 1011 if (tool->lost == perf_event__process_lost) 1012 session->stats.total_lost += event->lost.lost; 1013 return tool->lost(tool, event, sample, machine); 1014 case PERF_RECORD_READ: 1015 return tool->read(tool, event, sample, evsel, machine); 1016 case PERF_RECORD_THROTTLE: 1017 return tool->throttle(tool, event, sample, machine); 1018 case PERF_RECORD_UNTHROTTLE: 1019 return tool->unthrottle(tool, event, sample, machine); 1020 default: 1021 ++session->stats.nr_unknown_events; 1022 return -1; 1023 } 1024 } 1025 1026 static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, 1027 struct perf_tool *tool, u64 file_offset) 1028 { 1029 int err; 1030 1031 dump_event(session, event, file_offset, NULL); 1032 1033 /* These events are processed right away */ 1034 switch (event->header.type) { 1035 case PERF_RECORD_HEADER_ATTR: 1036 err = tool->attr(tool, event, &session->evlist); 1037 if (err == 0) 1038 perf_session__set_id_hdr_size(session); 1039 return err; 1040 case PERF_RECORD_HEADER_TRACING_DATA: 1041 /* setup for reading amidst mmap */ 1042 lseek(session->fd, file_offset, SEEK_SET); 1043 return tool->tracing_data(tool, event, session); 1044 case PERF_RECORD_HEADER_BUILD_ID: 1045 return tool->build_id(tool, event, session); 1046 case PERF_RECORD_FINISHED_ROUND: 1047 return tool->finished_round(tool, event, session); 1048 default: 1049 return -EINVAL; 1050 } 1051 } 1052 1053 static void event_swap(union perf_event *event, bool sample_id_all) 1054 { 1055 perf_event__swap_op swap; 1056 1057 swap = perf_event__swap_ops[event->header.type]; 1058 if (swap) 1059 swap(event, sample_id_all); 1060 } 1061 1062 static int perf_session__process_event(struct perf_session *session, 1063 union perf_event *event, 1064 struct perf_tool *tool, 1065 u64 file_offset) 1066 { 1067 struct perf_sample sample; 1068 int ret; 1069 1070 if (session->header.needs_swap) 1071 event_swap(event, perf_evlist__sample_id_all(session->evlist)); 1072 1073 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1074 return -EINVAL; 1075 1076 events_stats__inc(&session->stats, event->header.type); 1077 1078 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1079 return perf_session__process_user_event(session, event, tool, file_offset); 1080 1081 /* 1082 * For all kernel events we get the sample data 1083 */ 1084 ret = perf_evlist__parse_sample(session->evlist, event, &sample); 1085 if (ret) 1086 return ret; 1087 1088 if (tool->ordered_samples) { 1089 ret = perf_session_queue_event(session, event, &sample, 1090 file_offset); 1091 if (ret != -ETIME) 1092 return ret; 1093 } 1094 1095 return perf_session_deliver_event(session, event, &sample, tool, 1096 file_offset); 1097 } 1098 1099 void perf_event_header__bswap(struct perf_event_header *self) 1100 { 1101 self->type = bswap_32(self->type); 1102 self->misc = bswap_16(self->misc); 1103 self->size = bswap_16(self->size); 1104 } 1105 1106 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 1107 { 1108 return machine__findnew_thread(&session->machines.host, 0, pid); 1109 } 1110 1111 static struct thread *perf_session__register_idle_thread(struct perf_session *self) 1112 { 1113 struct thread *thread = perf_session__findnew(self, 0); 1114 1115 if (thread == NULL || thread__set_comm(thread, "swapper")) { 1116 pr_err("problem inserting idle task.\n"); 1117 thread = NULL; 1118 } 1119 1120 return thread; 1121 } 1122 1123 static void perf_session__warn_about_errors(const struct perf_session *session, 1124 const struct perf_tool *tool) 1125 { 1126 if (tool->lost == perf_event__process_lost && 1127 session->stats.nr_events[PERF_RECORD_LOST] != 0) { 1128 ui__warning("Processed %d events and lost %d chunks!\n\n" 1129 "Check IO/CPU overload!\n\n", 1130 session->stats.nr_events[0], 1131 session->stats.nr_events[PERF_RECORD_LOST]); 1132 } 1133 1134 if (session->stats.nr_unknown_events != 0) { 1135 ui__warning("Found %u unknown events!\n\n" 1136 "Is this an older tool processing a perf.data " 1137 "file generated by a more recent tool?\n\n" 1138 "If that is not the case, consider " 1139 "reporting to linux-kernel@vger.kernel.org.\n\n", 1140 session->stats.nr_unknown_events); 1141 } 1142 1143 if (session->stats.nr_unknown_id != 0) { 1144 ui__warning("%u samples with id not present in the header\n", 1145 session->stats.nr_unknown_id); 1146 } 1147 1148 if (session->stats.nr_invalid_chains != 0) { 1149 ui__warning("Found invalid callchains!\n\n" 1150 "%u out of %u events were discarded for this reason.\n\n" 1151 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1152 session->stats.nr_invalid_chains, 1153 session->stats.nr_events[PERF_RECORD_SAMPLE]); 1154 } 1155 1156 if (session->stats.nr_unprocessable_samples != 0) { 1157 ui__warning("%u unprocessable samples recorded.\n" 1158 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1159 session->stats.nr_unprocessable_samples); 1160 } 1161 } 1162 1163 #define session_done() (*(volatile int *)(&session_done)) 1164 volatile int session_done; 1165 1166 static int __perf_session__process_pipe_events(struct perf_session *self, 1167 struct perf_tool *tool) 1168 { 1169 union perf_event *event; 1170 uint32_t size, cur_size = 0; 1171 void *buf = NULL; 1172 int skip = 0; 1173 u64 head; 1174 int err; 1175 void *p; 1176 1177 perf_tool__fill_defaults(tool); 1178 1179 head = 0; 1180 cur_size = sizeof(union perf_event); 1181 1182 buf = malloc(cur_size); 1183 if (!buf) 1184 return -errno; 1185 more: 1186 event = buf; 1187 err = readn(self->fd, event, sizeof(struct perf_event_header)); 1188 if (err <= 0) { 1189 if (err == 0) 1190 goto done; 1191 1192 pr_err("failed to read event header\n"); 1193 goto out_err; 1194 } 1195 1196 if (self->header.needs_swap) 1197 perf_event_header__bswap(&event->header); 1198 1199 size = event->header.size; 1200 if (size < sizeof(struct perf_event_header)) { 1201 pr_err("bad event header size\n"); 1202 goto out_err; 1203 } 1204 1205 if (size > cur_size) { 1206 void *new = realloc(buf, size); 1207 if (!new) { 1208 pr_err("failed to allocate memory to read event\n"); 1209 goto out_err; 1210 } 1211 buf = new; 1212 cur_size = size; 1213 event = buf; 1214 } 1215 p = event; 1216 p += sizeof(struct perf_event_header); 1217 1218 if (size - sizeof(struct perf_event_header)) { 1219 err = readn(self->fd, p, size - sizeof(struct perf_event_header)); 1220 if (err <= 0) { 1221 if (err == 0) { 1222 pr_err("unexpected end of event stream\n"); 1223 goto done; 1224 } 1225 1226 pr_err("failed to read event data\n"); 1227 goto out_err; 1228 } 1229 } 1230 1231 if ((skip = perf_session__process_event(self, event, tool, head)) < 0) { 1232 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1233 head, event->header.size, event->header.type); 1234 err = -EINVAL; 1235 goto out_err; 1236 } 1237 1238 head += size; 1239 1240 if (skip > 0) 1241 head += skip; 1242 1243 if (!session_done()) 1244 goto more; 1245 done: 1246 err = 0; 1247 out_err: 1248 free(buf); 1249 perf_session__warn_about_errors(self, tool); 1250 perf_session_free_sample_buffers(self); 1251 return err; 1252 } 1253 1254 static union perf_event * 1255 fetch_mmaped_event(struct perf_session *session, 1256 u64 head, size_t mmap_size, char *buf) 1257 { 1258 union perf_event *event; 1259 1260 /* 1261 * Ensure we have enough space remaining to read 1262 * the size of the event in the headers. 1263 */ 1264 if (head + sizeof(event->header) > mmap_size) 1265 return NULL; 1266 1267 event = (union perf_event *)(buf + head); 1268 1269 if (session->header.needs_swap) 1270 perf_event_header__bswap(&event->header); 1271 1272 if (head + event->header.size > mmap_size) { 1273 /* We're not fetching the event so swap back again */ 1274 if (session->header.needs_swap) 1275 perf_event_header__bswap(&event->header); 1276 return NULL; 1277 } 1278 1279 return event; 1280 } 1281 1282 /* 1283 * On 64bit we can mmap the data file in one go. No need for tiny mmap 1284 * slices. On 32bit we use 32MB. 1285 */ 1286 #if BITS_PER_LONG == 64 1287 #define MMAP_SIZE ULLONG_MAX 1288 #define NUM_MMAPS 1 1289 #else 1290 #define MMAP_SIZE (32 * 1024 * 1024ULL) 1291 #define NUM_MMAPS 128 1292 #endif 1293 1294 int __perf_session__process_events(struct perf_session *session, 1295 u64 data_offset, u64 data_size, 1296 u64 file_size, struct perf_tool *tool) 1297 { 1298 u64 head, page_offset, file_offset, file_pos, progress_next; 1299 int err, mmap_prot, mmap_flags, map_idx = 0; 1300 size_t mmap_size; 1301 char *buf, *mmaps[NUM_MMAPS]; 1302 union perf_event *event; 1303 uint32_t size; 1304 1305 perf_tool__fill_defaults(tool); 1306 1307 page_offset = page_size * (data_offset / page_size); 1308 file_offset = page_offset; 1309 head = data_offset - page_offset; 1310 1311 if (data_offset + data_size < file_size) 1312 file_size = data_offset + data_size; 1313 1314 progress_next = file_size / 16; 1315 1316 mmap_size = MMAP_SIZE; 1317 if (mmap_size > file_size) 1318 mmap_size = file_size; 1319 1320 memset(mmaps, 0, sizeof(mmaps)); 1321 1322 mmap_prot = PROT_READ; 1323 mmap_flags = MAP_SHARED; 1324 1325 if (session->header.needs_swap) { 1326 mmap_prot |= PROT_WRITE; 1327 mmap_flags = MAP_PRIVATE; 1328 } 1329 remap: 1330 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd, 1331 file_offset); 1332 if (buf == MAP_FAILED) { 1333 pr_err("failed to mmap file\n"); 1334 err = -errno; 1335 goto out_err; 1336 } 1337 mmaps[map_idx] = buf; 1338 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 1339 file_pos = file_offset + head; 1340 1341 more: 1342 event = fetch_mmaped_event(session, head, mmap_size, buf); 1343 if (!event) { 1344 if (mmaps[map_idx]) { 1345 munmap(mmaps[map_idx], mmap_size); 1346 mmaps[map_idx] = NULL; 1347 } 1348 1349 page_offset = page_size * (head / page_size); 1350 file_offset += page_offset; 1351 head -= page_offset; 1352 goto remap; 1353 } 1354 1355 size = event->header.size; 1356 1357 if (size < sizeof(struct perf_event_header) || 1358 perf_session__process_event(session, event, tool, file_pos) < 0) { 1359 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1360 file_offset + head, event->header.size, 1361 event->header.type); 1362 err = -EINVAL; 1363 goto out_err; 1364 } 1365 1366 head += size; 1367 file_pos += size; 1368 1369 if (file_pos >= progress_next) { 1370 progress_next += file_size / 16; 1371 ui_progress__update(file_pos, file_size, 1372 "Processing events..."); 1373 } 1374 1375 if (file_pos < file_size) 1376 goto more; 1377 1378 err = 0; 1379 /* do the final flush for ordered samples */ 1380 session->ordered_samples.next_flush = ULLONG_MAX; 1381 err = flush_sample_queue(session, tool); 1382 out_err: 1383 ui_progress__finish(); 1384 perf_session__warn_about_errors(session, tool); 1385 perf_session_free_sample_buffers(session); 1386 return err; 1387 } 1388 1389 int perf_session__process_events(struct perf_session *self, 1390 struct perf_tool *tool) 1391 { 1392 int err; 1393 1394 if (perf_session__register_idle_thread(self) == NULL) 1395 return -ENOMEM; 1396 1397 if (!self->fd_pipe) 1398 err = __perf_session__process_events(self, 1399 self->header.data_offset, 1400 self->header.data_size, 1401 self->size, tool); 1402 else 1403 err = __perf_session__process_pipe_events(self, tool); 1404 1405 return err; 1406 } 1407 1408 bool perf_session__has_traces(struct perf_session *session, const char *msg) 1409 { 1410 struct perf_evsel *evsel; 1411 1412 list_for_each_entry(evsel, &session->evlist->entries, node) { 1413 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) 1414 return true; 1415 } 1416 1417 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 1418 return false; 1419 } 1420 1421 int maps__set_kallsyms_ref_reloc_sym(struct map **maps, 1422 const char *symbol_name, u64 addr) 1423 { 1424 char *bracket; 1425 enum map_type i; 1426 struct ref_reloc_sym *ref; 1427 1428 ref = zalloc(sizeof(struct ref_reloc_sym)); 1429 if (ref == NULL) 1430 return -ENOMEM; 1431 1432 ref->name = strdup(symbol_name); 1433 if (ref->name == NULL) { 1434 free(ref); 1435 return -ENOMEM; 1436 } 1437 1438 bracket = strchr(ref->name, ']'); 1439 if (bracket) 1440 *bracket = '\0'; 1441 1442 ref->addr = addr; 1443 1444 for (i = 0; i < MAP__NR_TYPES; ++i) { 1445 struct kmap *kmap = map__kmap(maps[i]); 1446 kmap->ref_reloc_sym = ref; 1447 } 1448 1449 return 0; 1450 } 1451 1452 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) 1453 { 1454 return machines__fprintf_dsos(&self->machines, fp); 1455 } 1456 1457 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, 1458 bool (skip)(struct dso *dso, int parm), int parm) 1459 { 1460 return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm); 1461 } 1462 1463 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 1464 { 1465 struct perf_evsel *pos; 1466 size_t ret = fprintf(fp, "Aggregated stats:\n"); 1467 1468 ret += events_stats__fprintf(&session->stats, fp); 1469 1470 list_for_each_entry(pos, &session->evlist->entries, node) { 1471 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); 1472 ret += events_stats__fprintf(&pos->hists.stats, fp); 1473 } 1474 1475 return ret; 1476 } 1477 1478 size_t perf_session__fprintf(struct perf_session *session, FILE *fp) 1479 { 1480 /* 1481 * FIXME: Here we have to actually print all the machines in this 1482 * session, not just the host... 1483 */ 1484 return machine__fprintf(&session->machines.host, fp); 1485 } 1486 1487 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 1488 unsigned int type) 1489 { 1490 struct perf_evsel *pos; 1491 1492 list_for_each_entry(pos, &session->evlist->entries, node) { 1493 if (pos->attr.type == type) 1494 return pos; 1495 } 1496 return NULL; 1497 } 1498 1499 void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, 1500 struct perf_sample *sample, struct machine *machine, 1501 unsigned int print_opts, unsigned int stack_depth) 1502 { 1503 struct addr_location al; 1504 struct callchain_cursor_node *node; 1505 int print_ip = print_opts & PRINT_IP_OPT_IP; 1506 int print_sym = print_opts & PRINT_IP_OPT_SYM; 1507 int print_dso = print_opts & PRINT_IP_OPT_DSO; 1508 int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET; 1509 int print_oneline = print_opts & PRINT_IP_OPT_ONELINE; 1510 char s = print_oneline ? ' ' : '\t'; 1511 1512 if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) { 1513 error("problem processing %d event, skipping it.\n", 1514 event->header.type); 1515 return; 1516 } 1517 1518 if (symbol_conf.use_callchain && sample->callchain) { 1519 1520 if (machine__resolve_callchain(machine, evsel, al.thread, 1521 sample, NULL, NULL) != 0) { 1522 if (verbose) 1523 error("Failed to resolve callchain. Skipping\n"); 1524 return; 1525 } 1526 callchain_cursor_commit(&callchain_cursor); 1527 1528 while (stack_depth) { 1529 node = callchain_cursor_current(&callchain_cursor); 1530 if (!node) 1531 break; 1532 1533 if (print_ip) 1534 printf("%c%16" PRIx64, s, node->ip); 1535 1536 if (print_sym) { 1537 printf(" "); 1538 if (print_symoffset) { 1539 al.addr = node->ip; 1540 al.map = node->map; 1541 symbol__fprintf_symname_offs(node->sym, &al, stdout); 1542 } else 1543 symbol__fprintf_symname(node->sym, stdout); 1544 } 1545 1546 if (print_dso) { 1547 printf(" ("); 1548 map__fprintf_dsoname(node->map, stdout); 1549 printf(")"); 1550 } 1551 1552 if (!print_oneline) 1553 printf("\n"); 1554 1555 callchain_cursor_advance(&callchain_cursor); 1556 1557 stack_depth--; 1558 } 1559 1560 } else { 1561 if (print_ip) 1562 printf("%16" PRIx64, sample->ip); 1563 1564 if (print_sym) { 1565 printf(" "); 1566 if (print_symoffset) 1567 symbol__fprintf_symname_offs(al.sym, &al, 1568 stdout); 1569 else 1570 symbol__fprintf_symname(al.sym, stdout); 1571 } 1572 1573 if (print_dso) { 1574 printf(" ("); 1575 map__fprintf_dsoname(al.map, stdout); 1576 printf(")"); 1577 } 1578 } 1579 } 1580 1581 int perf_session__cpu_bitmap(struct perf_session *session, 1582 const char *cpu_list, unsigned long *cpu_bitmap) 1583 { 1584 int i; 1585 struct cpu_map *map; 1586 1587 for (i = 0; i < PERF_TYPE_MAX; ++i) { 1588 struct perf_evsel *evsel; 1589 1590 evsel = perf_session__find_first_evtype(session, i); 1591 if (!evsel) 1592 continue; 1593 1594 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) { 1595 pr_err("File does not contain CPU events. " 1596 "Remove -c option to proceed.\n"); 1597 return -1; 1598 } 1599 } 1600 1601 map = cpu_map__new(cpu_list); 1602 if (map == NULL) { 1603 pr_err("Invalid cpu_list\n"); 1604 return -1; 1605 } 1606 1607 for (i = 0; i < map->nr; i++) { 1608 int cpu = map->map[i]; 1609 1610 if (cpu >= MAX_NR_CPUS) { 1611 pr_err("Requested CPU %d too large. " 1612 "Consider raising MAX_NR_CPUS\n", cpu); 1613 return -1; 1614 } 1615 1616 set_bit(cpu, cpu_bitmap); 1617 } 1618 1619 return 0; 1620 } 1621 1622 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 1623 bool full) 1624 { 1625 struct stat st; 1626 int ret; 1627 1628 if (session == NULL || fp == NULL) 1629 return; 1630 1631 ret = fstat(session->fd, &st); 1632 if (ret == -1) 1633 return; 1634 1635 fprintf(fp, "# ========\n"); 1636 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime)); 1637 perf_header__fprintf_info(session, fp, full); 1638 fprintf(fp, "# ========\n#\n"); 1639 } 1640 1641 1642 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 1643 const struct perf_evsel_str_handler *assocs, 1644 size_t nr_assocs) 1645 { 1646 struct perf_evsel *evsel; 1647 size_t i; 1648 int err; 1649 1650 for (i = 0; i < nr_assocs; i++) { 1651 /* 1652 * Adding a handler for an event not in the session, 1653 * just ignore it. 1654 */ 1655 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name); 1656 if (evsel == NULL) 1657 continue; 1658 1659 err = -EEXIST; 1660 if (evsel->handler.func != NULL) 1661 goto out; 1662 evsel->handler.func = assocs[i].handler; 1663 } 1664 1665 err = 0; 1666 out: 1667 return err; 1668 } 1669