#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"

static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	INIT_LIST_HEAD(&session->ordered_samples.samples);
	INIT_LIST_HEAD(&session->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&session->ordered_samples.to_free);
	machines__init(&session->machines);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
		}
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);

	zfree(&env->cmdline);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->numa_nodes);
	zfree(&env->pmu_mappings);
}

void perf_session__delete(struct perf_session *session)
{
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_dead_threads(session);
	perf_session__delete_threads(session);
	perf_session_env__delete(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);

void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and should carry the
 * perf_event_attr bitfield flags in a separate FEAT_ section of the data
 * file. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user = bswap_32(attr->sample_stack_user);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

static int flush_sample_queue(struct perf_session *s,
			      struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, os->nr_samples, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);
		os->nr_samples--;

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	return 0;
}

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 * ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 * cnt1 timestamps     |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 * ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 * cnt1 timestamps     |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 * ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 * cnt1 timestamps     |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session)
{
	int ret = flush_sample_queue(session, tool);
	if (!ret)
		session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return ret;
}

/* The queue is ordered by time */
static void __queue_event(struct sample_queue *new, struct perf_session *s)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct sample_queue *sample = os->last_sample;
	u64 timestamp = new->timestamp;
	struct list_head *p;

	++os->nr_samples;
	os->last_sample = new;

	if (!sample) {
		list_add(&new->list, &os->samples);
		os->max_timestamp = timestamp;
		return;
	}

	/*
	 * last_sample might point to some random place in the list as it's
	 * the last queued event. We expect that the new event is close to
	 * this.
	 */
	if (sample->timestamp <= timestamp) {
		while (sample->timestamp <= timestamp) {
			p = sample->list.next;
			if (p == &os->samples) {
				list_add_tail(&new->list, &os->samples);
				os->max_timestamp = timestamp;
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add_tail(&new->list, &sample->list);
	} else {
		while (sample->timestamp > timestamp) {
			p = sample->list.prev;
			if (p == &os->samples) {
				list_add(&new->list, &os->samples);
				return;
			}
			sample = list_entry(p, struct sample_queue, list);
		}
		list_add(&new->list, &sample->list);
	}
}

#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct sample_queue))

int perf_session_queue_event(struct perf_session *s, union perf_event *event,
			     struct perf_sample *sample, u64 file_offset)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *sc = &os->sample_cache;
	u64 timestamp = sample->time;
	struct sample_queue *new;

	if (!timestamp || timestamp == ~0ULL)
		return -ETIME;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	if (!list_empty(sc)) {
		new = list_entry(sc->next, struct sample_queue, list);
		list_del(&new->list);
	} else if (os->sample_buffer) {
		new = os->sample_buffer + os->sample_buffer_idx;
		if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER)
			os->sample_buffer = NULL;
	} else {
		os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new));
		if (!os->sample_buffer)
			return -ENOMEM;
		/*
		 * Entry 0 only anchors the buffer on the to_free list, so
		 * queue entries are handed out starting at index 1.
		 */
		list_add(&os->sample_buffer->list, &os->to_free);
		os->sample_buffer_idx = 2;
		new = os->sample_buffer + 1;
	}

	new->timestamp = timestamp;
	new->file_offset = file_offset;
	new->event = event;

	__queue_event(new, s);

	return 0;
}

static void callchain__printf(struct perf_sample *sample)
{
	unsigned int i;

	printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr);

	for (i = 0; i < sample->callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, sample->callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++)
		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n",
		       i, sample->branch_stack->entries[i].from,
		       sample->branch_stack->entries[i].to);
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs) {
		u64 mask = user_regs->mask;
		printf("... user regs: mask 0x%" PRIx64 "\n", mask);
		regs_dump__printf(mask, user_regs->regs);
	}
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_session__print_tstamp(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(session->evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = perf_session__find_machine(session, pid);
		if (!machine)
			machine = perf_session__findnew_machine(session,
								DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &session->machines.host;
}

static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}

static int deliver_sample_group(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(session, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_session__deliver_sample(struct perf_session *session,
			     struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(session, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(session, tool, event, sample,
					    &sample->read.one, machine);
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc. KISS for
		 * now though.
		 *
		 * Also testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future it'll probably be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event,
							 sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_session__deliver_sample(session, tool, event,
						    sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}

static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    struct perf_tool *tool,
					    u64 file_offset)
{
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}

volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session,
					       struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	free(buf);
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}

static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size && (data_offset + data_size < file_size))
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, tool, file_pos))
									< 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session,
				 struct perf_tool *tool)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) == NULL)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size,
						     size, tool);
	else
		err = __perf_session__process_pipe_events(session, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	evlist__for_each(session->evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}

size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (machine__resolve_callchain(al->machine, evsel, al->thread,
					       sample, NULL, NULL,
					       PERF_MAX_STACK_DEPTH) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al->map, stdout);
			printf(")");
		}

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}

int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__delete(map);
	return err;
}

void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
				bool full)
{
	struct stat st;
	int fd, ret;

	if (session == NULL || fp == NULL)
		return;

	fd = perf_data_file__fd(session->file);

	ret = fstat(fd, &st);
	if (ret == -1)
		return;

	fprintf(fp, "# ========\n");
	fprintf(fp, "# captured on: %s", ctime(&st.st_ctime));
	perf_header__fprintf_info(session, fp, full);
	fprintf(fp, "# ========\n#\n");
}

int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}