#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "evlist.h"
#include "evsel.h"
#include "session.h"
#include "tool.h"
#include "sort.h"
#include "util.h"
#include "cpumap.h"
#include "perf_regs.h"
#include "vdso.h"

static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}
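/*
 * Typical lifecycle of a session as seen from a perf builtin (an
 * illustrative sketch only, error handling elided):
 *
 *	struct perf_data_file file = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_session *session = perf_session__new(&file, false, &tool);
 *
 *	if (session) {
 *		perf_session__process_events(session, &tool);
 *		perf_session__delete(session);
 *	}
 */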
struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	INIT_LIST_HEAD(&session->ordered_samples.samples);
	INIT_LIST_HEAD(&session->ordered_samples.sample_cache);
	INIT_LIST_HEAD(&session->ordered_samples.to_free);
	machines__init(&session->machines);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
		}
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			goto out_delete;
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_samples && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_samples = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}

static void perf_session__delete_dead_threads(struct perf_session *session)
{
	machine__delete_dead_threads(&session->machines.host);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session_env__delete(struct perf_session_env *env)
{
	zfree(&env->hostname);
	zfree(&env->os_release);
	zfree(&env->version);
	zfree(&env->arch);
	zfree(&env->cpu_desc);
	zfree(&env->cpuid);

	zfree(&env->cmdline);
	zfree(&env->sibling_cores);
	zfree(&env->sibling_threads);
	zfree(&env->numa_nodes);
	zfree(&env->pmu_mappings);
}

void perf_session__delete(struct perf_session *session)
{
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_dead_threads(session);
	perf_session__delete_threads(session);
	perf_session_env__delete(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
	vdso__exit();
}

static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct perf_session *perf_session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct perf_session *session);
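/*
 * Illustrative sketch: a tool only needs to fill in the callbacks it
 * cares about and can leave the rest NULL; perf_tool__fill_defaults()
 * below points every unset callback at a matching stub. The handler
 * name here is hypothetical:
 *
 *	struct perf_tool tool = {
 *		.sample		 = my_process_sample,
 *		.ordered_samples = true,
 *	};
 *
 *	perf_tool__fill_defaults(&tool);
 */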
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_finished_round_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_samples)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
}

static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}
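/*
 * Worked example for the PERF_ALIGN() arithmetic above: for a comm string
 * "perf" (strlen 4, plus the NUL byte makes 5), PERF_ALIGN(5, sizeof(u64))
 * rounds up to 8, which is where the trailing sample_id_all block begins.
 */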
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix that carries the
 * perf_event_attr bitfield flags in a separate FEAT_ data file section.
 * Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
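/*
 * Illustrative example: revbyte() mirrors the bit order within a single
 * byte, e.g. revbyte(0x1e) == 0x78 (00011110b -> 01111000b), so running
 * it over each byte of the flags word undoes the per-byte bit reversal
 * described above.
 */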
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);
	attr->config = bswap_64(attr->config);
	attr->sample_period = bswap_64(attr->sample_period);
	attr->sample_type = bswap_64(attr->sample_type);
	attr->read_format = bswap_64(attr->read_format);
	attr->wakeup_events = bswap_32(attr->wakeup_events);
	attr->bp_type = bswap_32(attr->bp_type);
	attr->bp_addr = bswap_64(attr->bp_addr);
	attr->bp_len = bswap_64(attr->bp_len);
	attr->branch_sample_type = bswap_64(attr->branch_sample_type);
	attr->sample_regs_user = bswap_64(attr->sample_regs_user);
	attr->sample_stack_user = bswap_32(attr->sample_stack_user);

	swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};

struct sample_queue {
	u64			timestamp;
	u64			file_offset;
	union perf_event	*event;
	struct list_head	list;
};

static void perf_session_free_sample_buffers(struct perf_session *session)
{
	struct ordered_samples *os = &session->ordered_samples;

	while (!list_empty(&os->to_free)) {
		struct sample_queue *sq;

		sq = list_entry(os->to_free.next, struct sample_queue, list);
		list_del(&sq->list);
		free(sq);
	}
}

static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset);

static int flush_sample_queue(struct perf_session *s,
			      struct perf_tool *tool)
{
	struct ordered_samples *os = &s->ordered_samples;
	struct list_head *head = &os->samples;
	struct sample_queue *tmp, *iter;
	struct perf_sample sample;
	u64 limit = os->next_flush;
	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
	bool show_progress = limit == ULLONG_MAX;
	struct ui_progress prog;
	int ret;

	if (!tool->ordered_samples || !limit)
		return 0;

	if (show_progress)
		ui_progress__init(&prog, os->nr_samples, "Processing time ordered events...");

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (session_done())
			return 0;

		if (iter->timestamp > limit)
			break;

		ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample);
		if (ret)
			pr_err("Can't parse sample, err = %d\n", ret);
		else {
			ret = perf_session_deliver_event(s, iter->event, &sample, tool,
							 iter->file_offset);
			if (ret)
				return ret;
		}

		os->last_flush = iter->timestamp;
		list_del(&iter->list);
		list_add(&iter->list, &os->sample_cache);

		if (show_progress)
			ui_progress__update(&prog, 1);
	}

	if (list_empty(head)) {
		os->last_sample = NULL;
	} else if (last_ts <= limit) {
		os->last_sample =
			list_entry(head->prev, struct sample_queue, list);
	}

	os->nr_samples = 0;

	return 0;
}
540 * 541 * ============ PASS n ================= 542 * CPU 0 | CPU 1 543 * | 544 * cnt1 timestamps | cnt2 timestamps 545 * 1 | 2 546 * 2 | 3 547 * - | 4 <--- max recorded 548 * 549 * ============ PASS n + 1 ============== 550 * CPU 0 | CPU 1 551 * | 552 * cnt1 timestamps | cnt2 timestamps 553 * 3 | 5 554 * 4 | 6 555 * 5 | 7 <---- max recorded 556 * 557 * Flush every events below timestamp 4 558 * 559 * ============ PASS n + 2 ============== 560 * CPU 0 | CPU 1 561 * | 562 * cnt1 timestamps | cnt2 timestamps 563 * 6 | 8 564 * 7 | 9 565 * - | 10 566 * 567 * Flush every events below timestamp 7 568 * etc... 569 */ 570 static int process_finished_round(struct perf_tool *tool, 571 union perf_event *event __maybe_unused, 572 struct perf_session *session) 573 { 574 int ret = flush_sample_queue(session, tool); 575 if (!ret) 576 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; 577 578 return ret; 579 } 580 581 /* The queue is ordered by time */ 582 static void __queue_event(struct sample_queue *new, struct perf_session *s) 583 { 584 struct ordered_samples *os = &s->ordered_samples; 585 struct sample_queue *sample = os->last_sample; 586 u64 timestamp = new->timestamp; 587 struct list_head *p; 588 589 ++os->nr_samples; 590 os->last_sample = new; 591 592 if (!sample) { 593 list_add(&new->list, &os->samples); 594 os->max_timestamp = timestamp; 595 return; 596 } 597 598 /* 599 * last_sample might point to some random place in the list as it's 600 * the last queued event. We expect that the new event is close to 601 * this. 602 */ 603 if (sample->timestamp <= timestamp) { 604 while (sample->timestamp <= timestamp) { 605 p = sample->list.next; 606 if (p == &os->samples) { 607 list_add_tail(&new->list, &os->samples); 608 os->max_timestamp = timestamp; 609 return; 610 } 611 sample = list_entry(p, struct sample_queue, list); 612 } 613 list_add_tail(&new->list, &sample->list); 614 } else { 615 while (sample->timestamp > timestamp) { 616 p = sample->list.prev; 617 if (p == &os->samples) { 618 list_add(&new->list, &os->samples); 619 return; 620 } 621 sample = list_entry(p, struct sample_queue, list); 622 } 623 list_add(&new->list, &sample->list); 624 } 625 } 626 627 #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) 628 629 int perf_session_queue_event(struct perf_session *s, union perf_event *event, 630 struct perf_sample *sample, u64 file_offset) 631 { 632 struct ordered_samples *os = &s->ordered_samples; 633 struct list_head *sc = &os->sample_cache; 634 u64 timestamp = sample->time; 635 struct sample_queue *new; 636 637 if (!timestamp || timestamp == ~0ULL) 638 return -ETIME; 639 640 if (timestamp < s->ordered_samples.last_flush) { 641 printf("Warning: Timestamp below last timeslice flush\n"); 642 return -EINVAL; 643 } 644 645 if (!list_empty(sc)) { 646 new = list_entry(sc->next, struct sample_queue, list); 647 list_del(&new->list); 648 } else if (os->sample_buffer) { 649 new = os->sample_buffer + os->sample_buffer_idx; 650 if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) 651 os->sample_buffer = NULL; 652 } else { 653 os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); 654 if (!os->sample_buffer) 655 return -ENOMEM; 656 list_add(&os->sample_buffer->list, &os->to_free); 657 os->sample_buffer_idx = 2; 658 new = os->sample_buffer + 1; 659 } 660 661 new->timestamp = timestamp; 662 new->file_offset = file_offset; 663 new->event = event; 664 665 __queue_event(new, s); 666 667 return 0; 668 } 669 670 static void callchain__printf(struct perf_sample *sample) 
671 { 672 unsigned int i; 673 674 printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); 675 676 for (i = 0; i < sample->callchain->nr; i++) 677 printf("..... %2d: %016" PRIx64 "\n", 678 i, sample->callchain->ips[i]); 679 } 680 681 static void branch_stack__printf(struct perf_sample *sample) 682 { 683 uint64_t i; 684 685 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); 686 687 for (i = 0; i < sample->branch_stack->nr; i++) 688 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", 689 i, sample->branch_stack->entries[i].from, 690 sample->branch_stack->entries[i].to); 691 } 692 693 static void regs_dump__printf(u64 mask, u64 *regs) 694 { 695 unsigned rid, i = 0; 696 697 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { 698 u64 val = regs[i++]; 699 700 printf(".... %-5s 0x%" PRIx64 "\n", 701 perf_reg_name(rid), val); 702 } 703 } 704 705 static void regs_user__printf(struct perf_sample *sample) 706 { 707 struct regs_dump *user_regs = &sample->user_regs; 708 709 if (user_regs->regs) { 710 u64 mask = user_regs->mask; 711 printf("... user regs: mask 0x%" PRIx64 "\n", mask); 712 regs_dump__printf(mask, user_regs->regs); 713 } 714 } 715 716 static void stack_user__printf(struct stack_dump *dump) 717 { 718 printf("... ustack: size %" PRIu64 ", offset 0x%x\n", 719 dump->size, dump->offset); 720 } 721 722 static void perf_session__print_tstamp(struct perf_session *session, 723 union perf_event *event, 724 struct perf_sample *sample) 725 { 726 u64 sample_type = __perf_evlist__combined_sample_type(session->evlist); 727 728 if (event->header.type != PERF_RECORD_SAMPLE && 729 !perf_evlist__sample_id_all(session->evlist)) { 730 fputs("-1 -1 ", stdout); 731 return; 732 } 733 734 if ((sample_type & PERF_SAMPLE_CPU)) 735 printf("%u ", sample->cpu); 736 737 if (sample_type & PERF_SAMPLE_TIME) 738 printf("%" PRIu64 " ", sample->time); 739 } 740 741 static void sample_read__printf(struct perf_sample *sample, u64 read_format) 742 { 743 printf("... sample_read:\n"); 744 745 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 746 printf("...... time enabled %016" PRIx64 "\n", 747 sample->read.time_enabled); 748 749 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 750 printf("...... time running %016" PRIx64 "\n", 751 sample->read.time_running); 752 753 if (read_format & PERF_FORMAT_GROUP) { 754 u64 i; 755 756 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); 757 758 for (i = 0; i < sample->read.group.nr; i++) { 759 struct sample_read_value *value; 760 761 value = &sample->read.group.values[i]; 762 printf("..... id %016" PRIx64 763 ", value %016" PRIx64 "\n", 764 value->id, value->value); 765 } 766 } else 767 printf("..... 
static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct perf_session *session, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_session__print_tstamp(session, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(sample);

	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}

static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = perf_session__find_machine(session, pid);
		if (!machine)
			machine = perf_session__findnew_machine(session,
						DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &session->machines.host;
}

static int deliver_sample_value(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid;

	sid = perf_evlist__id2sid(session->evlist, v->id);
	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++session->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}
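/*
 * Note on the bookkeeping above: counter reads are cumulative, so
 * deliver_sample_value() turns them into per-sample periods by
 * subtracting the previously seen value cached in sid->period.
 */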
static int deliver_sample_group(struct perf_session *session,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(session, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_session__deliver_sample(struct perf_session *session,
			     struct perf_tool *tool,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct perf_evsel *evsel,
			     struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(session, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(session, tool, event, sample,
					    &sample->read.one, machine);
}
static int perf_session_deliver_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample,
				      struct perf_tool *tool,
				      u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(session, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
		/*
		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
		 * because the tools right now may apply filters, discarding
		 * some of the samples. For consistency, in the future we
		 * should have something like nr_filtered_samples and remove
		 * the sample->period from total_sample_period, etc. KISS for
		 * now though.
		 *
		 * Also, testing against NULL allows us to handle files without
		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
		 * future it will probably be a good idea to restrict event
		 * processing via perf_session to files with both set.
		 */
		hists__inc_nr_events(&evsel->hists, event->header.type);
	}

	machine = perf_session__find_machine_for_cpumode(session, event,
							 sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		dump_sample(evsel, event, sample);
		if (evsel == NULL) {
			++session->stats.nr_unknown_id;
			return 0;
		}
		if (machine == NULL) {
			++session->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_session__deliver_sample(session, tool, event,
						    sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			session->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	default:
		++session->stats.nr_unknown_events;
		return -1;
	}
}
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
					    struct perf_tool *tool, u64 file_offset)
{
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0)
			perf_session__set_id_hdr_size(session);
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, session);
	default:
		return -EINVAL;
	}
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

static int perf_session__process_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&session->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, tool, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_samples) {
		ret = perf_session_queue_event(session, event, &sample,
					       file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session_deliver_event(session, event, &sample, tool,
					  file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}
static void perf_session__warn_about_errors(const struct perf_session *session,
					    const struct perf_tool *tool)
{
	if (tool->lost == perf_event__process_lost &&
	    session->stats.nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    session->stats.nr_events[0],
			    session->stats.nr_events[PERF_RECORD_LOST]);
	}

	if (session->stats.nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_unknown_events);
	}

	if (session->stats.nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    session->stats.nr_unknown_id);
	}

	if (session->stats.nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    session->stats.nr_invalid_chains,
			    session->stats.nr_events[PERF_RECORD_SAMPLE]);
	}

	if (session->stats.nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    session->stats.nr_unprocessable_samples);
	}
}

volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *session,
					       struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	int skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, tool, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	free(buf);
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	return err;
}
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}

/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
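/*
 * Note: the map_idx cursor below wraps with "& (ARRAY_SIZE(mmaps) - 1)",
 * which only works because NUM_MMAPS is a power of two.
 */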
int __perf_session__process_events(struct perf_session *session,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_tool *tool)
{
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	uint32_t size;
	struct ui_progress prog;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size && (data_offset + data_size < file_size))
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    perf_session__process_event(session, event, tool, file_pos) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	session->ordered_samples.next_flush = ULLONG_MAX;
	err = flush_sample_queue(session, tool);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session, tool);
	perf_session_free_sample_buffers(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session,
				 struct perf_tool *tool)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) == NULL)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size,
						     size, tool);
	else
		err = __perf_session__process_pipe_events(session, tool);

	return err;
}

bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}

int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}

size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = fprintf(fp, "Aggregated stats:\n");

	ret += events_stats__fprintf(&session->stats, fp);

	evlist__for_each(session->evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&pos->hists.stats, fp);
	}

	return ret;
}
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}

struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}

void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (machine__resolve_callchain(al->machine, evsel, al->thread,
					       sample, NULL, NULL,
					       PERF_MAX_STACK_DEPTH) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al->map, stdout);
			printf(")");
		}

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}
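/*
 * Illustrative use of perf_session__cpu_bitmap() below (sketch only):
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) < 0)
 *		return -1;
 */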
" 1603 "Remove -c option to proceed.\n"); 1604 return -1; 1605 } 1606 } 1607 1608 map = cpu_map__new(cpu_list); 1609 if (map == NULL) { 1610 pr_err("Invalid cpu_list\n"); 1611 return -1; 1612 } 1613 1614 for (i = 0; i < map->nr; i++) { 1615 int cpu = map->map[i]; 1616 1617 if (cpu >= MAX_NR_CPUS) { 1618 pr_err("Requested CPU %d too large. " 1619 "Consider raising MAX_NR_CPUS\n", cpu); 1620 goto out_delete_map; 1621 } 1622 1623 set_bit(cpu, cpu_bitmap); 1624 } 1625 1626 err = 0; 1627 1628 out_delete_map: 1629 cpu_map__delete(map); 1630 return err; 1631 } 1632 1633 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 1634 bool full) 1635 { 1636 struct stat st; 1637 int fd, ret; 1638 1639 if (session == NULL || fp == NULL) 1640 return; 1641 1642 fd = perf_data_file__fd(session->file); 1643 1644 ret = fstat(fd, &st); 1645 if (ret == -1) 1646 return; 1647 1648 fprintf(fp, "# ========\n"); 1649 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime)); 1650 perf_header__fprintf_info(session, fp, full); 1651 fprintf(fp, "# ========\n#\n"); 1652 } 1653 1654 1655 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 1656 const struct perf_evsel_str_handler *assocs, 1657 size_t nr_assocs) 1658 { 1659 struct perf_evsel *evsel; 1660 size_t i; 1661 int err; 1662 1663 for (i = 0; i < nr_assocs; i++) { 1664 /* 1665 * Adding a handler for an event not in the session, 1666 * just ignore it. 1667 */ 1668 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name); 1669 if (evsel == NULL) 1670 continue; 1671 1672 err = -EEXIST; 1673 if (evsel->handler != NULL) 1674 goto out; 1675 evsel->handler = assocs[i].handler; 1676 } 1677 1678 err = 0; 1679 out: 1680 return err; 1681 } 1682