1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <linux/err.h> 5 #include <linux/kernel.h> 6 #include <linux/zalloc.h> 7 #include <traceevent/event-parse.h> 8 #include <api/fs/fs.h> 9 10 #include <byteswap.h> 11 #include <unistd.h> 12 #include <sys/types.h> 13 #include <sys/mman.h> 14 #include <perf/cpumap.h> 15 16 #include "evlist.h" 17 #include "evsel.h" 18 #include "memswap.h" 19 #include "map.h" 20 #include "symbol.h" 21 #include "session.h" 22 #include "tool.h" 23 #include "sort.h" 24 #include "cpumap.h" 25 #include "perf_regs.h" 26 #include "asm/bug.h" 27 #include "auxtrace.h" 28 #include "thread.h" 29 #include "thread-stack.h" 30 #include "sample-raw.h" 31 #include "stat.h" 32 #include "arch/common.h" 33 34 #ifdef HAVE_ZSTD_SUPPORT 35 static int perf_session__process_compressed_event(struct perf_session *session, 36 union perf_event *event, u64 file_offset) 37 { 38 void *src; 39 size_t decomp_size, src_size; 40 u64 decomp_last_rem = 0; 41 size_t mmap_len, decomp_len = session->header.env.comp_mmap_len; 42 struct decomp *decomp, *decomp_last = session->decomp_last; 43 44 if (decomp_last) { 45 decomp_last_rem = decomp_last->size - decomp_last->head; 46 decomp_len += decomp_last_rem; 47 } 48 49 mmap_len = sizeof(struct decomp) + decomp_len; 50 decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE, 51 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 52 if (decomp == MAP_FAILED) { 53 pr_err("Couldn't allocate memory for decompression\n"); 54 return -1; 55 } 56 57 decomp->file_pos = file_offset; 58 decomp->mmap_len = mmap_len; 59 decomp->head = 0; 60 61 if (decomp_last_rem) { 62 memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem); 63 decomp->size = decomp_last_rem; 64 } 65 66 src = (void *)event + sizeof(struct compressed_event); 67 src_size = event->pack.header.size - sizeof(struct compressed_event); 68 69 decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size, 70 &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem); 71 if (!decomp_size) { 72 munmap(decomp, mmap_len); 73 pr_err("Couldn't decompress data\n"); 74 return -1; 75 } 76 77 decomp->size += decomp_size; 78 79 if (session->decomp == NULL) { 80 session->decomp = decomp; 81 session->decomp_last = decomp; 82 } else { 83 session->decomp_last->next = decomp; 84 session->decomp_last = decomp; 85 } 86 87 pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size); 88 89 return 0; 90 } 91 #else /* !HAVE_ZSTD_SUPPORT */ 92 #define perf_session__process_compressed_event perf_session__process_compressed_event_stub 93 #endif 94 95 static int perf_session__deliver_event(struct perf_session *session, 96 union perf_event *event, 97 struct perf_tool *tool, 98 u64 file_offset); 99 100 static int perf_session__open(struct perf_session *session) 101 { 102 struct perf_data *data = session->data; 103 104 if (perf_session__read_header(session) < 0) { 105 pr_err("incompatible file format (rerun with -v to learn more)\n"); 106 return -1; 107 } 108 109 if (perf_data__is_pipe(data)) 110 return 0; 111 112 if (perf_header__has_feat(&session->header, HEADER_STAT)) 113 return 0; 114 115 if (!perf_evlist__valid_sample_type(session->evlist)) { 116 pr_err("non matching sample_type\n"); 117 return -1; 118 } 119 120 if (!perf_evlist__valid_sample_id_all(session->evlist)) { 121 pr_err("non matching sample_id_all\n"); 122 return -1; 123 } 124 125 if (!perf_evlist__valid_read_format(session->evlist)) { 126 pr_err("non matching read_format\n"); 127 return -1; 128 } 129 130 return 0; 131 } 132 133 void perf_session__set_id_hdr_size(struct perf_session *session) 134 { 135 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 136 137 machines__set_id_hdr_size(&session->machines, id_hdr_size); 138 } 139 140 int perf_session__create_kernel_maps(struct perf_session *session) 141 { 142 int ret = machine__create_kernel_maps(&session->machines.host); 143 144 if (ret >= 0) 145 ret = machines__create_guest_kernel_maps(&session->machines); 146 return ret; 147 } 148 149 static void perf_session__destroy_kernel_maps(struct perf_session *session) 150 { 151 machines__destroy_kernel_maps(&session->machines); 152 } 153 154 static bool perf_session__has_comm_exec(struct perf_session *session) 155 { 156 struct evsel *evsel; 157 158 evlist__for_each_entry(session->evlist, evsel) { 159 if (evsel->core.attr.comm_exec) 160 return true; 161 } 162 163 return false; 164 } 165 166 static void perf_session__set_comm_exec(struct perf_session *session) 167 { 168 bool comm_exec = perf_session__has_comm_exec(session); 169 170 machines__set_comm_exec(&session->machines, comm_exec); 171 } 172 173 static int ordered_events__deliver_event(struct ordered_events *oe, 174 struct ordered_event *event) 175 { 176 struct perf_session *session = container_of(oe, struct perf_session, 177 ordered_events); 178 179 return perf_session__deliver_event(session, event->event, 180 session->tool, event->file_offset); 181 } 182 183 struct perf_session *perf_session__new(struct perf_data *data, 184 bool repipe, struct perf_tool *tool) 185 { 186 struct perf_session *session = zalloc(sizeof(*session)); 187 188 if (!session) 189 goto out; 190 191 session->repipe = repipe; 192 session->tool = tool; 193 INIT_LIST_HEAD(&session->auxtrace_index); 194 machines__init(&session->machines); 195 ordered_events__init(&session->ordered_events, 196 ordered_events__deliver_event, NULL); 197 198 perf_env__init(&session->header.env); 199 if (data) { 200 if (perf_data__open(data)) 201 goto out_delete; 202 203 session->data = data; 204 205 if (perf_data__is_read(data)) { 206 if (perf_session__open(session) < 0) 207 goto out_delete; 208 209 /* 210 * set session attributes that are present in perf.data 211 * but not in pipe-mode. 212 */ 213 if (!data->is_pipe) { 214 perf_session__set_id_hdr_size(session); 215 perf_session__set_comm_exec(session); 216 } 217 218 perf_evlist__init_trace_event_sample_raw(session->evlist); 219 220 /* Open the directory data. */ 221 if (data->is_dir && perf_data__open_dir(data)) 222 goto out_delete; 223 } 224 } else { 225 session->machines.host.env = &perf_env; 226 } 227 228 session->machines.host.single_address_space = 229 perf_env__single_address_space(session->machines.host.env); 230 231 if (!data || perf_data__is_write(data)) { 232 /* 233 * In O_RDONLY mode this will be performed when reading the 234 * kernel MMAP event, in perf_event__process_mmap(). 235 */ 236 if (perf_session__create_kernel_maps(session) < 0) 237 pr_warning("Cannot read kernel map\n"); 238 } 239 240 /* 241 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is 242 * processed, so perf_evlist__sample_id_all is not meaningful here. 243 */ 244 if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps && 245 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { 246 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 247 tool->ordered_events = false; 248 } 249 250 return session; 251 252 out_delete: 253 perf_session__delete(session); 254 out: 255 return NULL; 256 } 257 258 static void perf_session__delete_threads(struct perf_session *session) 259 { 260 machine__delete_threads(&session->machines.host); 261 } 262 263 static void perf_session__release_decomp_events(struct perf_session *session) 264 { 265 struct decomp *next, *decomp; 266 size_t mmap_len; 267 next = session->decomp; 268 do { 269 decomp = next; 270 if (decomp == NULL) 271 break; 272 next = decomp->next; 273 mmap_len = decomp->mmap_len; 274 munmap(decomp, mmap_len); 275 } while (1); 276 } 277 278 void perf_session__delete(struct perf_session *session) 279 { 280 if (session == NULL) 281 return; 282 auxtrace__free(session); 283 auxtrace_index__free(&session->auxtrace_index); 284 perf_session__destroy_kernel_maps(session); 285 perf_session__delete_threads(session); 286 perf_session__release_decomp_events(session); 287 perf_env__exit(&session->header.env); 288 machines__exit(&session->machines); 289 if (session->data) 290 perf_data__close(session->data); 291 free(session); 292 } 293 294 static int process_event_synth_tracing_data_stub(struct perf_session *session 295 __maybe_unused, 296 union perf_event *event 297 __maybe_unused) 298 { 299 dump_printf(": unhandled!\n"); 300 return 0; 301 } 302 303 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused, 304 union perf_event *event __maybe_unused, 305 struct evlist **pevlist 306 __maybe_unused) 307 { 308 dump_printf(": unhandled!\n"); 309 return 0; 310 } 311 312 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused, 313 union perf_event *event __maybe_unused, 314 struct evlist **pevlist 315 __maybe_unused) 316 { 317 if (dump_trace) 318 perf_event__fprintf_event_update(event, stdout); 319 320 dump_printf(": unhandled!\n"); 321 return 0; 322 } 323 324 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, 325 union perf_event *event __maybe_unused, 326 struct perf_sample *sample __maybe_unused, 327 struct evsel *evsel __maybe_unused, 328 struct machine *machine __maybe_unused) 329 { 330 dump_printf(": unhandled!\n"); 331 return 0; 332 } 333 334 static int process_event_stub(struct perf_tool *tool __maybe_unused, 335 union perf_event *event __maybe_unused, 336 struct perf_sample *sample __maybe_unused, 337 struct machine *machine __maybe_unused) 338 { 339 dump_printf(": unhandled!\n"); 340 return 0; 341 } 342 343 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, 344 union perf_event *event __maybe_unused, 345 struct ordered_events *oe __maybe_unused) 346 { 347 dump_printf(": unhandled!\n"); 348 return 0; 349 } 350 351 static int process_finished_round(struct perf_tool *tool, 352 union perf_event *event, 353 struct ordered_events *oe); 354 355 static int skipn(int fd, off_t n) 356 { 357 char buf[4096]; 358 ssize_t ret; 359 360 while (n > 0) { 361 ret = read(fd, buf, min(n, (off_t)sizeof(buf))); 362 if (ret <= 0) 363 return ret; 364 n -= ret; 365 } 366 367 return 0; 368 } 369 370 static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused, 371 union perf_event *event) 372 { 373 dump_printf(": unhandled!\n"); 374 if (perf_data__is_pipe(session->data)) 375 skipn(perf_data__fd(session->data), event->auxtrace.size); 376 return event->auxtrace.size; 377 } 378 379 static int process_event_op2_stub(struct perf_session *session __maybe_unused, 380 union perf_event *event __maybe_unused) 381 { 382 dump_printf(": unhandled!\n"); 383 return 0; 384 } 385 386 387 static 388 int process_event_thread_map_stub(struct perf_session *session __maybe_unused, 389 union perf_event *event __maybe_unused) 390 { 391 if (dump_trace) 392 perf_event__fprintf_thread_map(event, stdout); 393 394 dump_printf(": unhandled!\n"); 395 return 0; 396 } 397 398 static 399 int process_event_cpu_map_stub(struct perf_session *session __maybe_unused, 400 union perf_event *event __maybe_unused) 401 { 402 if (dump_trace) 403 perf_event__fprintf_cpu_map(event, stdout); 404 405 dump_printf(": unhandled!\n"); 406 return 0; 407 } 408 409 static 410 int process_event_stat_config_stub(struct perf_session *session __maybe_unused, 411 union perf_event *event __maybe_unused) 412 { 413 if (dump_trace) 414 perf_event__fprintf_stat_config(event, stdout); 415 416 dump_printf(": unhandled!\n"); 417 return 0; 418 } 419 420 static int process_stat_stub(struct perf_session *perf_session __maybe_unused, 421 union perf_event *event) 422 { 423 if (dump_trace) 424 perf_event__fprintf_stat(event, stdout); 425 426 dump_printf(": unhandled!\n"); 427 return 0; 428 } 429 430 static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused, 431 union perf_event *event) 432 { 433 if (dump_trace) 434 perf_event__fprintf_stat_round(event, stdout); 435 436 dump_printf(": unhandled!\n"); 437 return 0; 438 } 439 440 static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused, 441 union perf_event *event __maybe_unused, 442 u64 file_offset __maybe_unused) 443 { 444 dump_printf(": unhandled!\n"); 445 return 0; 446 } 447 448 void perf_tool__fill_defaults(struct perf_tool *tool) 449 { 450 if (tool->sample == NULL) 451 tool->sample = process_event_sample_stub; 452 if (tool->mmap == NULL) 453 tool->mmap = process_event_stub; 454 if (tool->mmap2 == NULL) 455 tool->mmap2 = process_event_stub; 456 if (tool->comm == NULL) 457 tool->comm = process_event_stub; 458 if (tool->namespaces == NULL) 459 tool->namespaces = process_event_stub; 460 if (tool->fork == NULL) 461 tool->fork = process_event_stub; 462 if (tool->exit == NULL) 463 tool->exit = process_event_stub; 464 if (tool->lost == NULL) 465 tool->lost = perf_event__process_lost; 466 if (tool->lost_samples == NULL) 467 tool->lost_samples = perf_event__process_lost_samples; 468 if (tool->aux == NULL) 469 tool->aux = perf_event__process_aux; 470 if (tool->itrace_start == NULL) 471 tool->itrace_start = perf_event__process_itrace_start; 472 if (tool->context_switch == NULL) 473 tool->context_switch = perf_event__process_switch; 474 if (tool->ksymbol == NULL) 475 tool->ksymbol = perf_event__process_ksymbol; 476 if (tool->bpf == NULL) 477 tool->bpf = perf_event__process_bpf; 478 if (tool->read == NULL) 479 tool->read = process_event_sample_stub; 480 if (tool->throttle == NULL) 481 tool->throttle = process_event_stub; 482 if (tool->unthrottle == NULL) 483 tool->unthrottle = process_event_stub; 484 if (tool->attr == NULL) 485 tool->attr = process_event_synth_attr_stub; 486 if (tool->event_update == NULL) 487 tool->event_update = process_event_synth_event_update_stub; 488 if (tool->tracing_data == NULL) 489 tool->tracing_data = process_event_synth_tracing_data_stub; 490 if (tool->build_id == NULL) 491 tool->build_id = process_event_op2_stub; 492 if (tool->finished_round == NULL) { 493 if (tool->ordered_events) 494 tool->finished_round = process_finished_round; 495 else 496 tool->finished_round = process_finished_round_stub; 497 } 498 if (tool->id_index == NULL) 499 tool->id_index = process_event_op2_stub; 500 if (tool->auxtrace_info == NULL) 501 tool->auxtrace_info = process_event_op2_stub; 502 if (tool->auxtrace == NULL) 503 tool->auxtrace = process_event_auxtrace_stub; 504 if (tool->auxtrace_error == NULL) 505 tool->auxtrace_error = process_event_op2_stub; 506 if (tool->thread_map == NULL) 507 tool->thread_map = process_event_thread_map_stub; 508 if (tool->cpu_map == NULL) 509 tool->cpu_map = process_event_cpu_map_stub; 510 if (tool->stat_config == NULL) 511 tool->stat_config = process_event_stat_config_stub; 512 if (tool->stat == NULL) 513 tool->stat = process_stat_stub; 514 if (tool->stat_round == NULL) 515 tool->stat_round = process_stat_round_stub; 516 if (tool->time_conv == NULL) 517 tool->time_conv = process_event_op2_stub; 518 if (tool->feature == NULL) 519 tool->feature = process_event_op2_stub; 520 if (tool->compressed == NULL) 521 tool->compressed = perf_session__process_compressed_event; 522 } 523 524 static void swap_sample_id_all(union perf_event *event, void *data) 525 { 526 void *end = (void *) event + event->header.size; 527 int size = end - data; 528 529 BUG_ON(size % sizeof(u64)); 530 mem_bswap_64(data, size); 531 } 532 533 static void perf_event__all64_swap(union perf_event *event, 534 bool sample_id_all __maybe_unused) 535 { 536 struct perf_event_header *hdr = &event->header; 537 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 538 } 539 540 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) 541 { 542 event->comm.pid = bswap_32(event->comm.pid); 543 event->comm.tid = bswap_32(event->comm.tid); 544 545 if (sample_id_all) { 546 void *data = &event->comm.comm; 547 548 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 549 swap_sample_id_all(event, data); 550 } 551 } 552 553 static void perf_event__mmap_swap(union perf_event *event, 554 bool sample_id_all) 555 { 556 event->mmap.pid = bswap_32(event->mmap.pid); 557 event->mmap.tid = bswap_32(event->mmap.tid); 558 event->mmap.start = bswap_64(event->mmap.start); 559 event->mmap.len = bswap_64(event->mmap.len); 560 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 561 562 if (sample_id_all) { 563 void *data = &event->mmap.filename; 564 565 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 566 swap_sample_id_all(event, data); 567 } 568 } 569 570 static void perf_event__mmap2_swap(union perf_event *event, 571 bool sample_id_all) 572 { 573 event->mmap2.pid = bswap_32(event->mmap2.pid); 574 event->mmap2.tid = bswap_32(event->mmap2.tid); 575 event->mmap2.start = bswap_64(event->mmap2.start); 576 event->mmap2.len = bswap_64(event->mmap2.len); 577 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); 578 event->mmap2.maj = bswap_32(event->mmap2.maj); 579 event->mmap2.min = bswap_32(event->mmap2.min); 580 event->mmap2.ino = bswap_64(event->mmap2.ino); 581 582 if (sample_id_all) { 583 void *data = &event->mmap2.filename; 584 585 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 586 swap_sample_id_all(event, data); 587 } 588 } 589 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 590 { 591 event->fork.pid = bswap_32(event->fork.pid); 592 event->fork.tid = bswap_32(event->fork.tid); 593 event->fork.ppid = bswap_32(event->fork.ppid); 594 event->fork.ptid = bswap_32(event->fork.ptid); 595 event->fork.time = bswap_64(event->fork.time); 596 597 if (sample_id_all) 598 swap_sample_id_all(event, &event->fork + 1); 599 } 600 601 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 602 { 603 event->read.pid = bswap_32(event->read.pid); 604 event->read.tid = bswap_32(event->read.tid); 605 event->read.value = bswap_64(event->read.value); 606 event->read.time_enabled = bswap_64(event->read.time_enabled); 607 event->read.time_running = bswap_64(event->read.time_running); 608 event->read.id = bswap_64(event->read.id); 609 610 if (sample_id_all) 611 swap_sample_id_all(event, &event->read + 1); 612 } 613 614 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all) 615 { 616 event->aux.aux_offset = bswap_64(event->aux.aux_offset); 617 event->aux.aux_size = bswap_64(event->aux.aux_size); 618 event->aux.flags = bswap_64(event->aux.flags); 619 620 if (sample_id_all) 621 swap_sample_id_all(event, &event->aux + 1); 622 } 623 624 static void perf_event__itrace_start_swap(union perf_event *event, 625 bool sample_id_all) 626 { 627 event->itrace_start.pid = bswap_32(event->itrace_start.pid); 628 event->itrace_start.tid = bswap_32(event->itrace_start.tid); 629 630 if (sample_id_all) 631 swap_sample_id_all(event, &event->itrace_start + 1); 632 } 633 634 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all) 635 { 636 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { 637 event->context_switch.next_prev_pid = 638 bswap_32(event->context_switch.next_prev_pid); 639 event->context_switch.next_prev_tid = 640 bswap_32(event->context_switch.next_prev_tid); 641 } 642 643 if (sample_id_all) 644 swap_sample_id_all(event, &event->context_switch + 1); 645 } 646 647 static void perf_event__throttle_swap(union perf_event *event, 648 bool sample_id_all) 649 { 650 event->throttle.time = bswap_64(event->throttle.time); 651 event->throttle.id = bswap_64(event->throttle.id); 652 event->throttle.stream_id = bswap_64(event->throttle.stream_id); 653 654 if (sample_id_all) 655 swap_sample_id_all(event, &event->throttle + 1); 656 } 657 658 static void perf_event__namespaces_swap(union perf_event *event, 659 bool sample_id_all) 660 { 661 u64 i; 662 663 event->namespaces.pid = bswap_32(event->namespaces.pid); 664 event->namespaces.tid = bswap_32(event->namespaces.tid); 665 event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces); 666 667 for (i = 0; i < event->namespaces.nr_namespaces; i++) { 668 struct perf_ns_link_info *ns = &event->namespaces.link_info[i]; 669 670 ns->dev = bswap_64(ns->dev); 671 ns->ino = bswap_64(ns->ino); 672 } 673 674 if (sample_id_all) 675 swap_sample_id_all(event, &event->namespaces.link_info[i]); 676 } 677 678 static u8 revbyte(u8 b) 679 { 680 int rev = (b >> 4) | ((b & 0xf) << 4); 681 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 682 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 683 return (u8) rev; 684 } 685 686 /* 687 * XXX this is hack in attempt to carry flags bitfield 688 * through endian village. ABI says: 689 * 690 * Bit-fields are allocated from right to left (least to most significant) 691 * on little-endian implementations and from left to right (most to least 692 * significant) on big-endian implementations. 693 * 694 * The above seems to be byte specific, so we need to reverse each 695 * byte of the bitfield. 'Internet' also says this might be implementation 696 * specific and we probably need proper fix and carry perf_event_attr 697 * bitfield flags in separate data file FEAT_ section. Thought this seems 698 * to work for now. 699 */ 700 static void swap_bitfield(u8 *p, unsigned len) 701 { 702 unsigned i; 703 704 for (i = 0; i < len; i++) { 705 *p = revbyte(*p); 706 p++; 707 } 708 } 709 710 /* exported for swapping attributes in file header */ 711 void perf_event__attr_swap(struct perf_event_attr *attr) 712 { 713 attr->type = bswap_32(attr->type); 714 attr->size = bswap_32(attr->size); 715 716 #define bswap_safe(f, n) \ 717 (attr->size > (offsetof(struct perf_event_attr, f) + \ 718 sizeof(attr->f) * (n))) 719 #define bswap_field(f, sz) \ 720 do { \ 721 if (bswap_safe(f, 0)) \ 722 attr->f = bswap_##sz(attr->f); \ 723 } while(0) 724 #define bswap_field_16(f) bswap_field(f, 16) 725 #define bswap_field_32(f) bswap_field(f, 32) 726 #define bswap_field_64(f) bswap_field(f, 64) 727 728 bswap_field_64(config); 729 bswap_field_64(sample_period); 730 bswap_field_64(sample_type); 731 bswap_field_64(read_format); 732 bswap_field_32(wakeup_events); 733 bswap_field_32(bp_type); 734 bswap_field_64(bp_addr); 735 bswap_field_64(bp_len); 736 bswap_field_64(branch_sample_type); 737 bswap_field_64(sample_regs_user); 738 bswap_field_32(sample_stack_user); 739 bswap_field_32(aux_watermark); 740 bswap_field_16(sample_max_stack); 741 742 /* 743 * After read_format are bitfields. Check read_format because 744 * we are unable to use offsetof on bitfield. 745 */ 746 if (bswap_safe(read_format, 1)) 747 swap_bitfield((u8 *) (&attr->read_format + 1), 748 sizeof(u64)); 749 #undef bswap_field_64 750 #undef bswap_field_32 751 #undef bswap_field 752 #undef bswap_safe 753 } 754 755 static void perf_event__hdr_attr_swap(union perf_event *event, 756 bool sample_id_all __maybe_unused) 757 { 758 size_t size; 759 760 perf_event__attr_swap(&event->attr.attr); 761 762 size = event->header.size; 763 size -= (void *)&event->attr.id - (void *)event; 764 mem_bswap_64(event->attr.id, size); 765 } 766 767 static void perf_event__event_update_swap(union perf_event *event, 768 bool sample_id_all __maybe_unused) 769 { 770 event->event_update.type = bswap_64(event->event_update.type); 771 event->event_update.id = bswap_64(event->event_update.id); 772 } 773 774 static void perf_event__event_type_swap(union perf_event *event, 775 bool sample_id_all __maybe_unused) 776 { 777 event->event_type.event_type.event_id = 778 bswap_64(event->event_type.event_type.event_id); 779 } 780 781 static void perf_event__tracing_data_swap(union perf_event *event, 782 bool sample_id_all __maybe_unused) 783 { 784 event->tracing_data.size = bswap_32(event->tracing_data.size); 785 } 786 787 static void perf_event__auxtrace_info_swap(union perf_event *event, 788 bool sample_id_all __maybe_unused) 789 { 790 size_t size; 791 792 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); 793 794 size = event->header.size; 795 size -= (void *)&event->auxtrace_info.priv - (void *)event; 796 mem_bswap_64(event->auxtrace_info.priv, size); 797 } 798 799 static void perf_event__auxtrace_swap(union perf_event *event, 800 bool sample_id_all __maybe_unused) 801 { 802 event->auxtrace.size = bswap_64(event->auxtrace.size); 803 event->auxtrace.offset = bswap_64(event->auxtrace.offset); 804 event->auxtrace.reference = bswap_64(event->auxtrace.reference); 805 event->auxtrace.idx = bswap_32(event->auxtrace.idx); 806 event->auxtrace.tid = bswap_32(event->auxtrace.tid); 807 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); 808 } 809 810 static void perf_event__auxtrace_error_swap(union perf_event *event, 811 bool sample_id_all __maybe_unused) 812 { 813 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); 814 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); 815 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); 816 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); 817 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); 818 event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt); 819 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); 820 if (event->auxtrace_error.fmt) 821 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time); 822 } 823 824 static void perf_event__thread_map_swap(union perf_event *event, 825 bool sample_id_all __maybe_unused) 826 { 827 unsigned i; 828 829 event->thread_map.nr = bswap_64(event->thread_map.nr); 830 831 for (i = 0; i < event->thread_map.nr; i++) 832 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid); 833 } 834 835 static void perf_event__cpu_map_swap(union perf_event *event, 836 bool sample_id_all __maybe_unused) 837 { 838 struct cpu_map_data *data = &event->cpu_map.data; 839 struct cpu_map_entries *cpus; 840 struct cpu_map_mask *mask; 841 unsigned i; 842 843 data->type = bswap_64(data->type); 844 845 switch (data->type) { 846 case PERF_CPU_MAP__CPUS: 847 cpus = (struct cpu_map_entries *)data->data; 848 849 cpus->nr = bswap_16(cpus->nr); 850 851 for (i = 0; i < cpus->nr; i++) 852 cpus->cpu[i] = bswap_16(cpus->cpu[i]); 853 break; 854 case PERF_CPU_MAP__MASK: 855 mask = (struct cpu_map_mask *) data->data; 856 857 mask->nr = bswap_16(mask->nr); 858 mask->long_size = bswap_16(mask->long_size); 859 860 switch (mask->long_size) { 861 case 4: mem_bswap_32(&mask->mask, mask->nr); break; 862 case 8: mem_bswap_64(&mask->mask, mask->nr); break; 863 default: 864 pr_err("cpu_map swap: unsupported long size\n"); 865 } 866 default: 867 break; 868 } 869 } 870 871 static void perf_event__stat_config_swap(union perf_event *event, 872 bool sample_id_all __maybe_unused) 873 { 874 u64 size; 875 876 size = event->stat_config.nr * sizeof(event->stat_config.data[0]); 877 size += 1; /* nr item itself */ 878 mem_bswap_64(&event->stat_config.nr, size); 879 } 880 881 static void perf_event__stat_swap(union perf_event *event, 882 bool sample_id_all __maybe_unused) 883 { 884 event->stat.id = bswap_64(event->stat.id); 885 event->stat.thread = bswap_32(event->stat.thread); 886 event->stat.cpu = bswap_32(event->stat.cpu); 887 event->stat.val = bswap_64(event->stat.val); 888 event->stat.ena = bswap_64(event->stat.ena); 889 event->stat.run = bswap_64(event->stat.run); 890 } 891 892 static void perf_event__stat_round_swap(union perf_event *event, 893 bool sample_id_all __maybe_unused) 894 { 895 event->stat_round.type = bswap_64(event->stat_round.type); 896 event->stat_round.time = bswap_64(event->stat_round.time); 897 } 898 899 typedef void (*perf_event__swap_op)(union perf_event *event, 900 bool sample_id_all); 901 902 static perf_event__swap_op perf_event__swap_ops[] = { 903 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 904 [PERF_RECORD_MMAP2] = perf_event__mmap2_swap, 905 [PERF_RECORD_COMM] = perf_event__comm_swap, 906 [PERF_RECORD_FORK] = perf_event__task_swap, 907 [PERF_RECORD_EXIT] = perf_event__task_swap, 908 [PERF_RECORD_LOST] = perf_event__all64_swap, 909 [PERF_RECORD_READ] = perf_event__read_swap, 910 [PERF_RECORD_THROTTLE] = perf_event__throttle_swap, 911 [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap, 912 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 913 [PERF_RECORD_AUX] = perf_event__aux_swap, 914 [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap, 915 [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap, 916 [PERF_RECORD_SWITCH] = perf_event__switch_swap, 917 [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, 918 [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap, 919 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 920 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 921 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 922 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 923 [PERF_RECORD_ID_INDEX] = perf_event__all64_swap, 924 [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap, 925 [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap, 926 [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap, 927 [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap, 928 [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap, 929 [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap, 930 [PERF_RECORD_STAT] = perf_event__stat_swap, 931 [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap, 932 [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap, 933 [PERF_RECORD_TIME_CONV] = perf_event__all64_swap, 934 [PERF_RECORD_HEADER_MAX] = NULL, 935 }; 936 937 /* 938 * When perf record finishes a pass on every buffers, it records this pseudo 939 * event. 940 * We record the max timestamp t found in the pass n. 941 * Assuming these timestamps are monotonic across cpus, we know that if 942 * a buffer still has events with timestamps below t, they will be all 943 * available and then read in the pass n + 1. 944 * Hence when we start to read the pass n + 2, we can safely flush every 945 * events with timestamps below t. 946 * 947 * ============ PASS n ================= 948 * CPU 0 | CPU 1 949 * | 950 * cnt1 timestamps | cnt2 timestamps 951 * 1 | 2 952 * 2 | 3 953 * - | 4 <--- max recorded 954 * 955 * ============ PASS n + 1 ============== 956 * CPU 0 | CPU 1 957 * | 958 * cnt1 timestamps | cnt2 timestamps 959 * 3 | 5 960 * 4 | 6 961 * 5 | 7 <---- max recorded 962 * 963 * Flush every events below timestamp 4 964 * 965 * ============ PASS n + 2 ============== 966 * CPU 0 | CPU 1 967 * | 968 * cnt1 timestamps | cnt2 timestamps 969 * 6 | 8 970 * 7 | 9 971 * - | 10 972 * 973 * Flush every events below timestamp 7 974 * etc... 975 */ 976 static int process_finished_round(struct perf_tool *tool __maybe_unused, 977 union perf_event *event __maybe_unused, 978 struct ordered_events *oe) 979 { 980 if (dump_trace) 981 fprintf(stdout, "\n"); 982 return ordered_events__flush(oe, OE_FLUSH__ROUND); 983 } 984 985 int perf_session__queue_event(struct perf_session *s, union perf_event *event, 986 u64 timestamp, u64 file_offset) 987 { 988 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset); 989 } 990 991 static void callchain__lbr_callstack_printf(struct perf_sample *sample) 992 { 993 struct ip_callchain *callchain = sample->callchain; 994 struct branch_stack *lbr_stack = sample->branch_stack; 995 u64 kernel_callchain_nr = callchain->nr; 996 unsigned int i; 997 998 for (i = 0; i < kernel_callchain_nr; i++) { 999 if (callchain->ips[i] == PERF_CONTEXT_USER) 1000 break; 1001 } 1002 1003 if ((i != kernel_callchain_nr) && lbr_stack->nr) { 1004 u64 total_nr; 1005 /* 1006 * LBR callstack can only get user call chain, 1007 * i is kernel call chain number, 1008 * 1 is PERF_CONTEXT_USER. 1009 * 1010 * The user call chain is stored in LBR registers. 1011 * LBR are pair registers. The caller is stored 1012 * in "from" register, while the callee is stored 1013 * in "to" register. 1014 * For example, there is a call stack 1015 * "A"->"B"->"C"->"D". 1016 * The LBR registers will recorde like 1017 * "C"->"D", "B"->"C", "A"->"B". 1018 * So only the first "to" register and all "from" 1019 * registers are needed to construct the whole stack. 1020 */ 1021 total_nr = i + 1 + lbr_stack->nr + 1; 1022 kernel_callchain_nr = i + 1; 1023 1024 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr); 1025 1026 for (i = 0; i < kernel_callchain_nr; i++) 1027 printf("..... %2d: %016" PRIx64 "\n", 1028 i, callchain->ips[i]); 1029 1030 printf("..... %2d: %016" PRIx64 "\n", 1031 (int)(kernel_callchain_nr), lbr_stack->entries[0].to); 1032 for (i = 0; i < lbr_stack->nr; i++) 1033 printf("..... %2d: %016" PRIx64 "\n", 1034 (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from); 1035 } 1036 } 1037 1038 static void callchain__printf(struct evsel *evsel, 1039 struct perf_sample *sample) 1040 { 1041 unsigned int i; 1042 struct ip_callchain *callchain = sample->callchain; 1043 1044 if (perf_evsel__has_branch_callstack(evsel)) 1045 callchain__lbr_callstack_printf(sample); 1046 1047 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr); 1048 1049 for (i = 0; i < callchain->nr; i++) 1050 printf("..... %2d: %016" PRIx64 "\n", 1051 i, callchain->ips[i]); 1052 } 1053 1054 static void branch_stack__printf(struct perf_sample *sample, bool callstack) 1055 { 1056 uint64_t i; 1057 1058 printf("%s: nr:%" PRIu64 "\n", 1059 !callstack ? "... branch stack" : "... branch callstack", 1060 sample->branch_stack->nr); 1061 1062 for (i = 0; i < sample->branch_stack->nr; i++) { 1063 struct branch_entry *e = &sample->branch_stack->entries[i]; 1064 1065 if (!callstack) { 1066 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n", 1067 i, e->from, e->to, 1068 (unsigned short)e->flags.cycles, 1069 e->flags.mispred ? "M" : " ", 1070 e->flags.predicted ? "P" : " ", 1071 e->flags.abort ? "A" : " ", 1072 e->flags.in_tx ? "T" : " ", 1073 (unsigned)e->flags.reserved); 1074 } else { 1075 printf("..... %2"PRIu64": %016" PRIx64 "\n", 1076 i, i > 0 ? e->from : e->to); 1077 } 1078 } 1079 } 1080 1081 static void regs_dump__printf(u64 mask, u64 *regs) 1082 { 1083 unsigned rid, i = 0; 1084 1085 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { 1086 u64 val = regs[i++]; 1087 1088 printf(".... %-5s 0x%" PRIx64 "\n", 1089 perf_reg_name(rid), val); 1090 } 1091 } 1092 1093 static const char *regs_abi[] = { 1094 [PERF_SAMPLE_REGS_ABI_NONE] = "none", 1095 [PERF_SAMPLE_REGS_ABI_32] = "32-bit", 1096 [PERF_SAMPLE_REGS_ABI_64] = "64-bit", 1097 }; 1098 1099 static inline const char *regs_dump_abi(struct regs_dump *d) 1100 { 1101 if (d->abi > PERF_SAMPLE_REGS_ABI_64) 1102 return "unknown"; 1103 1104 return regs_abi[d->abi]; 1105 } 1106 1107 static void regs__printf(const char *type, struct regs_dump *regs) 1108 { 1109 u64 mask = regs->mask; 1110 1111 printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n", 1112 type, 1113 mask, 1114 regs_dump_abi(regs)); 1115 1116 regs_dump__printf(mask, regs->regs); 1117 } 1118 1119 static void regs_user__printf(struct perf_sample *sample) 1120 { 1121 struct regs_dump *user_regs = &sample->user_regs; 1122 1123 if (user_regs->regs) 1124 regs__printf("user", user_regs); 1125 } 1126 1127 static void regs_intr__printf(struct perf_sample *sample) 1128 { 1129 struct regs_dump *intr_regs = &sample->intr_regs; 1130 1131 if (intr_regs->regs) 1132 regs__printf("intr", intr_regs); 1133 } 1134 1135 static void stack_user__printf(struct stack_dump *dump) 1136 { 1137 printf("... ustack: size %" PRIu64 ", offset 0x%x\n", 1138 dump->size, dump->offset); 1139 } 1140 1141 static void perf_evlist__print_tstamp(struct evlist *evlist, 1142 union perf_event *event, 1143 struct perf_sample *sample) 1144 { 1145 u64 sample_type = __perf_evlist__combined_sample_type(evlist); 1146 1147 if (event->header.type != PERF_RECORD_SAMPLE && 1148 !perf_evlist__sample_id_all(evlist)) { 1149 fputs("-1 -1 ", stdout); 1150 return; 1151 } 1152 1153 if ((sample_type & PERF_SAMPLE_CPU)) 1154 printf("%u ", sample->cpu); 1155 1156 if (sample_type & PERF_SAMPLE_TIME) 1157 printf("%" PRIu64 " ", sample->time); 1158 } 1159 1160 static void sample_read__printf(struct perf_sample *sample, u64 read_format) 1161 { 1162 printf("... sample_read:\n"); 1163 1164 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1165 printf("...... time enabled %016" PRIx64 "\n", 1166 sample->read.time_enabled); 1167 1168 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1169 printf("...... time running %016" PRIx64 "\n", 1170 sample->read.time_running); 1171 1172 if (read_format & PERF_FORMAT_GROUP) { 1173 u64 i; 1174 1175 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); 1176 1177 for (i = 0; i < sample->read.group.nr; i++) { 1178 struct sample_read_value *value; 1179 1180 value = &sample->read.group.values[i]; 1181 printf("..... id %016" PRIx64 1182 ", value %016" PRIx64 "\n", 1183 value->id, value->value); 1184 } 1185 } else 1186 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n", 1187 sample->read.one.id, sample->read.one.value); 1188 } 1189 1190 static void dump_event(struct evlist *evlist, union perf_event *event, 1191 u64 file_offset, struct perf_sample *sample) 1192 { 1193 if (!dump_trace) 1194 return; 1195 1196 printf("\n%#" PRIx64 " [%#x]: event: %d\n", 1197 file_offset, event->header.size, event->header.type); 1198 1199 trace_event(event); 1200 if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw) 1201 evlist->trace_event_sample_raw(evlist, event, sample); 1202 1203 if (sample) 1204 perf_evlist__print_tstamp(evlist, event, sample); 1205 1206 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 1207 event->header.size, perf_event__name(event->header.type)); 1208 } 1209 1210 static void dump_sample(struct evsel *evsel, union perf_event *event, 1211 struct perf_sample *sample) 1212 { 1213 u64 sample_type; 1214 1215 if (!dump_trace) 1216 return; 1217 1218 printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", 1219 event->header.misc, sample->pid, sample->tid, sample->ip, 1220 sample->period, sample->addr); 1221 1222 sample_type = evsel->core.attr.sample_type; 1223 1224 if (evsel__has_callchain(evsel)) 1225 callchain__printf(evsel, sample); 1226 1227 if (sample_type & PERF_SAMPLE_BRANCH_STACK) 1228 branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel)); 1229 1230 if (sample_type & PERF_SAMPLE_REGS_USER) 1231 regs_user__printf(sample); 1232 1233 if (sample_type & PERF_SAMPLE_REGS_INTR) 1234 regs_intr__printf(sample); 1235 1236 if (sample_type & PERF_SAMPLE_STACK_USER) 1237 stack_user__printf(&sample->user_stack); 1238 1239 if (sample_type & PERF_SAMPLE_WEIGHT) 1240 printf("... weight: %" PRIu64 "\n", sample->weight); 1241 1242 if (sample_type & PERF_SAMPLE_DATA_SRC) 1243 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); 1244 1245 if (sample_type & PERF_SAMPLE_PHYS_ADDR) 1246 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr); 1247 1248 if (sample_type & PERF_SAMPLE_TRANSACTION) 1249 printf("... transaction: %" PRIx64 "\n", sample->transaction); 1250 1251 if (sample_type & PERF_SAMPLE_READ) 1252 sample_read__printf(sample, evsel->core.attr.read_format); 1253 } 1254 1255 static void dump_read(struct evsel *evsel, union perf_event *event) 1256 { 1257 struct perf_record_read *read_event = &event->read; 1258 u64 read_format; 1259 1260 if (!dump_trace) 1261 return; 1262 1263 printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid, 1264 perf_evsel__name(evsel), 1265 event->read.value); 1266 1267 if (!evsel) 1268 return; 1269 1270 read_format = evsel->core.attr.read_format; 1271 1272 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1273 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled); 1274 1275 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1276 printf("... time running : %" PRI_lu64 "\n", read_event->time_running); 1277 1278 if (read_format & PERF_FORMAT_ID) 1279 printf("... id : %" PRI_lu64 "\n", read_event->id); 1280 } 1281 1282 static struct machine *machines__find_for_cpumode(struct machines *machines, 1283 union perf_event *event, 1284 struct perf_sample *sample) 1285 { 1286 struct machine *machine; 1287 1288 if (perf_guest && 1289 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || 1290 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) { 1291 u32 pid; 1292 1293 if (event->header.type == PERF_RECORD_MMAP 1294 || event->header.type == PERF_RECORD_MMAP2) 1295 pid = event->mmap.pid; 1296 else 1297 pid = sample->pid; 1298 1299 machine = machines__find(machines, pid); 1300 if (!machine) 1301 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID); 1302 return machine; 1303 } 1304 1305 return &machines->host; 1306 } 1307 1308 static int deliver_sample_value(struct evlist *evlist, 1309 struct perf_tool *tool, 1310 union perf_event *event, 1311 struct perf_sample *sample, 1312 struct sample_read_value *v, 1313 struct machine *machine) 1314 { 1315 struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id); 1316 1317 if (sid) { 1318 sample->id = v->id; 1319 sample->period = v->value - sid->period; 1320 sid->period = v->value; 1321 } 1322 1323 if (!sid || sid->evsel == NULL) { 1324 ++evlist->stats.nr_unknown_id; 1325 return 0; 1326 } 1327 1328 /* 1329 * There's no reason to deliver sample 1330 * for zero period, bail out. 1331 */ 1332 if (!sample->period) 1333 return 0; 1334 1335 return tool->sample(tool, event, sample, sid->evsel, machine); 1336 } 1337 1338 static int deliver_sample_group(struct evlist *evlist, 1339 struct perf_tool *tool, 1340 union perf_event *event, 1341 struct perf_sample *sample, 1342 struct machine *machine) 1343 { 1344 int ret = -EINVAL; 1345 u64 i; 1346 1347 for (i = 0; i < sample->read.group.nr; i++) { 1348 ret = deliver_sample_value(evlist, tool, event, sample, 1349 &sample->read.group.values[i], 1350 machine); 1351 if (ret) 1352 break; 1353 } 1354 1355 return ret; 1356 } 1357 1358 static int 1359 perf_evlist__deliver_sample(struct evlist *evlist, 1360 struct perf_tool *tool, 1361 union perf_event *event, 1362 struct perf_sample *sample, 1363 struct evsel *evsel, 1364 struct machine *machine) 1365 { 1366 /* We know evsel != NULL. */ 1367 u64 sample_type = evsel->core.attr.sample_type; 1368 u64 read_format = evsel->core.attr.read_format; 1369 1370 /* Standard sample delivery. */ 1371 if (!(sample_type & PERF_SAMPLE_READ)) 1372 return tool->sample(tool, event, sample, evsel, machine); 1373 1374 /* For PERF_SAMPLE_READ we have either single or group mode. */ 1375 if (read_format & PERF_FORMAT_GROUP) 1376 return deliver_sample_group(evlist, tool, event, sample, 1377 machine); 1378 else 1379 return deliver_sample_value(evlist, tool, event, sample, 1380 &sample->read.one, machine); 1381 } 1382 1383 static int machines__deliver_event(struct machines *machines, 1384 struct evlist *evlist, 1385 union perf_event *event, 1386 struct perf_sample *sample, 1387 struct perf_tool *tool, u64 file_offset) 1388 { 1389 struct evsel *evsel; 1390 struct machine *machine; 1391 1392 dump_event(evlist, event, file_offset, sample); 1393 1394 evsel = perf_evlist__id2evsel(evlist, sample->id); 1395 1396 machine = machines__find_for_cpumode(machines, event, sample); 1397 1398 switch (event->header.type) { 1399 case PERF_RECORD_SAMPLE: 1400 if (evsel == NULL) { 1401 ++evlist->stats.nr_unknown_id; 1402 return 0; 1403 } 1404 dump_sample(evsel, event, sample); 1405 if (machine == NULL) { 1406 ++evlist->stats.nr_unprocessable_samples; 1407 return 0; 1408 } 1409 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); 1410 case PERF_RECORD_MMAP: 1411 return tool->mmap(tool, event, sample, machine); 1412 case PERF_RECORD_MMAP2: 1413 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) 1414 ++evlist->stats.nr_proc_map_timeout; 1415 return tool->mmap2(tool, event, sample, machine); 1416 case PERF_RECORD_COMM: 1417 return tool->comm(tool, event, sample, machine); 1418 case PERF_RECORD_NAMESPACES: 1419 return tool->namespaces(tool, event, sample, machine); 1420 case PERF_RECORD_FORK: 1421 return tool->fork(tool, event, sample, machine); 1422 case PERF_RECORD_EXIT: 1423 return tool->exit(tool, event, sample, machine); 1424 case PERF_RECORD_LOST: 1425 if (tool->lost == perf_event__process_lost) 1426 evlist->stats.total_lost += event->lost.lost; 1427 return tool->lost(tool, event, sample, machine); 1428 case PERF_RECORD_LOST_SAMPLES: 1429 if (tool->lost_samples == perf_event__process_lost_samples) 1430 evlist->stats.total_lost_samples += event->lost_samples.lost; 1431 return tool->lost_samples(tool, event, sample, machine); 1432 case PERF_RECORD_READ: 1433 dump_read(evsel, event); 1434 return tool->read(tool, event, sample, evsel, machine); 1435 case PERF_RECORD_THROTTLE: 1436 return tool->throttle(tool, event, sample, machine); 1437 case PERF_RECORD_UNTHROTTLE: 1438 return tool->unthrottle(tool, event, sample, machine); 1439 case PERF_RECORD_AUX: 1440 if (tool->aux == perf_event__process_aux) { 1441 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) 1442 evlist->stats.total_aux_lost += 1; 1443 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL) 1444 evlist->stats.total_aux_partial += 1; 1445 } 1446 return tool->aux(tool, event, sample, machine); 1447 case PERF_RECORD_ITRACE_START: 1448 return tool->itrace_start(tool, event, sample, machine); 1449 case PERF_RECORD_SWITCH: 1450 case PERF_RECORD_SWITCH_CPU_WIDE: 1451 return tool->context_switch(tool, event, sample, machine); 1452 case PERF_RECORD_KSYMBOL: 1453 return tool->ksymbol(tool, event, sample, machine); 1454 case PERF_RECORD_BPF_EVENT: 1455 return tool->bpf(tool, event, sample, machine); 1456 default: 1457 ++evlist->stats.nr_unknown_events; 1458 return -1; 1459 } 1460 } 1461 1462 static int perf_session__deliver_event(struct perf_session *session, 1463 union perf_event *event, 1464 struct perf_tool *tool, 1465 u64 file_offset) 1466 { 1467 struct perf_sample sample; 1468 int ret; 1469 1470 ret = perf_evlist__parse_sample(session->evlist, event, &sample); 1471 if (ret) { 1472 pr_err("Can't parse sample, err = %d\n", ret); 1473 return ret; 1474 } 1475 1476 ret = auxtrace__process_event(session, event, &sample, tool); 1477 if (ret < 0) 1478 return ret; 1479 if (ret > 0) 1480 return 0; 1481 1482 return machines__deliver_event(&session->machines, session->evlist, 1483 event, &sample, tool, file_offset); 1484 } 1485 1486 static s64 perf_session__process_user_event(struct perf_session *session, 1487 union perf_event *event, 1488 u64 file_offset) 1489 { 1490 struct ordered_events *oe = &session->ordered_events; 1491 struct perf_tool *tool = session->tool; 1492 struct perf_sample sample = { .time = 0, }; 1493 int fd = perf_data__fd(session->data); 1494 int err; 1495 1496 if (event->header.type != PERF_RECORD_COMPRESSED || 1497 tool->compressed == perf_session__process_compressed_event_stub) 1498 dump_event(session->evlist, event, file_offset, &sample); 1499 1500 /* These events are processed right away */ 1501 switch (event->header.type) { 1502 case PERF_RECORD_HEADER_ATTR: 1503 err = tool->attr(tool, event, &session->evlist); 1504 if (err == 0) { 1505 perf_session__set_id_hdr_size(session); 1506 perf_session__set_comm_exec(session); 1507 } 1508 return err; 1509 case PERF_RECORD_EVENT_UPDATE: 1510 return tool->event_update(tool, event, &session->evlist); 1511 case PERF_RECORD_HEADER_EVENT_TYPE: 1512 /* 1513 * Depreceated, but we need to handle it for sake 1514 * of old data files create in pipe mode. 1515 */ 1516 return 0; 1517 case PERF_RECORD_HEADER_TRACING_DATA: 1518 /* setup for reading amidst mmap */ 1519 lseek(fd, file_offset, SEEK_SET); 1520 return tool->tracing_data(session, event); 1521 case PERF_RECORD_HEADER_BUILD_ID: 1522 return tool->build_id(session, event); 1523 case PERF_RECORD_FINISHED_ROUND: 1524 return tool->finished_round(tool, event, oe); 1525 case PERF_RECORD_ID_INDEX: 1526 return tool->id_index(session, event); 1527 case PERF_RECORD_AUXTRACE_INFO: 1528 return tool->auxtrace_info(session, event); 1529 case PERF_RECORD_AUXTRACE: 1530 /* setup for reading amidst mmap */ 1531 lseek(fd, file_offset + event->header.size, SEEK_SET); 1532 return tool->auxtrace(session, event); 1533 case PERF_RECORD_AUXTRACE_ERROR: 1534 perf_session__auxtrace_error_inc(session, event); 1535 return tool->auxtrace_error(session, event); 1536 case PERF_RECORD_THREAD_MAP: 1537 return tool->thread_map(session, event); 1538 case PERF_RECORD_CPU_MAP: 1539 return tool->cpu_map(session, event); 1540 case PERF_RECORD_STAT_CONFIG: 1541 return tool->stat_config(session, event); 1542 case PERF_RECORD_STAT: 1543 return tool->stat(session, event); 1544 case PERF_RECORD_STAT_ROUND: 1545 return tool->stat_round(session, event); 1546 case PERF_RECORD_TIME_CONV: 1547 session->time_conv = event->time_conv; 1548 return tool->time_conv(session, event); 1549 case PERF_RECORD_HEADER_FEATURE: 1550 return tool->feature(session, event); 1551 case PERF_RECORD_COMPRESSED: 1552 err = tool->compressed(session, event, file_offset); 1553 if (err) 1554 dump_event(session->evlist, event, file_offset, &sample); 1555 return err; 1556 default: 1557 return -EINVAL; 1558 } 1559 } 1560 1561 int perf_session__deliver_synth_event(struct perf_session *session, 1562 union perf_event *event, 1563 struct perf_sample *sample) 1564 { 1565 struct evlist *evlist = session->evlist; 1566 struct perf_tool *tool = session->tool; 1567 1568 events_stats__inc(&evlist->stats, event->header.type); 1569 1570 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1571 return perf_session__process_user_event(session, event, 0); 1572 1573 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); 1574 } 1575 1576 static void event_swap(union perf_event *event, bool sample_id_all) 1577 { 1578 perf_event__swap_op swap; 1579 1580 swap = perf_event__swap_ops[event->header.type]; 1581 if (swap) 1582 swap(event, sample_id_all); 1583 } 1584 1585 int perf_session__peek_event(struct perf_session *session, off_t file_offset, 1586 void *buf, size_t buf_sz, 1587 union perf_event **event_ptr, 1588 struct perf_sample *sample) 1589 { 1590 union perf_event *event; 1591 size_t hdr_sz, rest; 1592 int fd; 1593 1594 if (session->one_mmap && !session->header.needs_swap) { 1595 event = file_offset - session->one_mmap_offset + 1596 session->one_mmap_addr; 1597 goto out_parse_sample; 1598 } 1599 1600 if (perf_data__is_pipe(session->data)) 1601 return -1; 1602 1603 fd = perf_data__fd(session->data); 1604 hdr_sz = sizeof(struct perf_event_header); 1605 1606 if (buf_sz < hdr_sz) 1607 return -1; 1608 1609 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || 1610 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz) 1611 return -1; 1612 1613 event = (union perf_event *)buf; 1614 1615 if (session->header.needs_swap) 1616 perf_event_header__bswap(&event->header); 1617 1618 if (event->header.size < hdr_sz || event->header.size > buf_sz) 1619 return -1; 1620 1621 rest = event->header.size - hdr_sz; 1622 1623 if (readn(fd, buf, rest) != (ssize_t)rest) 1624 return -1; 1625 1626 if (session->header.needs_swap) 1627 event_swap(event, perf_evlist__sample_id_all(session->evlist)); 1628 1629 out_parse_sample: 1630 1631 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && 1632 perf_evlist__parse_sample(session->evlist, event, sample)) 1633 return -1; 1634 1635 *event_ptr = event; 1636 1637 return 0; 1638 } 1639 1640 static s64 perf_session__process_event(struct perf_session *session, 1641 union perf_event *event, u64 file_offset) 1642 { 1643 struct evlist *evlist = session->evlist; 1644 struct perf_tool *tool = session->tool; 1645 int ret; 1646 1647 if (session->header.needs_swap) 1648 event_swap(event, perf_evlist__sample_id_all(evlist)); 1649 1650 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1651 return -EINVAL; 1652 1653 events_stats__inc(&evlist->stats, event->header.type); 1654 1655 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1656 return perf_session__process_user_event(session, event, file_offset); 1657 1658 if (tool->ordered_events) { 1659 u64 timestamp = -1ULL; 1660 1661 ret = perf_evlist__parse_sample_timestamp(evlist, event, ×tamp); 1662 if (ret && ret != -1) 1663 return ret; 1664 1665 ret = perf_session__queue_event(session, event, timestamp, file_offset); 1666 if (ret != -ETIME) 1667 return ret; 1668 } 1669 1670 return perf_session__deliver_event(session, event, tool, file_offset); 1671 } 1672 1673 void perf_event_header__bswap(struct perf_event_header *hdr) 1674 { 1675 hdr->type = bswap_32(hdr->type); 1676 hdr->misc = bswap_16(hdr->misc); 1677 hdr->size = bswap_16(hdr->size); 1678 } 1679 1680 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 1681 { 1682 return machine__findnew_thread(&session->machines.host, -1, pid); 1683 } 1684 1685 /* 1686 * Threads are identified by pid and tid, and the idle task has pid == tid == 0. 1687 * So here a single thread is created for that, but actually there is a separate 1688 * idle task per cpu, so there should be one 'struct thread' per cpu, but there 1689 * is only 1. That causes problems for some tools, requiring workarounds. For 1690 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu(). 1691 */ 1692 int perf_session__register_idle_thread(struct perf_session *session) 1693 { 1694 struct thread *thread; 1695 int err = 0; 1696 1697 thread = machine__findnew_thread(&session->machines.host, 0, 0); 1698 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) { 1699 pr_err("problem inserting idle task.\n"); 1700 err = -1; 1701 } 1702 1703 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) { 1704 pr_err("problem inserting idle task.\n"); 1705 err = -1; 1706 } 1707 1708 /* machine__findnew_thread() got the thread, so put it */ 1709 thread__put(thread); 1710 return err; 1711 } 1712 1713 static void 1714 perf_session__warn_order(const struct perf_session *session) 1715 { 1716 const struct ordered_events *oe = &session->ordered_events; 1717 struct evsel *evsel; 1718 bool should_warn = true; 1719 1720 evlist__for_each_entry(session->evlist, evsel) { 1721 if (evsel->core.attr.write_backward) 1722 should_warn = false; 1723 } 1724 1725 if (!should_warn) 1726 return; 1727 if (oe->nr_unordered_events != 0) 1728 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events); 1729 } 1730 1731 static void perf_session__warn_about_errors(const struct perf_session *session) 1732 { 1733 const struct events_stats *stats = &session->evlist->stats; 1734 1735 if (session->tool->lost == perf_event__process_lost && 1736 stats->nr_events[PERF_RECORD_LOST] != 0) { 1737 ui__warning("Processed %d events and lost %d chunks!\n\n" 1738 "Check IO/CPU overload!\n\n", 1739 stats->nr_events[0], 1740 stats->nr_events[PERF_RECORD_LOST]); 1741 } 1742 1743 if (session->tool->lost_samples == perf_event__process_lost_samples) { 1744 double drop_rate; 1745 1746 drop_rate = (double)stats->total_lost_samples / 1747 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples); 1748 if (drop_rate > 0.05) { 1749 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n", 1750 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples, 1751 drop_rate * 100.0); 1752 } 1753 } 1754 1755 if (session->tool->aux == perf_event__process_aux && 1756 stats->total_aux_lost != 0) { 1757 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n", 1758 stats->total_aux_lost, 1759 stats->nr_events[PERF_RECORD_AUX]); 1760 } 1761 1762 if (session->tool->aux == perf_event__process_aux && 1763 stats->total_aux_partial != 0) { 1764 bool vmm_exclusive = false; 1765 1766 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive", 1767 &vmm_exclusive); 1768 1769 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n" 1770 "Are you running a KVM guest in the background?%s\n\n", 1771 stats->total_aux_partial, 1772 stats->nr_events[PERF_RECORD_AUX], 1773 vmm_exclusive ? 1774 "\nReloading kvm_intel module with vmm_exclusive=0\n" 1775 "will reduce the gaps to only guest's timeslices." : 1776 ""); 1777 } 1778 1779 if (stats->nr_unknown_events != 0) { 1780 ui__warning("Found %u unknown events!\n\n" 1781 "Is this an older tool processing a perf.data " 1782 "file generated by a more recent tool?\n\n" 1783 "If that is not the case, consider " 1784 "reporting to linux-kernel@vger.kernel.org.\n\n", 1785 stats->nr_unknown_events); 1786 } 1787 1788 if (stats->nr_unknown_id != 0) { 1789 ui__warning("%u samples with id not present in the header\n", 1790 stats->nr_unknown_id); 1791 } 1792 1793 if (stats->nr_invalid_chains != 0) { 1794 ui__warning("Found invalid callchains!\n\n" 1795 "%u out of %u events were discarded for this reason.\n\n" 1796 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1797 stats->nr_invalid_chains, 1798 stats->nr_events[PERF_RECORD_SAMPLE]); 1799 } 1800 1801 if (stats->nr_unprocessable_samples != 0) { 1802 ui__warning("%u unprocessable samples recorded.\n" 1803 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1804 stats->nr_unprocessable_samples); 1805 } 1806 1807 perf_session__warn_order(session); 1808 1809 events_stats__auxtrace_error_warn(stats); 1810 1811 if (stats->nr_proc_map_timeout != 0) { 1812 ui__warning("%d map information files for pre-existing threads were\n" 1813 "not processed, if there are samples for addresses they\n" 1814 "will not be resolved, you may find out which are these\n" 1815 "threads by running with -v and redirecting the output\n" 1816 "to a file.\n" 1817 "The time limit to process proc map is too short?\n" 1818 "Increase it by --proc-map-timeout\n", 1819 stats->nr_proc_map_timeout); 1820 } 1821 } 1822 1823 static int perf_session__flush_thread_stack(struct thread *thread, 1824 void *p __maybe_unused) 1825 { 1826 return thread_stack__flush(thread); 1827 } 1828 1829 static int perf_session__flush_thread_stacks(struct perf_session *session) 1830 { 1831 return machines__for_each_thread(&session->machines, 1832 perf_session__flush_thread_stack, 1833 NULL); 1834 } 1835 1836 volatile int session_done; 1837 1838 static int __perf_session__process_decomp_events(struct perf_session *session); 1839 1840 static int __perf_session__process_pipe_events(struct perf_session *session) 1841 { 1842 struct ordered_events *oe = &session->ordered_events; 1843 struct perf_tool *tool = session->tool; 1844 int fd = perf_data__fd(session->data); 1845 union perf_event *event; 1846 uint32_t size, cur_size = 0; 1847 void *buf = NULL; 1848 s64 skip = 0; 1849 u64 head; 1850 ssize_t err; 1851 void *p; 1852 1853 perf_tool__fill_defaults(tool); 1854 1855 head = 0; 1856 cur_size = sizeof(union perf_event); 1857 1858 buf = malloc(cur_size); 1859 if (!buf) 1860 return -errno; 1861 ordered_events__set_copy_on_queue(oe, true); 1862 more: 1863 event = buf; 1864 err = readn(fd, event, sizeof(struct perf_event_header)); 1865 if (err <= 0) { 1866 if (err == 0) 1867 goto done; 1868 1869 pr_err("failed to read event header\n"); 1870 goto out_err; 1871 } 1872 1873 if (session->header.needs_swap) 1874 perf_event_header__bswap(&event->header); 1875 1876 size = event->header.size; 1877 if (size < sizeof(struct perf_event_header)) { 1878 pr_err("bad event header size\n"); 1879 goto out_err; 1880 } 1881 1882 if (size > cur_size) { 1883 void *new = realloc(buf, size); 1884 if (!new) { 1885 pr_err("failed to allocate memory to read event\n"); 1886 goto out_err; 1887 } 1888 buf = new; 1889 cur_size = size; 1890 event = buf; 1891 } 1892 p = event; 1893 p += sizeof(struct perf_event_header); 1894 1895 if (size - sizeof(struct perf_event_header)) { 1896 err = readn(fd, p, size - sizeof(struct perf_event_header)); 1897 if (err <= 0) { 1898 if (err == 0) { 1899 pr_err("unexpected end of event stream\n"); 1900 goto done; 1901 } 1902 1903 pr_err("failed to read event data\n"); 1904 goto out_err; 1905 } 1906 } 1907 1908 if ((skip = perf_session__process_event(session, event, head)) < 0) { 1909 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1910 head, event->header.size, event->header.type); 1911 err = -EINVAL; 1912 goto out_err; 1913 } 1914 1915 head += size; 1916 1917 if (skip > 0) 1918 head += skip; 1919 1920 err = __perf_session__process_decomp_events(session); 1921 if (err) 1922 goto out_err; 1923 1924 if (!session_done()) 1925 goto more; 1926 done: 1927 /* do the final flush for ordered samples */ 1928 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 1929 if (err) 1930 goto out_err; 1931 err = auxtrace__flush_events(session, tool); 1932 if (err) 1933 goto out_err; 1934 err = perf_session__flush_thread_stacks(session); 1935 out_err: 1936 free(buf); 1937 if (!tool->no_warn) 1938 perf_session__warn_about_errors(session); 1939 ordered_events__free(&session->ordered_events); 1940 auxtrace__free_events(session); 1941 return err; 1942 } 1943 1944 static union perf_event * 1945 fetch_mmaped_event(struct perf_session *session, 1946 u64 head, size_t mmap_size, char *buf) 1947 { 1948 union perf_event *event; 1949 1950 /* 1951 * Ensure we have enough space remaining to read 1952 * the size of the event in the headers. 1953 */ 1954 if (head + sizeof(event->header) > mmap_size) 1955 return NULL; 1956 1957 event = (union perf_event *)(buf + head); 1958 1959 if (session->header.needs_swap) 1960 perf_event_header__bswap(&event->header); 1961 1962 if (head + event->header.size > mmap_size) { 1963 /* We're not fetching the event so swap back again */ 1964 if (session->header.needs_swap) 1965 perf_event_header__bswap(&event->header); 1966 pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx: fuzzed perf.data?\n", 1967 __func__, head, event->header.size, mmap_size); 1968 return ERR_PTR(-EINVAL); 1969 } 1970 1971 return event; 1972 } 1973 1974 static int __perf_session__process_decomp_events(struct perf_session *session) 1975 { 1976 s64 skip; 1977 u64 size, file_pos = 0; 1978 struct decomp *decomp = session->decomp_last; 1979 1980 if (!decomp) 1981 return 0; 1982 1983 while (decomp->head < decomp->size && !session_done()) { 1984 union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data); 1985 1986 if (IS_ERR(event)) 1987 return PTR_ERR(event); 1988 1989 if (!event) 1990 break; 1991 1992 size = event->header.size; 1993 1994 if (size < sizeof(struct perf_event_header) || 1995 (skip = perf_session__process_event(session, event, file_pos)) < 0) { 1996 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1997 decomp->file_pos + decomp->head, event->header.size, event->header.type); 1998 return -EINVAL; 1999 } 2000 2001 if (skip) 2002 size += skip; 2003 2004 decomp->head += size; 2005 } 2006 2007 return 0; 2008 } 2009 2010 /* 2011 * On 64bit we can mmap the data file in one go. No need for tiny mmap 2012 * slices. On 32bit we use 32MB. 2013 */ 2014 #if BITS_PER_LONG == 64 2015 #define MMAP_SIZE ULLONG_MAX 2016 #define NUM_MMAPS 1 2017 #else 2018 #define MMAP_SIZE (32 * 1024 * 1024ULL) 2019 #define NUM_MMAPS 128 2020 #endif 2021 2022 struct reader; 2023 2024 typedef s64 (*reader_cb_t)(struct perf_session *session, 2025 union perf_event *event, 2026 u64 file_offset); 2027 2028 struct reader { 2029 int fd; 2030 u64 data_size; 2031 u64 data_offset; 2032 reader_cb_t process; 2033 }; 2034 2035 static int 2036 reader__process_events(struct reader *rd, struct perf_session *session, 2037 struct ui_progress *prog) 2038 { 2039 u64 data_size = rd->data_size; 2040 u64 head, page_offset, file_offset, file_pos, size; 2041 int err = 0, mmap_prot, mmap_flags, map_idx = 0; 2042 size_t mmap_size; 2043 char *buf, *mmaps[NUM_MMAPS]; 2044 union perf_event *event; 2045 s64 skip; 2046 2047 page_offset = page_size * (rd->data_offset / page_size); 2048 file_offset = page_offset; 2049 head = rd->data_offset - page_offset; 2050 2051 ui_progress__init_size(prog, data_size, "Processing events..."); 2052 2053 data_size += rd->data_offset; 2054 2055 mmap_size = MMAP_SIZE; 2056 if (mmap_size > data_size) { 2057 mmap_size = data_size; 2058 session->one_mmap = true; 2059 } 2060 2061 memset(mmaps, 0, sizeof(mmaps)); 2062 2063 mmap_prot = PROT_READ; 2064 mmap_flags = MAP_SHARED; 2065 2066 if (session->header.needs_swap) { 2067 mmap_prot |= PROT_WRITE; 2068 mmap_flags = MAP_PRIVATE; 2069 } 2070 remap: 2071 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd, 2072 file_offset); 2073 if (buf == MAP_FAILED) { 2074 pr_err("failed to mmap file\n"); 2075 err = -errno; 2076 goto out; 2077 } 2078 mmaps[map_idx] = buf; 2079 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 2080 file_pos = file_offset + head; 2081 if (session->one_mmap) { 2082 session->one_mmap_addr = buf; 2083 session->one_mmap_offset = file_offset; 2084 } 2085 2086 more: 2087 event = fetch_mmaped_event(session, head, mmap_size, buf); 2088 if (IS_ERR(event)) 2089 return PTR_ERR(event); 2090 2091 if (!event) { 2092 if (mmaps[map_idx]) { 2093 munmap(mmaps[map_idx], mmap_size); 2094 mmaps[map_idx] = NULL; 2095 } 2096 2097 page_offset = page_size * (head / page_size); 2098 file_offset += page_offset; 2099 head -= page_offset; 2100 goto remap; 2101 } 2102 2103 size = event->header.size; 2104 2105 skip = -EINVAL; 2106 2107 if (size < sizeof(struct perf_event_header) || 2108 (skip = rd->process(session, event, file_pos)) < 0) { 2109 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n", 2110 file_offset + head, event->header.size, 2111 event->header.type, strerror(-skip)); 2112 err = skip; 2113 goto out; 2114 } 2115 2116 if (skip) 2117 size += skip; 2118 2119 head += size; 2120 file_pos += size; 2121 2122 err = __perf_session__process_decomp_events(session); 2123 if (err) 2124 goto out; 2125 2126 ui_progress__update(prog, size); 2127 2128 if (session_done()) 2129 goto out; 2130 2131 if (file_pos < data_size) 2132 goto more; 2133 2134 out: 2135 return err; 2136 } 2137 2138 static s64 process_simple(struct perf_session *session, 2139 union perf_event *event, 2140 u64 file_offset) 2141 { 2142 return perf_session__process_event(session, event, file_offset); 2143 } 2144 2145 static int __perf_session__process_events(struct perf_session *session) 2146 { 2147 struct reader rd = { 2148 .fd = perf_data__fd(session->data), 2149 .data_size = session->header.data_size, 2150 .data_offset = session->header.data_offset, 2151 .process = process_simple, 2152 }; 2153 struct ordered_events *oe = &session->ordered_events; 2154 struct perf_tool *tool = session->tool; 2155 struct ui_progress prog; 2156 int err; 2157 2158 perf_tool__fill_defaults(tool); 2159 2160 if (rd.data_size == 0) 2161 return -1; 2162 2163 ui_progress__init_size(&prog, rd.data_size, "Processing events..."); 2164 2165 err = reader__process_events(&rd, session, &prog); 2166 if (err) 2167 goto out_err; 2168 /* do the final flush for ordered samples */ 2169 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 2170 if (err) 2171 goto out_err; 2172 err = auxtrace__flush_events(session, tool); 2173 if (err) 2174 goto out_err; 2175 err = perf_session__flush_thread_stacks(session); 2176 out_err: 2177 ui_progress__finish(); 2178 if (!tool->no_warn) 2179 perf_session__warn_about_errors(session); 2180 /* 2181 * We may switching perf.data output, make ordered_events 2182 * reusable. 2183 */ 2184 ordered_events__reinit(&session->ordered_events); 2185 auxtrace__free_events(session); 2186 session->one_mmap = false; 2187 return err; 2188 } 2189 2190 int perf_session__process_events(struct perf_session *session) 2191 { 2192 if (perf_session__register_idle_thread(session) < 0) 2193 return -ENOMEM; 2194 2195 if (perf_data__is_pipe(session->data)) 2196 return __perf_session__process_pipe_events(session); 2197 2198 return __perf_session__process_events(session); 2199 } 2200 2201 bool perf_session__has_traces(struct perf_session *session, const char *msg) 2202 { 2203 struct evsel *evsel; 2204 2205 evlist__for_each_entry(session->evlist, evsel) { 2206 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) 2207 return true; 2208 } 2209 2210 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 2211 return false; 2212 } 2213 2214 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr) 2215 { 2216 char *bracket; 2217 struct ref_reloc_sym *ref; 2218 struct kmap *kmap; 2219 2220 ref = zalloc(sizeof(struct ref_reloc_sym)); 2221 if (ref == NULL) 2222 return -ENOMEM; 2223 2224 ref->name = strdup(symbol_name); 2225 if (ref->name == NULL) { 2226 free(ref); 2227 return -ENOMEM; 2228 } 2229 2230 bracket = strchr(ref->name, ']'); 2231 if (bracket) 2232 *bracket = '\0'; 2233 2234 ref->addr = addr; 2235 2236 kmap = map__kmap(map); 2237 if (kmap) 2238 kmap->ref_reloc_sym = ref; 2239 2240 return 0; 2241 } 2242 2243 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp) 2244 { 2245 return machines__fprintf_dsos(&session->machines, fp); 2246 } 2247 2248 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, 2249 bool (skip)(struct dso *dso, int parm), int parm) 2250 { 2251 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm); 2252 } 2253 2254 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 2255 { 2256 size_t ret; 2257 const char *msg = ""; 2258 2259 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) 2260 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)"; 2261 2262 ret = fprintf(fp, "\nAggregated stats:%s\n", msg); 2263 2264 ret += events_stats__fprintf(&session->evlist->stats, fp); 2265 return ret; 2266 } 2267 2268 size_t perf_session__fprintf(struct perf_session *session, FILE *fp) 2269 { 2270 /* 2271 * FIXME: Here we have to actually print all the machines in this 2272 * session, not just the host... 2273 */ 2274 return machine__fprintf(&session->machines.host, fp); 2275 } 2276 2277 struct evsel *perf_session__find_first_evtype(struct perf_session *session, 2278 unsigned int type) 2279 { 2280 struct evsel *pos; 2281 2282 evlist__for_each_entry(session->evlist, pos) { 2283 if (pos->core.attr.type == type) 2284 return pos; 2285 } 2286 return NULL; 2287 } 2288 2289 int perf_session__cpu_bitmap(struct perf_session *session, 2290 const char *cpu_list, unsigned long *cpu_bitmap) 2291 { 2292 int i, err = -1; 2293 struct perf_cpu_map *map; 2294 2295 for (i = 0; i < PERF_TYPE_MAX; ++i) { 2296 struct evsel *evsel; 2297 2298 evsel = perf_session__find_first_evtype(session, i); 2299 if (!evsel) 2300 continue; 2301 2302 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) { 2303 pr_err("File does not contain CPU events. " 2304 "Remove -C option to proceed.\n"); 2305 return -1; 2306 } 2307 } 2308 2309 map = perf_cpu_map__new(cpu_list); 2310 if (map == NULL) { 2311 pr_err("Invalid cpu_list\n"); 2312 return -1; 2313 } 2314 2315 for (i = 0; i < map->nr; i++) { 2316 int cpu = map->map[i]; 2317 2318 if (cpu >= MAX_NR_CPUS) { 2319 pr_err("Requested CPU %d too large. " 2320 "Consider raising MAX_NR_CPUS\n", cpu); 2321 goto out_delete_map; 2322 } 2323 2324 set_bit(cpu, cpu_bitmap); 2325 } 2326 2327 err = 0; 2328 2329 out_delete_map: 2330 perf_cpu_map__put(map); 2331 return err; 2332 } 2333 2334 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 2335 bool full) 2336 { 2337 if (session == NULL || fp == NULL) 2338 return; 2339 2340 fprintf(fp, "# ========\n"); 2341 perf_header__fprintf_info(session, fp, full); 2342 fprintf(fp, "# ========\n#\n"); 2343 } 2344 2345 2346 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 2347 const struct evsel_str_handler *assocs, 2348 size_t nr_assocs) 2349 { 2350 struct evsel *evsel; 2351 size_t i; 2352 int err; 2353 2354 for (i = 0; i < nr_assocs; i++) { 2355 /* 2356 * Adding a handler for an event not in the session, 2357 * just ignore it. 2358 */ 2359 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name); 2360 if (evsel == NULL) 2361 continue; 2362 2363 err = -EEXIST; 2364 if (evsel->handler != NULL) 2365 goto out; 2366 evsel->handler = assocs[i].handler; 2367 } 2368 2369 err = 0; 2370 out: 2371 return err; 2372 } 2373 2374 int perf_event__process_id_index(struct perf_session *session, 2375 union perf_event *event) 2376 { 2377 struct evlist *evlist = session->evlist; 2378 struct id_index_event *ie = &event->id_index; 2379 size_t i, nr, max_nr; 2380 2381 max_nr = (ie->header.size - sizeof(struct id_index_event)) / 2382 sizeof(struct id_index_entry); 2383 nr = ie->nr; 2384 if (nr > max_nr) 2385 return -EINVAL; 2386 2387 if (dump_trace) 2388 fprintf(stdout, " nr: %zu\n", nr); 2389 2390 for (i = 0; i < nr; i++) { 2391 struct id_index_entry *e = &ie->entries[i]; 2392 struct perf_sample_id *sid; 2393 2394 if (dump_trace) { 2395 fprintf(stdout, " ... id: %"PRIu64, e->id); 2396 fprintf(stdout, " idx: %"PRIu64, e->idx); 2397 fprintf(stdout, " cpu: %"PRId64, e->cpu); 2398 fprintf(stdout, " tid: %"PRId64"\n", e->tid); 2399 } 2400 2401 sid = perf_evlist__id2sid(evlist, e->id); 2402 if (!sid) 2403 return -ENOENT; 2404 sid->idx = e->idx; 2405 sid->cpu = e->cpu; 2406 sid->tid = e->tid; 2407 } 2408 return 0; 2409 } 2410 2411 int perf_event__synthesize_id_index(struct perf_tool *tool, 2412 perf_event__handler_t process, 2413 struct evlist *evlist, 2414 struct machine *machine) 2415 { 2416 union perf_event *ev; 2417 struct evsel *evsel; 2418 size_t nr = 0, i = 0, sz, max_nr, n; 2419 int err; 2420 2421 pr_debug2("Synthesizing id index\n"); 2422 2423 max_nr = (UINT16_MAX - sizeof(struct id_index_event)) / 2424 sizeof(struct id_index_entry); 2425 2426 evlist__for_each_entry(evlist, evsel) 2427 nr += evsel->ids; 2428 2429 n = nr > max_nr ? max_nr : nr; 2430 sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry); 2431 ev = zalloc(sz); 2432 if (!ev) 2433 return -ENOMEM; 2434 2435 ev->id_index.header.type = PERF_RECORD_ID_INDEX; 2436 ev->id_index.header.size = sz; 2437 ev->id_index.nr = n; 2438 2439 evlist__for_each_entry(evlist, evsel) { 2440 u32 j; 2441 2442 for (j = 0; j < evsel->ids; j++) { 2443 struct id_index_entry *e; 2444 struct perf_sample_id *sid; 2445 2446 if (i >= n) { 2447 err = process(tool, ev, NULL, machine); 2448 if (err) 2449 goto out_err; 2450 nr -= n; 2451 i = 0; 2452 } 2453 2454 e = &ev->id_index.entries[i++]; 2455 2456 e->id = evsel->id[j]; 2457 2458 sid = perf_evlist__id2sid(evlist, e->id); 2459 if (!sid) { 2460 free(ev); 2461 return -ENOENT; 2462 } 2463 2464 e->idx = sid->idx; 2465 e->cpu = sid->cpu; 2466 e->tid = sid->tid; 2467 } 2468 } 2469 2470 sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry); 2471 ev->id_index.header.size = sz; 2472 ev->id_index.nr = nr; 2473 2474 err = process(tool, ev, NULL, machine); 2475 out_err: 2476 free(ev); 2477 2478 return err; 2479 } 2480