// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

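/*
 * Decompressed records live in a chain of anonymous mappings hanging off
 * session->decomp.  Each node starts with the struct decomp bookkeeping
 * header, followed by the payload:
 *
 *	[struct decomp][undecoded tail of previous node][newly decompressed data]
 *	                ^ data[0]                        ^ data[decomp_last_rem]
 *
 * Carrying the unconsumed remainder (size - head) of the previous node to
 * the front of the new one guarantees that no event record ever straddles
 * two mappings.
 */
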
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

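/*
 * Note: on failure perf_session__new() returns an ERR_PTR()-encoded errno,
 * never NULL, so callers are expected to check it along these lines:
 *
 *	session = perf_session__new(data, false, tool);
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 */
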
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;
	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}


static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

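/*
 * Point every callback a tool did not set at a matching stub, so the
 * dispatch paths below can invoke tool->xxx() unconditionally instead of
 * NULL-checking each callback per event.
 */
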
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->cgroup == NULL)
		tool->cgroup = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

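/*
 * The swap helpers below handle perf.data files recorded on a machine of
 * the opposite endianness.  Each one swaps the fixed fields of its record
 * type; the optional sample_id_all block trailing the record body is a
 * plain array of u64s (enforced by the BUG_ON() below), so it can be
 * byte-swapped wholesale from the end of the known struct up to
 * event->header.size.
 */
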
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix and to carry the perf_event_attr
 * bitfield flags in a separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

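/*
 * For example, an attr with only disabled:1 set has 0x01 in the first byte
 * of the bitfield word: revbyte(0x01) == 0x80, i.e. the flag moves to bit 7
 * of that same byte, which is where an opposite-endian compiler allocates
 * the first bit-field.  Only the bits within each byte are mirrored; the
 * bytes themselves stay in place.
 */
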
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
		break;
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

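/*
 * Dispatch table for the swap routines, indexed by record type.  A NULL
 * entry (e.g. PERF_RECORD_HEADER_BUILD_ID) means nothing beyond the
 * perf_event_header, which is swapped separately by
 * perf_event_header__bswap(), needs fixing up.
 */
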
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

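/*
 * Events queued above are only buffered; the next
 * PERF_RECORD_FINISHED_ROUND triggers the OE_FLUSH__ROUND flush, at which
 * point the buffered events are delivered in timestamp order through
 * ordered_events__deliver_event().
 */
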
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get the user call chain,
		 * i is the kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are paired registers. The caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, for a call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will be recorded like
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
	       !callstack ? "... branch stack" : "... branch callstack",
	       sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
			       i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

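/*
 * With PERF_FORMAT_GROUP the kernel emits one { value, id } pair per group
 * member (see perf_event_open(2)), which is what the loop above walks via
 * sample->read.group.values[]; without it a single value/id pair sits in
 * sample->read.one.
 */
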
static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (evsel__has_br_stack(evsel))
		branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       evsel__name(evsel), event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id           : %" PRI_lu64 "\n", read_event->id);
}

static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver sample
	 * for zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

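/*
 * For PERF_SAMPLE_READ each carried value is attributed to its evsel via
 * the sample id (perf_evlist__id2sid() above), and the per-id running
 * count kept in sid->period turns the kernel's cumulative counter values
 * into deltas before delivery.
 */
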
static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);

	return ret;
}

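/*
 * Everything above handles kernel-generated records.  Records with a type
 * at or above PERF_RECORD_USER_TYPE_START are synthesized by the tools
 * themselves, carry no parseable sample data, and are handled immediately
 * below, bypassing the ordered-events queue.
 */
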
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/*
		 * Setup for reading amidst mmap, but only when we
		 * are in 'file' mode. The 'pipe' fd is in proper
		 * place already.
		 */
		if (!perf_data__is_pipe(session->data))
			lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;
	/* Advance past the header so the payload read doesn't clobber it. */
	buf += hdr_sz;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}

static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	if (tool->ordered_events) {
		u64 timestamp = -1ULL;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
		if (ret && ret != -1)
			return ret;

		ret = perf_session__queue_event(session, event, timestamp, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, tool, file_offset);
}

void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}

struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}

/*
 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
 * So here a single thread is created for that, but actually there is a separate
 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
 * is only 1. That causes problems for some tools, requiring workarounds. For
 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
 */
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);
	return err;
}

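/*
 * The counters consulted below are accumulated while events are processed
 * (see machines__deliver_event() and friends) and reported once, so the
 * user gets a single summary rather than one warning per lost or
 * unprocessable record.
 */
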
static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_partial != 0) {
		bool vmm_exclusive = false;

		(void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
				       &vmm_exclusive);

		ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
			    "Are you running a KVM guest in the background?%s\n\n",
			    stats->total_aux_partial,
			    stats->nr_events[PERF_RECORD_AUX],
			    vmm_exclusive ?
			    "\nReloading kvm_intel module with vmm_exclusive=0\n"
			    "will reduce the gaps to only the guest's timeslices." :
			    "");
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	perf_session__warn_order(session);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed; any samples for their addresses will not\n"
			    "be resolved. You can find out which threads these are by\n"
			    "running with -v and redirecting the output to a file.\n"
			    "If the time limit for processing proc maps is too short,\n"
			    "increase it with --proc-map-timeout.\n",
			    stats->nr_proc_map_timeout);
	}
}

static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}

volatile int session_done;

static int __perf_session__process_decomp_events(struct perf_session *session);
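/*
 * session_done is polled via session_done() by the processing loops below;
 * tools typically set it from a signal handler so that a long run can be
 * interrupted cleanly. A minimal sketch (tools such as 'perf report' do
 * something very similar):
 *
 *	static void sig_handler(int sig __maybe_unused)
 *	{
 *		session_done = 1;
 *	}
 *
 *	signal(SIGINT, sig_handler);
 */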
static int __perf_session__process_pipe_events(struct perf_session *session)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data__fd(session->data);
	union perf_event *event;
	uint32_t size, cur_size = 0;
	void *buf = NULL;
	s64 skip = 0;
	u64 head;
	ssize_t err;
	void *p;

	perf_tool__fill_defaults(tool);

	head = 0;
	cur_size = sizeof(union perf_event);

	buf = malloc(cur_size);
	if (!buf)
		return -errno;
	ordered_events__set_copy_on_queue(oe, true);
more:
	event = buf;
	err = readn(fd, event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	size = event->header.size;
	if (size < sizeof(struct perf_event_header)) {
		pr_err("bad event header size\n");
		goto out_err;
	}

	if (size > cur_size) {
		void *new = realloc(buf, size);
		if (!new) {
			pr_err("failed to allocate memory to read event\n");
			goto out_err;
		}
		buf = new;
		cur_size = size;
		event = buf;
	}
	p = event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = readn(fd, p, size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if ((skip = perf_session__process_event(session, event, head)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       head, event->header.size, event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	head += size;

	if (skip > 0)
		head += skip;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out_err;

	if (!session_done())
		goto more;
done:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	free(buf);
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	return err;
}

static union perf_event *
prefetch_event(char *buf, u64 head, size_t mmap_size,
	       bool needs_swap, union perf_event *error)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the header.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size <= mmap_size)
		return event;

	/* We're not fetching the event so swap back again */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
		 " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);

	return error;
}

static union perf_event *
fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
}

static union perf_event *
fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
{
	return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
}

static int __perf_session__process_decomp_events(struct perf_session *session)
{
	s64 skip;
	u64 size, file_pos = 0;
	struct decomp *decomp = session->decomp_last;

	if (!decomp)
		return 0;

	while (decomp->head < decomp->size && !session_done()) {
		union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
							     session->header.needs_swap);

		if (!event)
			break;

		size = event->header.size;

		if (size < sizeof(struct perf_event_header) ||
		    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
			pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
			       decomp->file_pos + decomp->head, event->header.size, event->header.type);
			return -EINVAL;
		}

		if (skip)
			size += skip;

		decomp->head += size;
	}

	return 0;
}
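/*
 * Note on the two wrappers above: prefetch_event() distinguishes "the header
 * itself does not fit" (NULL) from "the header fits but the record is
 * truncated" (the caller-chosen error value). The file reader treats a
 * truncated record as fatal (ERR_PTR(-EINVAL), the "fuzzed or compressed
 * perf.data?" case), while the decompression loop passes NULL for the error
 * value too, so a partial record simply ends the loop and its bytes are
 * carried over into the next compressed chunk. Boundary arithmetic, with
 * illustrative numbers:
 *
 *	mmap_size = 0x100, head = 0xf8, sizeof(event->header) = 8
 *	head + 8 = 0x100 <= mmap_size, so the header is readable; but with
 *	event->header.size = 0x20, head + 0x20 = 0x118 > mmap_size, so the
 *	record is truncated and the error value is returned.
 */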
/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif

struct reader;

typedef s64 (*reader_cb_t)(struct perf_session *session,
			   union perf_event *event,
			   u64 file_offset);

struct reader {
	int		 fd;
	u64		 data_size;
	u64		 data_offset;
	reader_cb_t	 process;
};

static int
reader__process_events(struct reader *rd, struct perf_session *session,
		       struct ui_progress *prog)
{
	u64 data_size = rd->data_size;
	u64 head, page_offset, file_offset, file_pos, size;
	int err = 0, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	s64 skip;

	page_offset = page_size * (rd->data_offset / page_size);
	file_offset = page_offset;
	head = rd->data_offset - page_offset;

	ui_progress__init_size(prog, data_size, "Processing events...");

	data_size += rd->data_offset;

	mmap_size = MMAP_SIZE;
	if (mmap_size > data_size) {
		mmap_size = data_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
	if (IS_ERR(event))
		return PTR_ERR(event);

	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	skip = -EINVAL;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = rd->process(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
		       file_offset + head, event->header.size,
		       event->header.type, strerror(-skip));
		err = skip;
		goto out;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	err = __perf_session__process_decomp_events(session);
	if (err)
		goto out;

	ui_progress__update(prog, size);

	if (session_done())
		goto out;

	if (file_pos < data_size)
		goto more;

out:
	return err;
}

static s64 process_simple(struct perf_session *session,
			  union perf_event *event,
			  u64 file_offset)
{
	return perf_session__process_event(session, event, file_offset);
}
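/*
 * Worked example for the alignment done at the top of
 * reader__process_events() above (illustrative numbers, 4 KiB pages):
 *
 *	rd->data_offset = 0x1028
 *	page_offset     = page_size * (0x1028 / 0x1000) = 0x1000
 *	file_offset     = 0x1000	(mmap offsets must be page aligned)
 *	head            = 0x1028 - 0x1000 = 0x28
 *
 * The same trick is used at the "remap" step: head is reduced modulo the
 * page size and the difference is added to file_offset, so the next mmap()
 * again starts on a page boundary. Note that the map_idx rotation relies on
 * NUM_MMAPS being a power of two, since it masks with ARRAY_SIZE(mmaps) - 1.
 */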
static int __perf_session__process_events(struct perf_session *session)
{
	struct reader rd = {
		.fd		= perf_data__fd(session->data),
		.data_size	= session->header.data_size,
		.data_offset	= session->header.data_offset,
		.process	= process_simple,
	};
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct ui_progress prog;
	int err;

	perf_tool__fill_defaults(tool);

	if (rd.data_size == 0)
		return -1;

	ui_progress__init_size(&prog, rd.data_size, "Processing events...");

	err = reader__process_events(&rd, session, &prog);
	if (err)
		goto out_err;
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	if (!tool->no_warn)
		perf_session__warn_about_errors(session);
	/*
	 * We may be switching perf.data output; make ordered_events
	 * reusable.
	 */
	ordered_events__reinit(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}

int perf_session__process_events(struct perf_session *session)
{
	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (perf_data__is_pipe(session->data))
		return __perf_session__process_pipe_events(session);

	return __perf_session__process_events(session);
}
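/*
 * Putting the entry points together: a minimal, hypothetical consumer of
 * this API (a sketch only; my_sample_handler is not a real symbol, error
 * handling is abbreviated, and the callbacks shown are a small subset of
 * struct perf_tool):
 *
 *	struct perf_data data = {
 *		.path = "perf.data",
 *		.mode = PERF_DATA_MODE_READ,
 *	};
 *	struct perf_tool tool = {
 *		.sample		= my_sample_handler,
 *		.ordered_events	= true,
 *	};
 *	struct perf_session *session;
 *	int err;
 *
 *	session = perf_session__new(&data, false, &tool);
 *	if (IS_ERR(session))
 *		return PTR_ERR(session);
 *
 *	err = perf_session__process_events(session);
 *
 *	perf_session__delete(session);
 *	return err;
 */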
2343 */ 2344 return machine__fprintf(&session->machines.host, fp); 2345 } 2346 2347 struct evsel *perf_session__find_first_evtype(struct perf_session *session, 2348 unsigned int type) 2349 { 2350 struct evsel *pos; 2351 2352 evlist__for_each_entry(session->evlist, pos) { 2353 if (pos->core.attr.type == type) 2354 return pos; 2355 } 2356 return NULL; 2357 } 2358 2359 int perf_session__cpu_bitmap(struct perf_session *session, 2360 const char *cpu_list, unsigned long *cpu_bitmap) 2361 { 2362 int i, err = -1; 2363 struct perf_cpu_map *map; 2364 int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS); 2365 2366 for (i = 0; i < PERF_TYPE_MAX; ++i) { 2367 struct evsel *evsel; 2368 2369 evsel = perf_session__find_first_evtype(session, i); 2370 if (!evsel) 2371 continue; 2372 2373 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) { 2374 pr_err("File does not contain CPU events. " 2375 "Remove -C option to proceed.\n"); 2376 return -1; 2377 } 2378 } 2379 2380 map = perf_cpu_map__new(cpu_list); 2381 if (map == NULL) { 2382 pr_err("Invalid cpu_list\n"); 2383 return -1; 2384 } 2385 2386 for (i = 0; i < map->nr; i++) { 2387 int cpu = map->map[i]; 2388 2389 if (cpu >= nr_cpus) { 2390 pr_err("Requested CPU %d too large. " 2391 "Consider raising MAX_NR_CPUS\n", cpu); 2392 goto out_delete_map; 2393 } 2394 2395 set_bit(cpu, cpu_bitmap); 2396 } 2397 2398 err = 0; 2399 2400 out_delete_map: 2401 perf_cpu_map__put(map); 2402 return err; 2403 } 2404 2405 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 2406 bool full) 2407 { 2408 if (session == NULL || fp == NULL) 2409 return; 2410 2411 fprintf(fp, "# ========\n"); 2412 perf_header__fprintf_info(session, fp, full); 2413 fprintf(fp, "# ========\n#\n"); 2414 } 2415 2416 int perf_event__process_id_index(struct perf_session *session, 2417 union perf_event *event) 2418 { 2419 struct evlist *evlist = session->evlist; 2420 struct perf_record_id_index *ie = &event->id_index; 2421 size_t i, nr, max_nr; 2422 2423 max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) / 2424 sizeof(struct id_index_entry); 2425 nr = ie->nr; 2426 if (nr > max_nr) 2427 return -EINVAL; 2428 2429 if (dump_trace) 2430 fprintf(stdout, " nr: %zu\n", nr); 2431 2432 for (i = 0; i < nr; i++) { 2433 struct id_index_entry *e = &ie->entries[i]; 2434 struct perf_sample_id *sid; 2435 2436 if (dump_trace) { 2437 fprintf(stdout, " ... id: %"PRI_lu64, e->id); 2438 fprintf(stdout, " idx: %"PRI_lu64, e->idx); 2439 fprintf(stdout, " cpu: %"PRI_ld64, e->cpu); 2440 fprintf(stdout, " tid: %"PRI_ld64"\n", e->tid); 2441 } 2442 2443 sid = perf_evlist__id2sid(evlist, e->id); 2444 if (!sid) 2445 return -ENOENT; 2446 sid->idx = e->idx; 2447 sid->cpu = e->cpu; 2448 sid->tid = e->tid; 2449 } 2450 return 0; 2451 } 2452