1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <linux/err.h> 5 #include <linux/kernel.h> 6 #include <linux/zalloc.h> 7 #include <api/fs/fs.h> 8 9 #include <byteswap.h> 10 #include <unistd.h> 11 #include <sys/types.h> 12 #include <sys/mman.h> 13 #include <perf/cpumap.h> 14 15 #include "map_symbol.h" 16 #include "branch.h" 17 #include "debug.h" 18 #include "evlist.h" 19 #include "evsel.h" 20 #include "memswap.h" 21 #include "map.h" 22 #include "symbol.h" 23 #include "session.h" 24 #include "tool.h" 25 #include "cpumap.h" 26 #include "perf_regs.h" 27 #include "asm/bug.h" 28 #include "auxtrace.h" 29 #include "thread.h" 30 #include "thread-stack.h" 31 #include "sample-raw.h" 32 #include "stat.h" 33 #include "util.h" 34 #include "ui/progress.h" 35 #include "../perf.h" 36 #include "arch/common.h" 37 #include <internal/lib.h> 38 39 #ifdef HAVE_ZSTD_SUPPORT 40 static int perf_session__process_compressed_event(struct perf_session *session, 41 union perf_event *event, u64 file_offset) 42 { 43 void *src; 44 size_t decomp_size, src_size; 45 u64 decomp_last_rem = 0; 46 size_t mmap_len, decomp_len = session->header.env.comp_mmap_len; 47 struct decomp *decomp, *decomp_last = session->decomp_last; 48 49 if (decomp_last) { 50 decomp_last_rem = decomp_last->size - decomp_last->head; 51 decomp_len += decomp_last_rem; 52 } 53 54 mmap_len = sizeof(struct decomp) + decomp_len; 55 decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE, 56 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 57 if (decomp == MAP_FAILED) { 58 pr_err("Couldn't allocate memory for decompression\n"); 59 return -1; 60 } 61 62 decomp->file_pos = file_offset; 63 decomp->mmap_len = mmap_len; 64 decomp->head = 0; 65 66 if (decomp_last_rem) { 67 memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem); 68 decomp->size = decomp_last_rem; 69 } 70 71 src = (void *)event + sizeof(struct perf_record_compressed); 72 src_size = event->pack.header.size - sizeof(struct perf_record_compressed); 73 74 decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size, 75 &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem); 76 if (!decomp_size) { 77 munmap(decomp, mmap_len); 78 pr_err("Couldn't decompress data\n"); 79 return -1; 80 } 81 82 decomp->size += decomp_size; 83 84 if (session->decomp == NULL) { 85 session->decomp = decomp; 86 session->decomp_last = decomp; 87 } else { 88 session->decomp_last->next = decomp; 89 session->decomp_last = decomp; 90 } 91 92 pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size); 93 94 return 0; 95 } 96 #else /* !HAVE_ZSTD_SUPPORT */ 97 #define perf_session__process_compressed_event perf_session__process_compressed_event_stub 98 #endif 99 100 static int perf_session__deliver_event(struct perf_session *session, 101 union perf_event *event, 102 struct perf_tool *tool, 103 u64 file_offset); 104 105 static int perf_session__open(struct perf_session *session) 106 { 107 struct perf_data *data = session->data; 108 109 if (perf_session__read_header(session) < 0) { 110 pr_err("incompatible file format (rerun with -v to learn more)\n"); 111 return -1; 112 } 113 114 if (perf_data__is_pipe(data)) 115 return 0; 116 117 if (perf_header__has_feat(&session->header, HEADER_STAT)) 118 return 0; 119 120 if (!perf_evlist__valid_sample_type(session->evlist)) { 121 pr_err("non matching sample_type\n"); 122 return -1; 123 } 124 125 if (!perf_evlist__valid_sample_id_all(session->evlist)) { 126 pr_err("non matching sample_id_all\n"); 127 return -1; 128 } 129 130 if (!perf_evlist__valid_read_format(session->evlist)) { 131 pr_err("non matching read_format\n"); 132 return -1; 133 } 134 135 return 0; 136 } 137 138 void perf_session__set_id_hdr_size(struct perf_session *session) 139 { 140 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 141 142 machines__set_id_hdr_size(&session->machines, id_hdr_size); 143 } 144 145 int perf_session__create_kernel_maps(struct perf_session *session) 146 { 147 int ret = machine__create_kernel_maps(&session->machines.host); 148 149 if (ret >= 0) 150 ret = machines__create_guest_kernel_maps(&session->machines); 151 return ret; 152 } 153 154 static void perf_session__destroy_kernel_maps(struct perf_session *session) 155 { 156 machines__destroy_kernel_maps(&session->machines); 157 } 158 159 static bool perf_session__has_comm_exec(struct perf_session *session) 160 { 161 struct evsel *evsel; 162 163 evlist__for_each_entry(session->evlist, evsel) { 164 if (evsel->core.attr.comm_exec) 165 return true; 166 } 167 168 return false; 169 } 170 171 static void perf_session__set_comm_exec(struct perf_session *session) 172 { 173 bool comm_exec = perf_session__has_comm_exec(session); 174 175 machines__set_comm_exec(&session->machines, comm_exec); 176 } 177 178 static int ordered_events__deliver_event(struct ordered_events *oe, 179 struct ordered_event *event) 180 { 181 struct perf_session *session = container_of(oe, struct perf_session, 182 ordered_events); 183 184 return perf_session__deliver_event(session, event->event, 185 session->tool, event->file_offset); 186 } 187 188 struct perf_session *perf_session__new(struct perf_data *data, 189 bool repipe, struct perf_tool *tool) 190 { 191 struct perf_session *session = zalloc(sizeof(*session)); 192 193 if (!session) 194 goto out; 195 196 session->repipe = repipe; 197 session->tool = tool; 198 INIT_LIST_HEAD(&session->auxtrace_index); 199 machines__init(&session->machines); 200 ordered_events__init(&session->ordered_events, 201 ordered_events__deliver_event, NULL); 202 203 perf_env__init(&session->header.env); 204 if (data) { 205 if (perf_data__open(data)) 206 goto out_delete; 207 208 session->data = data; 209 210 if (perf_data__is_read(data)) { 211 if (perf_session__open(session) < 0) 212 goto out_delete; 213 214 /* 215 * set session attributes that are present in perf.data 216 * but not in pipe-mode. 217 */ 218 if (!data->is_pipe) { 219 perf_session__set_id_hdr_size(session); 220 perf_session__set_comm_exec(session); 221 } 222 223 perf_evlist__init_trace_event_sample_raw(session->evlist); 224 225 /* Open the directory data. */ 226 if (data->is_dir && perf_data__open_dir(data)) 227 goto out_delete; 228 } 229 } else { 230 session->machines.host.env = &perf_env; 231 } 232 233 session->machines.host.single_address_space = 234 perf_env__single_address_space(session->machines.host.env); 235 236 if (!data || perf_data__is_write(data)) { 237 /* 238 * In O_RDONLY mode this will be performed when reading the 239 * kernel MMAP event, in perf_event__process_mmap(). 240 */ 241 if (perf_session__create_kernel_maps(session) < 0) 242 pr_warning("Cannot read kernel map\n"); 243 } 244 245 /* 246 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is 247 * processed, so perf_evlist__sample_id_all is not meaningful here. 248 */ 249 if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps && 250 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { 251 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 252 tool->ordered_events = false; 253 } 254 255 return session; 256 257 out_delete: 258 perf_session__delete(session); 259 out: 260 return NULL; 261 } 262 263 static void perf_session__delete_threads(struct perf_session *session) 264 { 265 machine__delete_threads(&session->machines.host); 266 } 267 268 static void perf_session__release_decomp_events(struct perf_session *session) 269 { 270 struct decomp *next, *decomp; 271 size_t mmap_len; 272 next = session->decomp; 273 do { 274 decomp = next; 275 if (decomp == NULL) 276 break; 277 next = decomp->next; 278 mmap_len = decomp->mmap_len; 279 munmap(decomp, mmap_len); 280 } while (1); 281 } 282 283 void perf_session__delete(struct perf_session *session) 284 { 285 if (session == NULL) 286 return; 287 auxtrace__free(session); 288 auxtrace_index__free(&session->auxtrace_index); 289 perf_session__destroy_kernel_maps(session); 290 perf_session__delete_threads(session); 291 perf_session__release_decomp_events(session); 292 perf_env__exit(&session->header.env); 293 machines__exit(&session->machines); 294 if (session->data) 295 perf_data__close(session->data); 296 free(session); 297 } 298 299 static int process_event_synth_tracing_data_stub(struct perf_session *session 300 __maybe_unused, 301 union perf_event *event 302 __maybe_unused) 303 { 304 dump_printf(": unhandled!\n"); 305 return 0; 306 } 307 308 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused, 309 union perf_event *event __maybe_unused, 310 struct evlist **pevlist 311 __maybe_unused) 312 { 313 dump_printf(": unhandled!\n"); 314 return 0; 315 } 316 317 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused, 318 union perf_event *event __maybe_unused, 319 struct evlist **pevlist 320 __maybe_unused) 321 { 322 if (dump_trace) 323 perf_event__fprintf_event_update(event, stdout); 324 325 dump_printf(": unhandled!\n"); 326 return 0; 327 } 328 329 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, 330 union perf_event *event __maybe_unused, 331 struct perf_sample *sample __maybe_unused, 332 struct evsel *evsel __maybe_unused, 333 struct machine *machine __maybe_unused) 334 { 335 dump_printf(": unhandled!\n"); 336 return 0; 337 } 338 339 static int process_event_stub(struct perf_tool *tool __maybe_unused, 340 union perf_event *event __maybe_unused, 341 struct perf_sample *sample __maybe_unused, 342 struct machine *machine __maybe_unused) 343 { 344 dump_printf(": unhandled!\n"); 345 return 0; 346 } 347 348 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, 349 union perf_event *event __maybe_unused, 350 struct ordered_events *oe __maybe_unused) 351 { 352 dump_printf(": unhandled!\n"); 353 return 0; 354 } 355 356 static int process_finished_round(struct perf_tool *tool, 357 union perf_event *event, 358 struct ordered_events *oe); 359 360 static int skipn(int fd, off_t n) 361 { 362 char buf[4096]; 363 ssize_t ret; 364 365 while (n > 0) { 366 ret = read(fd, buf, min(n, (off_t)sizeof(buf))); 367 if (ret <= 0) 368 return ret; 369 n -= ret; 370 } 371 372 return 0; 373 } 374 375 static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused, 376 union perf_event *event) 377 { 378 dump_printf(": unhandled!\n"); 379 if (perf_data__is_pipe(session->data)) 380 skipn(perf_data__fd(session->data), event->auxtrace.size); 381 return event->auxtrace.size; 382 } 383 384 static int process_event_op2_stub(struct perf_session *session __maybe_unused, 385 union perf_event *event __maybe_unused) 386 { 387 dump_printf(": unhandled!\n"); 388 return 0; 389 } 390 391 392 static 393 int process_event_thread_map_stub(struct perf_session *session __maybe_unused, 394 union perf_event *event __maybe_unused) 395 { 396 if (dump_trace) 397 perf_event__fprintf_thread_map(event, stdout); 398 399 dump_printf(": unhandled!\n"); 400 return 0; 401 } 402 403 static 404 int process_event_cpu_map_stub(struct perf_session *session __maybe_unused, 405 union perf_event *event __maybe_unused) 406 { 407 if (dump_trace) 408 perf_event__fprintf_cpu_map(event, stdout); 409 410 dump_printf(": unhandled!\n"); 411 return 0; 412 } 413 414 static 415 int process_event_stat_config_stub(struct perf_session *session __maybe_unused, 416 union perf_event *event __maybe_unused) 417 { 418 if (dump_trace) 419 perf_event__fprintf_stat_config(event, stdout); 420 421 dump_printf(": unhandled!\n"); 422 return 0; 423 } 424 425 static int process_stat_stub(struct perf_session *perf_session __maybe_unused, 426 union perf_event *event) 427 { 428 if (dump_trace) 429 perf_event__fprintf_stat(event, stdout); 430 431 dump_printf(": unhandled!\n"); 432 return 0; 433 } 434 435 static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused, 436 union perf_event *event) 437 { 438 if (dump_trace) 439 perf_event__fprintf_stat_round(event, stdout); 440 441 dump_printf(": unhandled!\n"); 442 return 0; 443 } 444 445 static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused, 446 union perf_event *event __maybe_unused, 447 u64 file_offset __maybe_unused) 448 { 449 dump_printf(": unhandled!\n"); 450 return 0; 451 } 452 453 void perf_tool__fill_defaults(struct perf_tool *tool) 454 { 455 if (tool->sample == NULL) 456 tool->sample = process_event_sample_stub; 457 if (tool->mmap == NULL) 458 tool->mmap = process_event_stub; 459 if (tool->mmap2 == NULL) 460 tool->mmap2 = process_event_stub; 461 if (tool->comm == NULL) 462 tool->comm = process_event_stub; 463 if (tool->namespaces == NULL) 464 tool->namespaces = process_event_stub; 465 if (tool->fork == NULL) 466 tool->fork = process_event_stub; 467 if (tool->exit == NULL) 468 tool->exit = process_event_stub; 469 if (tool->lost == NULL) 470 tool->lost = perf_event__process_lost; 471 if (tool->lost_samples == NULL) 472 tool->lost_samples = perf_event__process_lost_samples; 473 if (tool->aux == NULL) 474 tool->aux = perf_event__process_aux; 475 if (tool->itrace_start == NULL) 476 tool->itrace_start = perf_event__process_itrace_start; 477 if (tool->context_switch == NULL) 478 tool->context_switch = perf_event__process_switch; 479 if (tool->ksymbol == NULL) 480 tool->ksymbol = perf_event__process_ksymbol; 481 if (tool->bpf == NULL) 482 tool->bpf = perf_event__process_bpf; 483 if (tool->read == NULL) 484 tool->read = process_event_sample_stub; 485 if (tool->throttle == NULL) 486 tool->throttle = process_event_stub; 487 if (tool->unthrottle == NULL) 488 tool->unthrottle = process_event_stub; 489 if (tool->attr == NULL) 490 tool->attr = process_event_synth_attr_stub; 491 if (tool->event_update == NULL) 492 tool->event_update = process_event_synth_event_update_stub; 493 if (tool->tracing_data == NULL) 494 tool->tracing_data = process_event_synth_tracing_data_stub; 495 if (tool->build_id == NULL) 496 tool->build_id = process_event_op2_stub; 497 if (tool->finished_round == NULL) { 498 if (tool->ordered_events) 499 tool->finished_round = process_finished_round; 500 else 501 tool->finished_round = process_finished_round_stub; 502 } 503 if (tool->id_index == NULL) 504 tool->id_index = process_event_op2_stub; 505 if (tool->auxtrace_info == NULL) 506 tool->auxtrace_info = process_event_op2_stub; 507 if (tool->auxtrace == NULL) 508 tool->auxtrace = process_event_auxtrace_stub; 509 if (tool->auxtrace_error == NULL) 510 tool->auxtrace_error = process_event_op2_stub; 511 if (tool->thread_map == NULL) 512 tool->thread_map = process_event_thread_map_stub; 513 if (tool->cpu_map == NULL) 514 tool->cpu_map = process_event_cpu_map_stub; 515 if (tool->stat_config == NULL) 516 tool->stat_config = process_event_stat_config_stub; 517 if (tool->stat == NULL) 518 tool->stat = process_stat_stub; 519 if (tool->stat_round == NULL) 520 tool->stat_round = process_stat_round_stub; 521 if (tool->time_conv == NULL) 522 tool->time_conv = process_event_op2_stub; 523 if (tool->feature == NULL) 524 tool->feature = process_event_op2_stub; 525 if (tool->compressed == NULL) 526 tool->compressed = perf_session__process_compressed_event; 527 } 528 529 static void swap_sample_id_all(union perf_event *event, void *data) 530 { 531 void *end = (void *) event + event->header.size; 532 int size = end - data; 533 534 BUG_ON(size % sizeof(u64)); 535 mem_bswap_64(data, size); 536 } 537 538 static void perf_event__all64_swap(union perf_event *event, 539 bool sample_id_all __maybe_unused) 540 { 541 struct perf_event_header *hdr = &event->header; 542 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 543 } 544 545 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) 546 { 547 event->comm.pid = bswap_32(event->comm.pid); 548 event->comm.tid = bswap_32(event->comm.tid); 549 550 if (sample_id_all) { 551 void *data = &event->comm.comm; 552 553 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 554 swap_sample_id_all(event, data); 555 } 556 } 557 558 static void perf_event__mmap_swap(union perf_event *event, 559 bool sample_id_all) 560 { 561 event->mmap.pid = bswap_32(event->mmap.pid); 562 event->mmap.tid = bswap_32(event->mmap.tid); 563 event->mmap.start = bswap_64(event->mmap.start); 564 event->mmap.len = bswap_64(event->mmap.len); 565 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 566 567 if (sample_id_all) { 568 void *data = &event->mmap.filename; 569 570 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 571 swap_sample_id_all(event, data); 572 } 573 } 574 575 static void perf_event__mmap2_swap(union perf_event *event, 576 bool sample_id_all) 577 { 578 event->mmap2.pid = bswap_32(event->mmap2.pid); 579 event->mmap2.tid = bswap_32(event->mmap2.tid); 580 event->mmap2.start = bswap_64(event->mmap2.start); 581 event->mmap2.len = bswap_64(event->mmap2.len); 582 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); 583 event->mmap2.maj = bswap_32(event->mmap2.maj); 584 event->mmap2.min = bswap_32(event->mmap2.min); 585 event->mmap2.ino = bswap_64(event->mmap2.ino); 586 587 if (sample_id_all) { 588 void *data = &event->mmap2.filename; 589 590 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 591 swap_sample_id_all(event, data); 592 } 593 } 594 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 595 { 596 event->fork.pid = bswap_32(event->fork.pid); 597 event->fork.tid = bswap_32(event->fork.tid); 598 event->fork.ppid = bswap_32(event->fork.ppid); 599 event->fork.ptid = bswap_32(event->fork.ptid); 600 event->fork.time = bswap_64(event->fork.time); 601 602 if (sample_id_all) 603 swap_sample_id_all(event, &event->fork + 1); 604 } 605 606 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 607 { 608 event->read.pid = bswap_32(event->read.pid); 609 event->read.tid = bswap_32(event->read.tid); 610 event->read.value = bswap_64(event->read.value); 611 event->read.time_enabled = bswap_64(event->read.time_enabled); 612 event->read.time_running = bswap_64(event->read.time_running); 613 event->read.id = bswap_64(event->read.id); 614 615 if (sample_id_all) 616 swap_sample_id_all(event, &event->read + 1); 617 } 618 619 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all) 620 { 621 event->aux.aux_offset = bswap_64(event->aux.aux_offset); 622 event->aux.aux_size = bswap_64(event->aux.aux_size); 623 event->aux.flags = bswap_64(event->aux.flags); 624 625 if (sample_id_all) 626 swap_sample_id_all(event, &event->aux + 1); 627 } 628 629 static void perf_event__itrace_start_swap(union perf_event *event, 630 bool sample_id_all) 631 { 632 event->itrace_start.pid = bswap_32(event->itrace_start.pid); 633 event->itrace_start.tid = bswap_32(event->itrace_start.tid); 634 635 if (sample_id_all) 636 swap_sample_id_all(event, &event->itrace_start + 1); 637 } 638 639 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all) 640 { 641 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { 642 event->context_switch.next_prev_pid = 643 bswap_32(event->context_switch.next_prev_pid); 644 event->context_switch.next_prev_tid = 645 bswap_32(event->context_switch.next_prev_tid); 646 } 647 648 if (sample_id_all) 649 swap_sample_id_all(event, &event->context_switch + 1); 650 } 651 652 static void perf_event__throttle_swap(union perf_event *event, 653 bool sample_id_all) 654 { 655 event->throttle.time = bswap_64(event->throttle.time); 656 event->throttle.id = bswap_64(event->throttle.id); 657 event->throttle.stream_id = bswap_64(event->throttle.stream_id); 658 659 if (sample_id_all) 660 swap_sample_id_all(event, &event->throttle + 1); 661 } 662 663 static void perf_event__namespaces_swap(union perf_event *event, 664 bool sample_id_all) 665 { 666 u64 i; 667 668 event->namespaces.pid = bswap_32(event->namespaces.pid); 669 event->namespaces.tid = bswap_32(event->namespaces.tid); 670 event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces); 671 672 for (i = 0; i < event->namespaces.nr_namespaces; i++) { 673 struct perf_ns_link_info *ns = &event->namespaces.link_info[i]; 674 675 ns->dev = bswap_64(ns->dev); 676 ns->ino = bswap_64(ns->ino); 677 } 678 679 if (sample_id_all) 680 swap_sample_id_all(event, &event->namespaces.link_info[i]); 681 } 682 683 static u8 revbyte(u8 b) 684 { 685 int rev = (b >> 4) | ((b & 0xf) << 4); 686 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 687 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 688 return (u8) rev; 689 } 690 691 /* 692 * XXX this is hack in attempt to carry flags bitfield 693 * through endian village. ABI says: 694 * 695 * Bit-fields are allocated from right to left (least to most significant) 696 * on little-endian implementations and from left to right (most to least 697 * significant) on big-endian implementations. 698 * 699 * The above seems to be byte specific, so we need to reverse each 700 * byte of the bitfield. 'Internet' also says this might be implementation 701 * specific and we probably need proper fix and carry perf_event_attr 702 * bitfield flags in separate data file FEAT_ section. Thought this seems 703 * to work for now. 704 */ 705 static void swap_bitfield(u8 *p, unsigned len) 706 { 707 unsigned i; 708 709 for (i = 0; i < len; i++) { 710 *p = revbyte(*p); 711 p++; 712 } 713 } 714 715 /* exported for swapping attributes in file header */ 716 void perf_event__attr_swap(struct perf_event_attr *attr) 717 { 718 attr->type = bswap_32(attr->type); 719 attr->size = bswap_32(attr->size); 720 721 #define bswap_safe(f, n) \ 722 (attr->size > (offsetof(struct perf_event_attr, f) + \ 723 sizeof(attr->f) * (n))) 724 #define bswap_field(f, sz) \ 725 do { \ 726 if (bswap_safe(f, 0)) \ 727 attr->f = bswap_##sz(attr->f); \ 728 } while(0) 729 #define bswap_field_16(f) bswap_field(f, 16) 730 #define bswap_field_32(f) bswap_field(f, 32) 731 #define bswap_field_64(f) bswap_field(f, 64) 732 733 bswap_field_64(config); 734 bswap_field_64(sample_period); 735 bswap_field_64(sample_type); 736 bswap_field_64(read_format); 737 bswap_field_32(wakeup_events); 738 bswap_field_32(bp_type); 739 bswap_field_64(bp_addr); 740 bswap_field_64(bp_len); 741 bswap_field_64(branch_sample_type); 742 bswap_field_64(sample_regs_user); 743 bswap_field_32(sample_stack_user); 744 bswap_field_32(aux_watermark); 745 bswap_field_16(sample_max_stack); 746 747 /* 748 * After read_format are bitfields. Check read_format because 749 * we are unable to use offsetof on bitfield. 750 */ 751 if (bswap_safe(read_format, 1)) 752 swap_bitfield((u8 *) (&attr->read_format + 1), 753 sizeof(u64)); 754 #undef bswap_field_64 755 #undef bswap_field_32 756 #undef bswap_field 757 #undef bswap_safe 758 } 759 760 static void perf_event__hdr_attr_swap(union perf_event *event, 761 bool sample_id_all __maybe_unused) 762 { 763 size_t size; 764 765 perf_event__attr_swap(&event->attr.attr); 766 767 size = event->header.size; 768 size -= (void *)&event->attr.id - (void *)event; 769 mem_bswap_64(event->attr.id, size); 770 } 771 772 static void perf_event__event_update_swap(union perf_event *event, 773 bool sample_id_all __maybe_unused) 774 { 775 event->event_update.type = bswap_64(event->event_update.type); 776 event->event_update.id = bswap_64(event->event_update.id); 777 } 778 779 static void perf_event__event_type_swap(union perf_event *event, 780 bool sample_id_all __maybe_unused) 781 { 782 event->event_type.event_type.event_id = 783 bswap_64(event->event_type.event_type.event_id); 784 } 785 786 static void perf_event__tracing_data_swap(union perf_event *event, 787 bool sample_id_all __maybe_unused) 788 { 789 event->tracing_data.size = bswap_32(event->tracing_data.size); 790 } 791 792 static void perf_event__auxtrace_info_swap(union perf_event *event, 793 bool sample_id_all __maybe_unused) 794 { 795 size_t size; 796 797 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); 798 799 size = event->header.size; 800 size -= (void *)&event->auxtrace_info.priv - (void *)event; 801 mem_bswap_64(event->auxtrace_info.priv, size); 802 } 803 804 static void perf_event__auxtrace_swap(union perf_event *event, 805 bool sample_id_all __maybe_unused) 806 { 807 event->auxtrace.size = bswap_64(event->auxtrace.size); 808 event->auxtrace.offset = bswap_64(event->auxtrace.offset); 809 event->auxtrace.reference = bswap_64(event->auxtrace.reference); 810 event->auxtrace.idx = bswap_32(event->auxtrace.idx); 811 event->auxtrace.tid = bswap_32(event->auxtrace.tid); 812 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); 813 } 814 815 static void perf_event__auxtrace_error_swap(union perf_event *event, 816 bool sample_id_all __maybe_unused) 817 { 818 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); 819 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); 820 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); 821 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); 822 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); 823 event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt); 824 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); 825 if (event->auxtrace_error.fmt) 826 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time); 827 } 828 829 static void perf_event__thread_map_swap(union perf_event *event, 830 bool sample_id_all __maybe_unused) 831 { 832 unsigned i; 833 834 event->thread_map.nr = bswap_64(event->thread_map.nr); 835 836 for (i = 0; i < event->thread_map.nr; i++) 837 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid); 838 } 839 840 static void perf_event__cpu_map_swap(union perf_event *event, 841 bool sample_id_all __maybe_unused) 842 { 843 struct perf_record_cpu_map_data *data = &event->cpu_map.data; 844 struct cpu_map_entries *cpus; 845 struct perf_record_record_cpu_map *mask; 846 unsigned i; 847 848 data->type = bswap_64(data->type); 849 850 switch (data->type) { 851 case PERF_CPU_MAP__CPUS: 852 cpus = (struct cpu_map_entries *)data->data; 853 854 cpus->nr = bswap_16(cpus->nr); 855 856 for (i = 0; i < cpus->nr; i++) 857 cpus->cpu[i] = bswap_16(cpus->cpu[i]); 858 break; 859 case PERF_CPU_MAP__MASK: 860 mask = (struct perf_record_record_cpu_map *)data->data; 861 862 mask->nr = bswap_16(mask->nr); 863 mask->long_size = bswap_16(mask->long_size); 864 865 switch (mask->long_size) { 866 case 4: mem_bswap_32(&mask->mask, mask->nr); break; 867 case 8: mem_bswap_64(&mask->mask, mask->nr); break; 868 default: 869 pr_err("cpu_map swap: unsupported long size\n"); 870 } 871 default: 872 break; 873 } 874 } 875 876 static void perf_event__stat_config_swap(union perf_event *event, 877 bool sample_id_all __maybe_unused) 878 { 879 u64 size; 880 881 size = event->stat_config.nr * sizeof(event->stat_config.data[0]); 882 size += 1; /* nr item itself */ 883 mem_bswap_64(&event->stat_config.nr, size); 884 } 885 886 static void perf_event__stat_swap(union perf_event *event, 887 bool sample_id_all __maybe_unused) 888 { 889 event->stat.id = bswap_64(event->stat.id); 890 event->stat.thread = bswap_32(event->stat.thread); 891 event->stat.cpu = bswap_32(event->stat.cpu); 892 event->stat.val = bswap_64(event->stat.val); 893 event->stat.ena = bswap_64(event->stat.ena); 894 event->stat.run = bswap_64(event->stat.run); 895 } 896 897 static void perf_event__stat_round_swap(union perf_event *event, 898 bool sample_id_all __maybe_unused) 899 { 900 event->stat_round.type = bswap_64(event->stat_round.type); 901 event->stat_round.time = bswap_64(event->stat_round.time); 902 } 903 904 typedef void (*perf_event__swap_op)(union perf_event *event, 905 bool sample_id_all); 906 907 static perf_event__swap_op perf_event__swap_ops[] = { 908 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 909 [PERF_RECORD_MMAP2] = perf_event__mmap2_swap, 910 [PERF_RECORD_COMM] = perf_event__comm_swap, 911 [PERF_RECORD_FORK] = perf_event__task_swap, 912 [PERF_RECORD_EXIT] = perf_event__task_swap, 913 [PERF_RECORD_LOST] = perf_event__all64_swap, 914 [PERF_RECORD_READ] = perf_event__read_swap, 915 [PERF_RECORD_THROTTLE] = perf_event__throttle_swap, 916 [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap, 917 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 918 [PERF_RECORD_AUX] = perf_event__aux_swap, 919 [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap, 920 [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap, 921 [PERF_RECORD_SWITCH] = perf_event__switch_swap, 922 [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, 923 [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap, 924 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 925 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 926 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 927 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 928 [PERF_RECORD_ID_INDEX] = perf_event__all64_swap, 929 [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap, 930 [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap, 931 [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap, 932 [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap, 933 [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap, 934 [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap, 935 [PERF_RECORD_STAT] = perf_event__stat_swap, 936 [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap, 937 [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap, 938 [PERF_RECORD_TIME_CONV] = perf_event__all64_swap, 939 [PERF_RECORD_HEADER_MAX] = NULL, 940 }; 941 942 /* 943 * When perf record finishes a pass on every buffers, it records this pseudo 944 * event. 945 * We record the max timestamp t found in the pass n. 946 * Assuming these timestamps are monotonic across cpus, we know that if 947 * a buffer still has events with timestamps below t, they will be all 948 * available and then read in the pass n + 1. 949 * Hence when we start to read the pass n + 2, we can safely flush every 950 * events with timestamps below t. 951 * 952 * ============ PASS n ================= 953 * CPU 0 | CPU 1 954 * | 955 * cnt1 timestamps | cnt2 timestamps 956 * 1 | 2 957 * 2 | 3 958 * - | 4 <--- max recorded 959 * 960 * ============ PASS n + 1 ============== 961 * CPU 0 | CPU 1 962 * | 963 * cnt1 timestamps | cnt2 timestamps 964 * 3 | 5 965 * 4 | 6 966 * 5 | 7 <---- max recorded 967 * 968 * Flush every events below timestamp 4 969 * 970 * ============ PASS n + 2 ============== 971 * CPU 0 | CPU 1 972 * | 973 * cnt1 timestamps | cnt2 timestamps 974 * 6 | 8 975 * 7 | 9 976 * - | 10 977 * 978 * Flush every events below timestamp 7 979 * etc... 980 */ 981 static int process_finished_round(struct perf_tool *tool __maybe_unused, 982 union perf_event *event __maybe_unused, 983 struct ordered_events *oe) 984 { 985 if (dump_trace) 986 fprintf(stdout, "\n"); 987 return ordered_events__flush(oe, OE_FLUSH__ROUND); 988 } 989 990 int perf_session__queue_event(struct perf_session *s, union perf_event *event, 991 u64 timestamp, u64 file_offset) 992 { 993 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset); 994 } 995 996 static void callchain__lbr_callstack_printf(struct perf_sample *sample) 997 { 998 struct ip_callchain *callchain = sample->callchain; 999 struct branch_stack *lbr_stack = sample->branch_stack; 1000 u64 kernel_callchain_nr = callchain->nr; 1001 unsigned int i; 1002 1003 for (i = 0; i < kernel_callchain_nr; i++) { 1004 if (callchain->ips[i] == PERF_CONTEXT_USER) 1005 break; 1006 } 1007 1008 if ((i != kernel_callchain_nr) && lbr_stack->nr) { 1009 u64 total_nr; 1010 /* 1011 * LBR callstack can only get user call chain, 1012 * i is kernel call chain number, 1013 * 1 is PERF_CONTEXT_USER. 1014 * 1015 * The user call chain is stored in LBR registers. 1016 * LBR are pair registers. The caller is stored 1017 * in "from" register, while the callee is stored 1018 * in "to" register. 1019 * For example, there is a call stack 1020 * "A"->"B"->"C"->"D". 1021 * The LBR registers will recorde like 1022 * "C"->"D", "B"->"C", "A"->"B". 1023 * So only the first "to" register and all "from" 1024 * registers are needed to construct the whole stack. 1025 */ 1026 total_nr = i + 1 + lbr_stack->nr + 1; 1027 kernel_callchain_nr = i + 1; 1028 1029 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr); 1030 1031 for (i = 0; i < kernel_callchain_nr; i++) 1032 printf("..... %2d: %016" PRIx64 "\n", 1033 i, callchain->ips[i]); 1034 1035 printf("..... %2d: %016" PRIx64 "\n", 1036 (int)(kernel_callchain_nr), lbr_stack->entries[0].to); 1037 for (i = 0; i < lbr_stack->nr; i++) 1038 printf("..... %2d: %016" PRIx64 "\n", 1039 (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from); 1040 } 1041 } 1042 1043 static void callchain__printf(struct evsel *evsel, 1044 struct perf_sample *sample) 1045 { 1046 unsigned int i; 1047 struct ip_callchain *callchain = sample->callchain; 1048 1049 if (perf_evsel__has_branch_callstack(evsel)) 1050 callchain__lbr_callstack_printf(sample); 1051 1052 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr); 1053 1054 for (i = 0; i < callchain->nr; i++) 1055 printf("..... %2d: %016" PRIx64 "\n", 1056 i, callchain->ips[i]); 1057 } 1058 1059 static void branch_stack__printf(struct perf_sample *sample, bool callstack) 1060 { 1061 uint64_t i; 1062 1063 printf("%s: nr:%" PRIu64 "\n", 1064 !callstack ? "... branch stack" : "... branch callstack", 1065 sample->branch_stack->nr); 1066 1067 for (i = 0; i < sample->branch_stack->nr; i++) { 1068 struct branch_entry *e = &sample->branch_stack->entries[i]; 1069 1070 if (!callstack) { 1071 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n", 1072 i, e->from, e->to, 1073 (unsigned short)e->flags.cycles, 1074 e->flags.mispred ? "M" : " ", 1075 e->flags.predicted ? "P" : " ", 1076 e->flags.abort ? "A" : " ", 1077 e->flags.in_tx ? "T" : " ", 1078 (unsigned)e->flags.reserved); 1079 } else { 1080 printf("..... %2"PRIu64": %016" PRIx64 "\n", 1081 i, i > 0 ? e->from : e->to); 1082 } 1083 } 1084 } 1085 1086 static void regs_dump__printf(u64 mask, u64 *regs) 1087 { 1088 unsigned rid, i = 0; 1089 1090 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { 1091 u64 val = regs[i++]; 1092 1093 printf(".... %-5s 0x%" PRIx64 "\n", 1094 perf_reg_name(rid), val); 1095 } 1096 } 1097 1098 static const char *regs_abi[] = { 1099 [PERF_SAMPLE_REGS_ABI_NONE] = "none", 1100 [PERF_SAMPLE_REGS_ABI_32] = "32-bit", 1101 [PERF_SAMPLE_REGS_ABI_64] = "64-bit", 1102 }; 1103 1104 static inline const char *regs_dump_abi(struct regs_dump *d) 1105 { 1106 if (d->abi > PERF_SAMPLE_REGS_ABI_64) 1107 return "unknown"; 1108 1109 return regs_abi[d->abi]; 1110 } 1111 1112 static void regs__printf(const char *type, struct regs_dump *regs) 1113 { 1114 u64 mask = regs->mask; 1115 1116 printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n", 1117 type, 1118 mask, 1119 regs_dump_abi(regs)); 1120 1121 regs_dump__printf(mask, regs->regs); 1122 } 1123 1124 static void regs_user__printf(struct perf_sample *sample) 1125 { 1126 struct regs_dump *user_regs = &sample->user_regs; 1127 1128 if (user_regs->regs) 1129 regs__printf("user", user_regs); 1130 } 1131 1132 static void regs_intr__printf(struct perf_sample *sample) 1133 { 1134 struct regs_dump *intr_regs = &sample->intr_regs; 1135 1136 if (intr_regs->regs) 1137 regs__printf("intr", intr_regs); 1138 } 1139 1140 static void stack_user__printf(struct stack_dump *dump) 1141 { 1142 printf("... ustack: size %" PRIu64 ", offset 0x%x\n", 1143 dump->size, dump->offset); 1144 } 1145 1146 static void perf_evlist__print_tstamp(struct evlist *evlist, 1147 union perf_event *event, 1148 struct perf_sample *sample) 1149 { 1150 u64 sample_type = __perf_evlist__combined_sample_type(evlist); 1151 1152 if (event->header.type != PERF_RECORD_SAMPLE && 1153 !perf_evlist__sample_id_all(evlist)) { 1154 fputs("-1 -1 ", stdout); 1155 return; 1156 } 1157 1158 if ((sample_type & PERF_SAMPLE_CPU)) 1159 printf("%u ", sample->cpu); 1160 1161 if (sample_type & PERF_SAMPLE_TIME) 1162 printf("%" PRIu64 " ", sample->time); 1163 } 1164 1165 static void sample_read__printf(struct perf_sample *sample, u64 read_format) 1166 { 1167 printf("... sample_read:\n"); 1168 1169 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1170 printf("...... time enabled %016" PRIx64 "\n", 1171 sample->read.time_enabled); 1172 1173 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1174 printf("...... time running %016" PRIx64 "\n", 1175 sample->read.time_running); 1176 1177 if (read_format & PERF_FORMAT_GROUP) { 1178 u64 i; 1179 1180 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); 1181 1182 for (i = 0; i < sample->read.group.nr; i++) { 1183 struct sample_read_value *value; 1184 1185 value = &sample->read.group.values[i]; 1186 printf("..... id %016" PRIx64 1187 ", value %016" PRIx64 "\n", 1188 value->id, value->value); 1189 } 1190 } else 1191 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n", 1192 sample->read.one.id, sample->read.one.value); 1193 } 1194 1195 static void dump_event(struct evlist *evlist, union perf_event *event, 1196 u64 file_offset, struct perf_sample *sample) 1197 { 1198 if (!dump_trace) 1199 return; 1200 1201 printf("\n%#" PRIx64 " [%#x]: event: %d\n", 1202 file_offset, event->header.size, event->header.type); 1203 1204 trace_event(event); 1205 if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw) 1206 evlist->trace_event_sample_raw(evlist, event, sample); 1207 1208 if (sample) 1209 perf_evlist__print_tstamp(evlist, event, sample); 1210 1211 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 1212 event->header.size, perf_event__name(event->header.type)); 1213 } 1214 1215 static void dump_sample(struct evsel *evsel, union perf_event *event, 1216 struct perf_sample *sample) 1217 { 1218 u64 sample_type; 1219 1220 if (!dump_trace) 1221 return; 1222 1223 printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", 1224 event->header.misc, sample->pid, sample->tid, sample->ip, 1225 sample->period, sample->addr); 1226 1227 sample_type = evsel->core.attr.sample_type; 1228 1229 if (evsel__has_callchain(evsel)) 1230 callchain__printf(evsel, sample); 1231 1232 if (sample_type & PERF_SAMPLE_BRANCH_STACK) 1233 branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel)); 1234 1235 if (sample_type & PERF_SAMPLE_REGS_USER) 1236 regs_user__printf(sample); 1237 1238 if (sample_type & PERF_SAMPLE_REGS_INTR) 1239 regs_intr__printf(sample); 1240 1241 if (sample_type & PERF_SAMPLE_STACK_USER) 1242 stack_user__printf(&sample->user_stack); 1243 1244 if (sample_type & PERF_SAMPLE_WEIGHT) 1245 printf("... weight: %" PRIu64 "\n", sample->weight); 1246 1247 if (sample_type & PERF_SAMPLE_DATA_SRC) 1248 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); 1249 1250 if (sample_type & PERF_SAMPLE_PHYS_ADDR) 1251 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr); 1252 1253 if (sample_type & PERF_SAMPLE_TRANSACTION) 1254 printf("... transaction: %" PRIx64 "\n", sample->transaction); 1255 1256 if (sample_type & PERF_SAMPLE_READ) 1257 sample_read__printf(sample, evsel->core.attr.read_format); 1258 } 1259 1260 static void dump_read(struct evsel *evsel, union perf_event *event) 1261 { 1262 struct perf_record_read *read_event = &event->read; 1263 u64 read_format; 1264 1265 if (!dump_trace) 1266 return; 1267 1268 printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid, 1269 perf_evsel__name(evsel), 1270 event->read.value); 1271 1272 if (!evsel) 1273 return; 1274 1275 read_format = evsel->core.attr.read_format; 1276 1277 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1278 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled); 1279 1280 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1281 printf("... time running : %" PRI_lu64 "\n", read_event->time_running); 1282 1283 if (read_format & PERF_FORMAT_ID) 1284 printf("... id : %" PRI_lu64 "\n", read_event->id); 1285 } 1286 1287 static struct machine *machines__find_for_cpumode(struct machines *machines, 1288 union perf_event *event, 1289 struct perf_sample *sample) 1290 { 1291 struct machine *machine; 1292 1293 if (perf_guest && 1294 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || 1295 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) { 1296 u32 pid; 1297 1298 if (event->header.type == PERF_RECORD_MMAP 1299 || event->header.type == PERF_RECORD_MMAP2) 1300 pid = event->mmap.pid; 1301 else 1302 pid = sample->pid; 1303 1304 machine = machines__find(machines, pid); 1305 if (!machine) 1306 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID); 1307 return machine; 1308 } 1309 1310 return &machines->host; 1311 } 1312 1313 static int deliver_sample_value(struct evlist *evlist, 1314 struct perf_tool *tool, 1315 union perf_event *event, 1316 struct perf_sample *sample, 1317 struct sample_read_value *v, 1318 struct machine *machine) 1319 { 1320 struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id); 1321 1322 if (sid) { 1323 sample->id = v->id; 1324 sample->period = v->value - sid->period; 1325 sid->period = v->value; 1326 } 1327 1328 if (!sid || sid->evsel == NULL) { 1329 ++evlist->stats.nr_unknown_id; 1330 return 0; 1331 } 1332 1333 /* 1334 * There's no reason to deliver sample 1335 * for zero period, bail out. 1336 */ 1337 if (!sample->period) 1338 return 0; 1339 1340 return tool->sample(tool, event, sample, sid->evsel, machine); 1341 } 1342 1343 static int deliver_sample_group(struct evlist *evlist, 1344 struct perf_tool *tool, 1345 union perf_event *event, 1346 struct perf_sample *sample, 1347 struct machine *machine) 1348 { 1349 int ret = -EINVAL; 1350 u64 i; 1351 1352 for (i = 0; i < sample->read.group.nr; i++) { 1353 ret = deliver_sample_value(evlist, tool, event, sample, 1354 &sample->read.group.values[i], 1355 machine); 1356 if (ret) 1357 break; 1358 } 1359 1360 return ret; 1361 } 1362 1363 static int 1364 perf_evlist__deliver_sample(struct evlist *evlist, 1365 struct perf_tool *tool, 1366 union perf_event *event, 1367 struct perf_sample *sample, 1368 struct evsel *evsel, 1369 struct machine *machine) 1370 { 1371 /* We know evsel != NULL. */ 1372 u64 sample_type = evsel->core.attr.sample_type; 1373 u64 read_format = evsel->core.attr.read_format; 1374 1375 /* Standard sample delivery. */ 1376 if (!(sample_type & PERF_SAMPLE_READ)) 1377 return tool->sample(tool, event, sample, evsel, machine); 1378 1379 /* For PERF_SAMPLE_READ we have either single or group mode. */ 1380 if (read_format & PERF_FORMAT_GROUP) 1381 return deliver_sample_group(evlist, tool, event, sample, 1382 machine); 1383 else 1384 return deliver_sample_value(evlist, tool, event, sample, 1385 &sample->read.one, machine); 1386 } 1387 1388 static int machines__deliver_event(struct machines *machines, 1389 struct evlist *evlist, 1390 union perf_event *event, 1391 struct perf_sample *sample, 1392 struct perf_tool *tool, u64 file_offset) 1393 { 1394 struct evsel *evsel; 1395 struct machine *machine; 1396 1397 dump_event(evlist, event, file_offset, sample); 1398 1399 evsel = perf_evlist__id2evsel(evlist, sample->id); 1400 1401 machine = machines__find_for_cpumode(machines, event, sample); 1402 1403 switch (event->header.type) { 1404 case PERF_RECORD_SAMPLE: 1405 if (evsel == NULL) { 1406 ++evlist->stats.nr_unknown_id; 1407 return 0; 1408 } 1409 dump_sample(evsel, event, sample); 1410 if (machine == NULL) { 1411 ++evlist->stats.nr_unprocessable_samples; 1412 return 0; 1413 } 1414 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); 1415 case PERF_RECORD_MMAP: 1416 return tool->mmap(tool, event, sample, machine); 1417 case PERF_RECORD_MMAP2: 1418 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) 1419 ++evlist->stats.nr_proc_map_timeout; 1420 return tool->mmap2(tool, event, sample, machine); 1421 case PERF_RECORD_COMM: 1422 return tool->comm(tool, event, sample, machine); 1423 case PERF_RECORD_NAMESPACES: 1424 return tool->namespaces(tool, event, sample, machine); 1425 case PERF_RECORD_FORK: 1426 return tool->fork(tool, event, sample, machine); 1427 case PERF_RECORD_EXIT: 1428 return tool->exit(tool, event, sample, machine); 1429 case PERF_RECORD_LOST: 1430 if (tool->lost == perf_event__process_lost) 1431 evlist->stats.total_lost += event->lost.lost; 1432 return tool->lost(tool, event, sample, machine); 1433 case PERF_RECORD_LOST_SAMPLES: 1434 if (tool->lost_samples == perf_event__process_lost_samples) 1435 evlist->stats.total_lost_samples += event->lost_samples.lost; 1436 return tool->lost_samples(tool, event, sample, machine); 1437 case PERF_RECORD_READ: 1438 dump_read(evsel, event); 1439 return tool->read(tool, event, sample, evsel, machine); 1440 case PERF_RECORD_THROTTLE: 1441 return tool->throttle(tool, event, sample, machine); 1442 case PERF_RECORD_UNTHROTTLE: 1443 return tool->unthrottle(tool, event, sample, machine); 1444 case PERF_RECORD_AUX: 1445 if (tool->aux == perf_event__process_aux) { 1446 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) 1447 evlist->stats.total_aux_lost += 1; 1448 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL) 1449 evlist->stats.total_aux_partial += 1; 1450 } 1451 return tool->aux(tool, event, sample, machine); 1452 case PERF_RECORD_ITRACE_START: 1453 return tool->itrace_start(tool, event, sample, machine); 1454 case PERF_RECORD_SWITCH: 1455 case PERF_RECORD_SWITCH_CPU_WIDE: 1456 return tool->context_switch(tool, event, sample, machine); 1457 case PERF_RECORD_KSYMBOL: 1458 return tool->ksymbol(tool, event, sample, machine); 1459 case PERF_RECORD_BPF_EVENT: 1460 return tool->bpf(tool, event, sample, machine); 1461 default: 1462 ++evlist->stats.nr_unknown_events; 1463 return -1; 1464 } 1465 } 1466 1467 static int perf_session__deliver_event(struct perf_session *session, 1468 union perf_event *event, 1469 struct perf_tool *tool, 1470 u64 file_offset) 1471 { 1472 struct perf_sample sample; 1473 int ret; 1474 1475 ret = perf_evlist__parse_sample(session->evlist, event, &sample); 1476 if (ret) { 1477 pr_err("Can't parse sample, err = %d\n", ret); 1478 return ret; 1479 } 1480 1481 ret = auxtrace__process_event(session, event, &sample, tool); 1482 if (ret < 0) 1483 return ret; 1484 if (ret > 0) 1485 return 0; 1486 1487 return machines__deliver_event(&session->machines, session->evlist, 1488 event, &sample, tool, file_offset); 1489 } 1490 1491 static s64 perf_session__process_user_event(struct perf_session *session, 1492 union perf_event *event, 1493 u64 file_offset) 1494 { 1495 struct ordered_events *oe = &session->ordered_events; 1496 struct perf_tool *tool = session->tool; 1497 struct perf_sample sample = { .time = 0, }; 1498 int fd = perf_data__fd(session->data); 1499 int err; 1500 1501 if (event->header.type != PERF_RECORD_COMPRESSED || 1502 tool->compressed == perf_session__process_compressed_event_stub) 1503 dump_event(session->evlist, event, file_offset, &sample); 1504 1505 /* These events are processed right away */ 1506 switch (event->header.type) { 1507 case PERF_RECORD_HEADER_ATTR: 1508 err = tool->attr(tool, event, &session->evlist); 1509 if (err == 0) { 1510 perf_session__set_id_hdr_size(session); 1511 perf_session__set_comm_exec(session); 1512 } 1513 return err; 1514 case PERF_RECORD_EVENT_UPDATE: 1515 return tool->event_update(tool, event, &session->evlist); 1516 case PERF_RECORD_HEADER_EVENT_TYPE: 1517 /* 1518 * Depreceated, but we need to handle it for sake 1519 * of old data files create in pipe mode. 1520 */ 1521 return 0; 1522 case PERF_RECORD_HEADER_TRACING_DATA: 1523 /* setup for reading amidst mmap */ 1524 lseek(fd, file_offset, SEEK_SET); 1525 return tool->tracing_data(session, event); 1526 case PERF_RECORD_HEADER_BUILD_ID: 1527 return tool->build_id(session, event); 1528 case PERF_RECORD_FINISHED_ROUND: 1529 return tool->finished_round(tool, event, oe); 1530 case PERF_RECORD_ID_INDEX: 1531 return tool->id_index(session, event); 1532 case PERF_RECORD_AUXTRACE_INFO: 1533 return tool->auxtrace_info(session, event); 1534 case PERF_RECORD_AUXTRACE: 1535 /* setup for reading amidst mmap */ 1536 lseek(fd, file_offset + event->header.size, SEEK_SET); 1537 return tool->auxtrace(session, event); 1538 case PERF_RECORD_AUXTRACE_ERROR: 1539 perf_session__auxtrace_error_inc(session, event); 1540 return tool->auxtrace_error(session, event); 1541 case PERF_RECORD_THREAD_MAP: 1542 return tool->thread_map(session, event); 1543 case PERF_RECORD_CPU_MAP: 1544 return tool->cpu_map(session, event); 1545 case PERF_RECORD_STAT_CONFIG: 1546 return tool->stat_config(session, event); 1547 case PERF_RECORD_STAT: 1548 return tool->stat(session, event); 1549 case PERF_RECORD_STAT_ROUND: 1550 return tool->stat_round(session, event); 1551 case PERF_RECORD_TIME_CONV: 1552 session->time_conv = event->time_conv; 1553 return tool->time_conv(session, event); 1554 case PERF_RECORD_HEADER_FEATURE: 1555 return tool->feature(session, event); 1556 case PERF_RECORD_COMPRESSED: 1557 err = tool->compressed(session, event, file_offset); 1558 if (err) 1559 dump_event(session->evlist, event, file_offset, &sample); 1560 return err; 1561 default: 1562 return -EINVAL; 1563 } 1564 } 1565 1566 int perf_session__deliver_synth_event(struct perf_session *session, 1567 union perf_event *event, 1568 struct perf_sample *sample) 1569 { 1570 struct evlist *evlist = session->evlist; 1571 struct perf_tool *tool = session->tool; 1572 1573 events_stats__inc(&evlist->stats, event->header.type); 1574 1575 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1576 return perf_session__process_user_event(session, event, 0); 1577 1578 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); 1579 } 1580 1581 static void event_swap(union perf_event *event, bool sample_id_all) 1582 { 1583 perf_event__swap_op swap; 1584 1585 swap = perf_event__swap_ops[event->header.type]; 1586 if (swap) 1587 swap(event, sample_id_all); 1588 } 1589 1590 int perf_session__peek_event(struct perf_session *session, off_t file_offset, 1591 void *buf, size_t buf_sz, 1592 union perf_event **event_ptr, 1593 struct perf_sample *sample) 1594 { 1595 union perf_event *event; 1596 size_t hdr_sz, rest; 1597 int fd; 1598 1599 if (session->one_mmap && !session->header.needs_swap) { 1600 event = file_offset - session->one_mmap_offset + 1601 session->one_mmap_addr; 1602 goto out_parse_sample; 1603 } 1604 1605 if (perf_data__is_pipe(session->data)) 1606 return -1; 1607 1608 fd = perf_data__fd(session->data); 1609 hdr_sz = sizeof(struct perf_event_header); 1610 1611 if (buf_sz < hdr_sz) 1612 return -1; 1613 1614 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || 1615 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz) 1616 return -1; 1617 1618 event = (union perf_event *)buf; 1619 1620 if (session->header.needs_swap) 1621 perf_event_header__bswap(&event->header); 1622 1623 if (event->header.size < hdr_sz || event->header.size > buf_sz) 1624 return -1; 1625 1626 rest = event->header.size - hdr_sz; 1627 1628 if (readn(fd, buf, rest) != (ssize_t)rest) 1629 return -1; 1630 1631 if (session->header.needs_swap) 1632 event_swap(event, perf_evlist__sample_id_all(session->evlist)); 1633 1634 out_parse_sample: 1635 1636 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && 1637 perf_evlist__parse_sample(session->evlist, event, sample)) 1638 return -1; 1639 1640 *event_ptr = event; 1641 1642 return 0; 1643 } 1644 1645 static s64 perf_session__process_event(struct perf_session *session, 1646 union perf_event *event, u64 file_offset) 1647 { 1648 struct evlist *evlist = session->evlist; 1649 struct perf_tool *tool = session->tool; 1650 int ret; 1651 1652 if (session->header.needs_swap) 1653 event_swap(event, perf_evlist__sample_id_all(evlist)); 1654 1655 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1656 return -EINVAL; 1657 1658 events_stats__inc(&evlist->stats, event->header.type); 1659 1660 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1661 return perf_session__process_user_event(session, event, file_offset); 1662 1663 if (tool->ordered_events) { 1664 u64 timestamp = -1ULL; 1665 1666 ret = perf_evlist__parse_sample_timestamp(evlist, event, ×tamp); 1667 if (ret && ret != -1) 1668 return ret; 1669 1670 ret = perf_session__queue_event(session, event, timestamp, file_offset); 1671 if (ret != -ETIME) 1672 return ret; 1673 } 1674 1675 return perf_session__deliver_event(session, event, tool, file_offset); 1676 } 1677 1678 void perf_event_header__bswap(struct perf_event_header *hdr) 1679 { 1680 hdr->type = bswap_32(hdr->type); 1681 hdr->misc = bswap_16(hdr->misc); 1682 hdr->size = bswap_16(hdr->size); 1683 } 1684 1685 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 1686 { 1687 return machine__findnew_thread(&session->machines.host, -1, pid); 1688 } 1689 1690 /* 1691 * Threads are identified by pid and tid, and the idle task has pid == tid == 0. 1692 * So here a single thread is created for that, but actually there is a separate 1693 * idle task per cpu, so there should be one 'struct thread' per cpu, but there 1694 * is only 1. That causes problems for some tools, requiring workarounds. For 1695 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu(). 1696 */ 1697 int perf_session__register_idle_thread(struct perf_session *session) 1698 { 1699 struct thread *thread; 1700 int err = 0; 1701 1702 thread = machine__findnew_thread(&session->machines.host, 0, 0); 1703 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) { 1704 pr_err("problem inserting idle task.\n"); 1705 err = -1; 1706 } 1707 1708 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) { 1709 pr_err("problem inserting idle task.\n"); 1710 err = -1; 1711 } 1712 1713 /* machine__findnew_thread() got the thread, so put it */ 1714 thread__put(thread); 1715 return err; 1716 } 1717 1718 static void 1719 perf_session__warn_order(const struct perf_session *session) 1720 { 1721 const struct ordered_events *oe = &session->ordered_events; 1722 struct evsel *evsel; 1723 bool should_warn = true; 1724 1725 evlist__for_each_entry(session->evlist, evsel) { 1726 if (evsel->core.attr.write_backward) 1727 should_warn = false; 1728 } 1729 1730 if (!should_warn) 1731 return; 1732 if (oe->nr_unordered_events != 0) 1733 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events); 1734 } 1735 1736 static void perf_session__warn_about_errors(const struct perf_session *session) 1737 { 1738 const struct events_stats *stats = &session->evlist->stats; 1739 1740 if (session->tool->lost == perf_event__process_lost && 1741 stats->nr_events[PERF_RECORD_LOST] != 0) { 1742 ui__warning("Processed %d events and lost %d chunks!\n\n" 1743 "Check IO/CPU overload!\n\n", 1744 stats->nr_events[0], 1745 stats->nr_events[PERF_RECORD_LOST]); 1746 } 1747 1748 if (session->tool->lost_samples == perf_event__process_lost_samples) { 1749 double drop_rate; 1750 1751 drop_rate = (double)stats->total_lost_samples / 1752 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples); 1753 if (drop_rate > 0.05) { 1754 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n", 1755 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples, 1756 drop_rate * 100.0); 1757 } 1758 } 1759 1760 if (session->tool->aux == perf_event__process_aux && 1761 stats->total_aux_lost != 0) { 1762 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n", 1763 stats->total_aux_lost, 1764 stats->nr_events[PERF_RECORD_AUX]); 1765 } 1766 1767 if (session->tool->aux == perf_event__process_aux && 1768 stats->total_aux_partial != 0) { 1769 bool vmm_exclusive = false; 1770 1771 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive", 1772 &vmm_exclusive); 1773 1774 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n" 1775 "Are you running a KVM guest in the background?%s\n\n", 1776 stats->total_aux_partial, 1777 stats->nr_events[PERF_RECORD_AUX], 1778 vmm_exclusive ? 1779 "\nReloading kvm_intel module with vmm_exclusive=0\n" 1780 "will reduce the gaps to only guest's timeslices." : 1781 ""); 1782 } 1783 1784 if (stats->nr_unknown_events != 0) { 1785 ui__warning("Found %u unknown events!\n\n" 1786 "Is this an older tool processing a perf.data " 1787 "file generated by a more recent tool?\n\n" 1788 "If that is not the case, consider " 1789 "reporting to linux-kernel@vger.kernel.org.\n\n", 1790 stats->nr_unknown_events); 1791 } 1792 1793 if (stats->nr_unknown_id != 0) { 1794 ui__warning("%u samples with id not present in the header\n", 1795 stats->nr_unknown_id); 1796 } 1797 1798 if (stats->nr_invalid_chains != 0) { 1799 ui__warning("Found invalid callchains!\n\n" 1800 "%u out of %u events were discarded for this reason.\n\n" 1801 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1802 stats->nr_invalid_chains, 1803 stats->nr_events[PERF_RECORD_SAMPLE]); 1804 } 1805 1806 if (stats->nr_unprocessable_samples != 0) { 1807 ui__warning("%u unprocessable samples recorded.\n" 1808 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1809 stats->nr_unprocessable_samples); 1810 } 1811 1812 perf_session__warn_order(session); 1813 1814 events_stats__auxtrace_error_warn(stats); 1815 1816 if (stats->nr_proc_map_timeout != 0) { 1817 ui__warning("%d map information files for pre-existing threads were\n" 1818 "not processed, if there are samples for addresses they\n" 1819 "will not be resolved, you may find out which are these\n" 1820 "threads by running with -v and redirecting the output\n" 1821 "to a file.\n" 1822 "The time limit to process proc map is too short?\n" 1823 "Increase it by --proc-map-timeout\n", 1824 stats->nr_proc_map_timeout); 1825 } 1826 } 1827 1828 static int perf_session__flush_thread_stack(struct thread *thread, 1829 void *p __maybe_unused) 1830 { 1831 return thread_stack__flush(thread); 1832 } 1833 1834 static int perf_session__flush_thread_stacks(struct perf_session *session) 1835 { 1836 return machines__for_each_thread(&session->machines, 1837 perf_session__flush_thread_stack, 1838 NULL); 1839 } 1840 1841 volatile int session_done; 1842 1843 static int __perf_session__process_decomp_events(struct perf_session *session); 1844 1845 static int __perf_session__process_pipe_events(struct perf_session *session) 1846 { 1847 struct ordered_events *oe = &session->ordered_events; 1848 struct perf_tool *tool = session->tool; 1849 int fd = perf_data__fd(session->data); 1850 union perf_event *event; 1851 uint32_t size, cur_size = 0; 1852 void *buf = NULL; 1853 s64 skip = 0; 1854 u64 head; 1855 ssize_t err; 1856 void *p; 1857 1858 perf_tool__fill_defaults(tool); 1859 1860 head = 0; 1861 cur_size = sizeof(union perf_event); 1862 1863 buf = malloc(cur_size); 1864 if (!buf) 1865 return -errno; 1866 ordered_events__set_copy_on_queue(oe, true); 1867 more: 1868 event = buf; 1869 err = readn(fd, event, sizeof(struct perf_event_header)); 1870 if (err <= 0) { 1871 if (err == 0) 1872 goto done; 1873 1874 pr_err("failed to read event header\n"); 1875 goto out_err; 1876 } 1877 1878 if (session->header.needs_swap) 1879 perf_event_header__bswap(&event->header); 1880 1881 size = event->header.size; 1882 if (size < sizeof(struct perf_event_header)) { 1883 pr_err("bad event header size\n"); 1884 goto out_err; 1885 } 1886 1887 if (size > cur_size) { 1888 void *new = realloc(buf, size); 1889 if (!new) { 1890 pr_err("failed to allocate memory to read event\n"); 1891 goto out_err; 1892 } 1893 buf = new; 1894 cur_size = size; 1895 event = buf; 1896 } 1897 p = event; 1898 p += sizeof(struct perf_event_header); 1899 1900 if (size - sizeof(struct perf_event_header)) { 1901 err = readn(fd, p, size - sizeof(struct perf_event_header)); 1902 if (err <= 0) { 1903 if (err == 0) { 1904 pr_err("unexpected end of event stream\n"); 1905 goto done; 1906 } 1907 1908 pr_err("failed to read event data\n"); 1909 goto out_err; 1910 } 1911 } 1912 1913 if ((skip = perf_session__process_event(session, event, head)) < 0) { 1914 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1915 head, event->header.size, event->header.type); 1916 err = -EINVAL; 1917 goto out_err; 1918 } 1919 1920 head += size; 1921 1922 if (skip > 0) 1923 head += skip; 1924 1925 err = __perf_session__process_decomp_events(session); 1926 if (err) 1927 goto out_err; 1928 1929 if (!session_done()) 1930 goto more; 1931 done: 1932 /* do the final flush for ordered samples */ 1933 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 1934 if (err) 1935 goto out_err; 1936 err = auxtrace__flush_events(session, tool); 1937 if (err) 1938 goto out_err; 1939 err = perf_session__flush_thread_stacks(session); 1940 out_err: 1941 free(buf); 1942 if (!tool->no_warn) 1943 perf_session__warn_about_errors(session); 1944 ordered_events__free(&session->ordered_events); 1945 auxtrace__free_events(session); 1946 return err; 1947 } 1948 1949 static union perf_event * 1950 fetch_mmaped_event(struct perf_session *session, 1951 u64 head, size_t mmap_size, char *buf) 1952 { 1953 union perf_event *event; 1954 1955 /* 1956 * Ensure we have enough space remaining to read 1957 * the size of the event in the headers. 1958 */ 1959 if (head + sizeof(event->header) > mmap_size) 1960 return NULL; 1961 1962 event = (union perf_event *)(buf + head); 1963 1964 if (session->header.needs_swap) 1965 perf_event_header__bswap(&event->header); 1966 1967 if (head + event->header.size > mmap_size) { 1968 /* We're not fetching the event so swap back again */ 1969 if (session->header.needs_swap) 1970 perf_event_header__bswap(&event->header); 1971 pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx: fuzzed perf.data?\n", 1972 __func__, head, event->header.size, mmap_size); 1973 return ERR_PTR(-EINVAL); 1974 } 1975 1976 return event; 1977 } 1978 1979 static int __perf_session__process_decomp_events(struct perf_session *session) 1980 { 1981 s64 skip; 1982 u64 size, file_pos = 0; 1983 struct decomp *decomp = session->decomp_last; 1984 1985 if (!decomp) 1986 return 0; 1987 1988 while (decomp->head < decomp->size && !session_done()) { 1989 union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data); 1990 1991 if (IS_ERR(event)) 1992 return PTR_ERR(event); 1993 1994 if (!event) 1995 break; 1996 1997 size = event->header.size; 1998 1999 if (size < sizeof(struct perf_event_header) || 2000 (skip = perf_session__process_event(session, event, file_pos)) < 0) { 2001 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 2002 decomp->file_pos + decomp->head, event->header.size, event->header.type); 2003 return -EINVAL; 2004 } 2005 2006 if (skip) 2007 size += skip; 2008 2009 decomp->head += size; 2010 } 2011 2012 return 0; 2013 } 2014 2015 /* 2016 * On 64bit we can mmap the data file in one go. No need for tiny mmap 2017 * slices. On 32bit we use 32MB. 2018 */ 2019 #if BITS_PER_LONG == 64 2020 #define MMAP_SIZE ULLONG_MAX 2021 #define NUM_MMAPS 1 2022 #else 2023 #define MMAP_SIZE (32 * 1024 * 1024ULL) 2024 #define NUM_MMAPS 128 2025 #endif 2026 2027 struct reader; 2028 2029 typedef s64 (*reader_cb_t)(struct perf_session *session, 2030 union perf_event *event, 2031 u64 file_offset); 2032 2033 struct reader { 2034 int fd; 2035 u64 data_size; 2036 u64 data_offset; 2037 reader_cb_t process; 2038 }; 2039 2040 static int 2041 reader__process_events(struct reader *rd, struct perf_session *session, 2042 struct ui_progress *prog) 2043 { 2044 u64 data_size = rd->data_size; 2045 u64 head, page_offset, file_offset, file_pos, size; 2046 int err = 0, mmap_prot, mmap_flags, map_idx = 0; 2047 size_t mmap_size; 2048 char *buf, *mmaps[NUM_MMAPS]; 2049 union perf_event *event; 2050 s64 skip; 2051 2052 page_offset = page_size * (rd->data_offset / page_size); 2053 file_offset = page_offset; 2054 head = rd->data_offset - page_offset; 2055 2056 ui_progress__init_size(prog, data_size, "Processing events..."); 2057 2058 data_size += rd->data_offset; 2059 2060 mmap_size = MMAP_SIZE; 2061 if (mmap_size > data_size) { 2062 mmap_size = data_size; 2063 session->one_mmap = true; 2064 } 2065 2066 memset(mmaps, 0, sizeof(mmaps)); 2067 2068 mmap_prot = PROT_READ; 2069 mmap_flags = MAP_SHARED; 2070 2071 if (session->header.needs_swap) { 2072 mmap_prot |= PROT_WRITE; 2073 mmap_flags = MAP_PRIVATE; 2074 } 2075 remap: 2076 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd, 2077 file_offset); 2078 if (buf == MAP_FAILED) { 2079 pr_err("failed to mmap file\n"); 2080 err = -errno; 2081 goto out; 2082 } 2083 mmaps[map_idx] = buf; 2084 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 2085 file_pos = file_offset + head; 2086 if (session->one_mmap) { 2087 session->one_mmap_addr = buf; 2088 session->one_mmap_offset = file_offset; 2089 } 2090 2091 more: 2092 event = fetch_mmaped_event(session, head, mmap_size, buf); 2093 if (IS_ERR(event)) 2094 return PTR_ERR(event); 2095 2096 if (!event) { 2097 if (mmaps[map_idx]) { 2098 munmap(mmaps[map_idx], mmap_size); 2099 mmaps[map_idx] = NULL; 2100 } 2101 2102 page_offset = page_size * (head / page_size); 2103 file_offset += page_offset; 2104 head -= page_offset; 2105 goto remap; 2106 } 2107 2108 size = event->header.size; 2109 2110 skip = -EINVAL; 2111 2112 if (size < sizeof(struct perf_event_header) || 2113 (skip = rd->process(session, event, file_pos)) < 0) { 2114 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n", 2115 file_offset + head, event->header.size, 2116 event->header.type, strerror(-skip)); 2117 err = skip; 2118 goto out; 2119 } 2120 2121 if (skip) 2122 size += skip; 2123 2124 head += size; 2125 file_pos += size; 2126 2127 err = __perf_session__process_decomp_events(session); 2128 if (err) 2129 goto out; 2130 2131 ui_progress__update(prog, size); 2132 2133 if (session_done()) 2134 goto out; 2135 2136 if (file_pos < data_size) 2137 goto more; 2138 2139 out: 2140 return err; 2141 } 2142 2143 static s64 process_simple(struct perf_session *session, 2144 union perf_event *event, 2145 u64 file_offset) 2146 { 2147 return perf_session__process_event(session, event, file_offset); 2148 } 2149 2150 static int __perf_session__process_events(struct perf_session *session) 2151 { 2152 struct reader rd = { 2153 .fd = perf_data__fd(session->data), 2154 .data_size = session->header.data_size, 2155 .data_offset = session->header.data_offset, 2156 .process = process_simple, 2157 }; 2158 struct ordered_events *oe = &session->ordered_events; 2159 struct perf_tool *tool = session->tool; 2160 struct ui_progress prog; 2161 int err; 2162 2163 perf_tool__fill_defaults(tool); 2164 2165 if (rd.data_size == 0) 2166 return -1; 2167 2168 ui_progress__init_size(&prog, rd.data_size, "Processing events..."); 2169 2170 err = reader__process_events(&rd, session, &prog); 2171 if (err) 2172 goto out_err; 2173 /* do the final flush for ordered samples */ 2174 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 2175 if (err) 2176 goto out_err; 2177 err = auxtrace__flush_events(session, tool); 2178 if (err) 2179 goto out_err; 2180 err = perf_session__flush_thread_stacks(session); 2181 out_err: 2182 ui_progress__finish(); 2183 if (!tool->no_warn) 2184 perf_session__warn_about_errors(session); 2185 /* 2186 * We may switching perf.data output, make ordered_events 2187 * reusable. 2188 */ 2189 ordered_events__reinit(&session->ordered_events); 2190 auxtrace__free_events(session); 2191 session->one_mmap = false; 2192 return err; 2193 } 2194 2195 int perf_session__process_events(struct perf_session *session) 2196 { 2197 if (perf_session__register_idle_thread(session) < 0) 2198 return -ENOMEM; 2199 2200 if (perf_data__is_pipe(session->data)) 2201 return __perf_session__process_pipe_events(session); 2202 2203 return __perf_session__process_events(session); 2204 } 2205 2206 bool perf_session__has_traces(struct perf_session *session, const char *msg) 2207 { 2208 struct evsel *evsel; 2209 2210 evlist__for_each_entry(session->evlist, evsel) { 2211 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) 2212 return true; 2213 } 2214 2215 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 2216 return false; 2217 } 2218 2219 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr) 2220 { 2221 char *bracket; 2222 struct ref_reloc_sym *ref; 2223 struct kmap *kmap; 2224 2225 ref = zalloc(sizeof(struct ref_reloc_sym)); 2226 if (ref == NULL) 2227 return -ENOMEM; 2228 2229 ref->name = strdup(symbol_name); 2230 if (ref->name == NULL) { 2231 free(ref); 2232 return -ENOMEM; 2233 } 2234 2235 bracket = strchr(ref->name, ']'); 2236 if (bracket) 2237 *bracket = '\0'; 2238 2239 ref->addr = addr; 2240 2241 kmap = map__kmap(map); 2242 if (kmap) 2243 kmap->ref_reloc_sym = ref; 2244 2245 return 0; 2246 } 2247 2248 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp) 2249 { 2250 return machines__fprintf_dsos(&session->machines, fp); 2251 } 2252 2253 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, 2254 bool (skip)(struct dso *dso, int parm), int parm) 2255 { 2256 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm); 2257 } 2258 2259 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 2260 { 2261 size_t ret; 2262 const char *msg = ""; 2263 2264 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) 2265 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)"; 2266 2267 ret = fprintf(fp, "\nAggregated stats:%s\n", msg); 2268 2269 ret += events_stats__fprintf(&session->evlist->stats, fp); 2270 return ret; 2271 } 2272 2273 size_t perf_session__fprintf(struct perf_session *session, FILE *fp) 2274 { 2275 /* 2276 * FIXME: Here we have to actually print all the machines in this 2277 * session, not just the host... 2278 */ 2279 return machine__fprintf(&session->machines.host, fp); 2280 } 2281 2282 struct evsel *perf_session__find_first_evtype(struct perf_session *session, 2283 unsigned int type) 2284 { 2285 struct evsel *pos; 2286 2287 evlist__for_each_entry(session->evlist, pos) { 2288 if (pos->core.attr.type == type) 2289 return pos; 2290 } 2291 return NULL; 2292 } 2293 2294 int perf_session__cpu_bitmap(struct perf_session *session, 2295 const char *cpu_list, unsigned long *cpu_bitmap) 2296 { 2297 int i, err = -1; 2298 struct perf_cpu_map *map; 2299 int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS); 2300 2301 for (i = 0; i < PERF_TYPE_MAX; ++i) { 2302 struct evsel *evsel; 2303 2304 evsel = perf_session__find_first_evtype(session, i); 2305 if (!evsel) 2306 continue; 2307 2308 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) { 2309 pr_err("File does not contain CPU events. " 2310 "Remove -C option to proceed.\n"); 2311 return -1; 2312 } 2313 } 2314 2315 map = perf_cpu_map__new(cpu_list); 2316 if (map == NULL) { 2317 pr_err("Invalid cpu_list\n"); 2318 return -1; 2319 } 2320 2321 for (i = 0; i < map->nr; i++) { 2322 int cpu = map->map[i]; 2323 2324 if (cpu >= nr_cpus) { 2325 pr_err("Requested CPU %d too large. " 2326 "Consider raising MAX_NR_CPUS\n", cpu); 2327 goto out_delete_map; 2328 } 2329 2330 set_bit(cpu, cpu_bitmap); 2331 } 2332 2333 err = 0; 2334 2335 out_delete_map: 2336 perf_cpu_map__put(map); 2337 return err; 2338 } 2339 2340 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 2341 bool full) 2342 { 2343 if (session == NULL || fp == NULL) 2344 return; 2345 2346 fprintf(fp, "# ========\n"); 2347 perf_header__fprintf_info(session, fp, full); 2348 fprintf(fp, "# ========\n#\n"); 2349 } 2350 2351 2352 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 2353 const struct evsel_str_handler *assocs, 2354 size_t nr_assocs) 2355 { 2356 struct evsel *evsel; 2357 size_t i; 2358 int err; 2359 2360 for (i = 0; i < nr_assocs; i++) { 2361 /* 2362 * Adding a handler for an event not in the session, 2363 * just ignore it. 2364 */ 2365 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name); 2366 if (evsel == NULL) 2367 continue; 2368 2369 err = -EEXIST; 2370 if (evsel->handler != NULL) 2371 goto out; 2372 evsel->handler = assocs[i].handler; 2373 } 2374 2375 err = 0; 2376 out: 2377 return err; 2378 } 2379 2380 int perf_event__process_id_index(struct perf_session *session, 2381 union perf_event *event) 2382 { 2383 struct evlist *evlist = session->evlist; 2384 struct perf_record_id_index *ie = &event->id_index; 2385 size_t i, nr, max_nr; 2386 2387 max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) / 2388 sizeof(struct id_index_entry); 2389 nr = ie->nr; 2390 if (nr > max_nr) 2391 return -EINVAL; 2392 2393 if (dump_trace) 2394 fprintf(stdout, " nr: %zu\n", nr); 2395 2396 for (i = 0; i < nr; i++) { 2397 struct id_index_entry *e = &ie->entries[i]; 2398 struct perf_sample_id *sid; 2399 2400 if (dump_trace) { 2401 fprintf(stdout, " ... id: %"PRI_lu64, e->id); 2402 fprintf(stdout, " idx: %"PRI_lu64, e->idx); 2403 fprintf(stdout, " cpu: %"PRI_ld64, e->cpu); 2404 fprintf(stdout, " tid: %"PRI_ld64"\n", e->tid); 2405 } 2406 2407 sid = perf_evlist__id2sid(evlist, e->id); 2408 if (!sid) 2409 return -ENOENT; 2410 sid->idx = e->idx; 2411 sid->cpu = e->cpu; 2412 sid->tid = e->tid; 2413 } 2414 return 0; 2415 } 2416 2417 int perf_event__synthesize_id_index(struct perf_tool *tool, 2418 perf_event__handler_t process, 2419 struct evlist *evlist, 2420 struct machine *machine) 2421 { 2422 union perf_event *ev; 2423 struct evsel *evsel; 2424 size_t nr = 0, i = 0, sz, max_nr, n; 2425 int err; 2426 2427 pr_debug2("Synthesizing id index\n"); 2428 2429 max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / 2430 sizeof(struct id_index_entry); 2431 2432 evlist__for_each_entry(evlist, evsel) 2433 nr += evsel->ids; 2434 2435 n = nr > max_nr ? max_nr : nr; 2436 sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry); 2437 ev = zalloc(sz); 2438 if (!ev) 2439 return -ENOMEM; 2440 2441 ev->id_index.header.type = PERF_RECORD_ID_INDEX; 2442 ev->id_index.header.size = sz; 2443 ev->id_index.nr = n; 2444 2445 evlist__for_each_entry(evlist, evsel) { 2446 u32 j; 2447 2448 for (j = 0; j < evsel->ids; j++) { 2449 struct id_index_entry *e; 2450 struct perf_sample_id *sid; 2451 2452 if (i >= n) { 2453 err = process(tool, ev, NULL, machine); 2454 if (err) 2455 goto out_err; 2456 nr -= n; 2457 i = 0; 2458 } 2459 2460 e = &ev->id_index.entries[i++]; 2461 2462 e->id = evsel->id[j]; 2463 2464 sid = perf_evlist__id2sid(evlist, e->id); 2465 if (!sid) { 2466 free(ev); 2467 return -ENOENT; 2468 } 2469 2470 e->idx = sid->idx; 2471 e->cpu = sid->cpu; 2472 e->tid = sid->tid; 2473 } 2474 } 2475 2476 sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry); 2477 ev->id_index.header.size = sz; 2478 ev->id_index.nr = nr; 2479 2480 err = process(tool, ev, NULL, machine); 2481 out_err: 2482 free(ev); 2483 2484 return err; 2485 } 2486