1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <linux/err.h> 5 #include <linux/kernel.h> 6 #include <linux/zalloc.h> 7 #include <api/fs/fs.h> 8 9 #include <byteswap.h> 10 #include <unistd.h> 11 #include <sys/types.h> 12 #include <sys/mman.h> 13 #include <perf/cpumap.h> 14 15 #include "map_symbol.h" 16 #include "branch.h" 17 #include "debug.h" 18 #include "evlist.h" 19 #include "evsel.h" 20 #include "memswap.h" 21 #include "map.h" 22 #include "symbol.h" 23 #include "session.h" 24 #include "tool.h" 25 #include "cpumap.h" 26 #include "perf_regs.h" 27 #include "asm/bug.h" 28 #include "auxtrace.h" 29 #include "thread.h" 30 #include "thread-stack.h" 31 #include "sample-raw.h" 32 #include "stat.h" 33 #include "util.h" 34 #include "ui/progress.h" 35 #include "../perf.h" 36 #include "arch/common.h" 37 38 #ifdef HAVE_ZSTD_SUPPORT 39 static int perf_session__process_compressed_event(struct perf_session *session, 40 union perf_event *event, u64 file_offset) 41 { 42 void *src; 43 size_t decomp_size, src_size; 44 u64 decomp_last_rem = 0; 45 size_t mmap_len, decomp_len = session->header.env.comp_mmap_len; 46 struct decomp *decomp, *decomp_last = session->decomp_last; 47 48 if (decomp_last) { 49 decomp_last_rem = decomp_last->size - decomp_last->head; 50 decomp_len += decomp_last_rem; 51 } 52 53 mmap_len = sizeof(struct decomp) + decomp_len; 54 decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE, 55 MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); 56 if (decomp == MAP_FAILED) { 57 pr_err("Couldn't allocate memory for decompression\n"); 58 return -1; 59 } 60 61 decomp->file_pos = file_offset; 62 decomp->mmap_len = mmap_len; 63 decomp->head = 0; 64 65 if (decomp_last_rem) { 66 memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem); 67 decomp->size = decomp_last_rem; 68 } 69 70 src = (void *)event + sizeof(struct perf_record_compressed); 71 src_size = event->pack.header.size - sizeof(struct perf_record_compressed); 72 73 decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size, 74 &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem); 75 if (!decomp_size) { 76 munmap(decomp, mmap_len); 77 pr_err("Couldn't decompress data\n"); 78 return -1; 79 } 80 81 decomp->size += decomp_size; 82 83 if (session->decomp == NULL) { 84 session->decomp = decomp; 85 session->decomp_last = decomp; 86 } else { 87 session->decomp_last->next = decomp; 88 session->decomp_last = decomp; 89 } 90 91 pr_debug("decomp (B): %ld to %ld\n", src_size, decomp_size); 92 93 return 0; 94 } 95 #else /* !HAVE_ZSTD_SUPPORT */ 96 #define perf_session__process_compressed_event perf_session__process_compressed_event_stub 97 #endif 98 99 static int perf_session__deliver_event(struct perf_session *session, 100 union perf_event *event, 101 struct perf_tool *tool, 102 u64 file_offset); 103 104 static int perf_session__open(struct perf_session *session) 105 { 106 struct perf_data *data = session->data; 107 108 if (perf_session__read_header(session) < 0) { 109 pr_err("incompatible file format (rerun with -v to learn more)\n"); 110 return -1; 111 } 112 113 if (perf_data__is_pipe(data)) 114 return 0; 115 116 if (perf_header__has_feat(&session->header, HEADER_STAT)) 117 return 0; 118 119 if (!perf_evlist__valid_sample_type(session->evlist)) { 120 pr_err("non matching sample_type\n"); 121 return -1; 122 } 123 124 if (!perf_evlist__valid_sample_id_all(session->evlist)) { 125 pr_err("non matching sample_id_all\n"); 126 return -1; 127 } 128 129 if (!perf_evlist__valid_read_format(session->evlist)) { 130 pr_err("non matching read_format\n"); 131 return -1; 132 } 133 134 return 0; 135 } 136 137 void perf_session__set_id_hdr_size(struct perf_session *session) 138 { 139 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 140 141 machines__set_id_hdr_size(&session->machines, id_hdr_size); 142 } 143 144 int perf_session__create_kernel_maps(struct perf_session *session) 145 { 146 int ret = machine__create_kernel_maps(&session->machines.host); 147 148 if (ret >= 0) 149 ret = machines__create_guest_kernel_maps(&session->machines); 150 return ret; 151 } 152 153 static void perf_session__destroy_kernel_maps(struct perf_session *session) 154 { 155 machines__destroy_kernel_maps(&session->machines); 156 } 157 158 static bool perf_session__has_comm_exec(struct perf_session *session) 159 { 160 struct evsel *evsel; 161 162 evlist__for_each_entry(session->evlist, evsel) { 163 if (evsel->core.attr.comm_exec) 164 return true; 165 } 166 167 return false; 168 } 169 170 static void perf_session__set_comm_exec(struct perf_session *session) 171 { 172 bool comm_exec = perf_session__has_comm_exec(session); 173 174 machines__set_comm_exec(&session->machines, comm_exec); 175 } 176 177 static int ordered_events__deliver_event(struct ordered_events *oe, 178 struct ordered_event *event) 179 { 180 struct perf_session *session = container_of(oe, struct perf_session, 181 ordered_events); 182 183 return perf_session__deliver_event(session, event->event, 184 session->tool, event->file_offset); 185 } 186 187 struct perf_session *perf_session__new(struct perf_data *data, 188 bool repipe, struct perf_tool *tool) 189 { 190 struct perf_session *session = zalloc(sizeof(*session)); 191 192 if (!session) 193 goto out; 194 195 session->repipe = repipe; 196 session->tool = tool; 197 INIT_LIST_HEAD(&session->auxtrace_index); 198 machines__init(&session->machines); 199 ordered_events__init(&session->ordered_events, 200 ordered_events__deliver_event, NULL); 201 202 perf_env__init(&session->header.env); 203 if (data) { 204 if (perf_data__open(data)) 205 goto out_delete; 206 207 session->data = data; 208 209 if (perf_data__is_read(data)) { 210 if (perf_session__open(session) < 0) 211 goto out_delete; 212 213 /* 214 * set session attributes that are present in perf.data 215 * but not in pipe-mode. 216 */ 217 if (!data->is_pipe) { 218 perf_session__set_id_hdr_size(session); 219 perf_session__set_comm_exec(session); 220 } 221 222 perf_evlist__init_trace_event_sample_raw(session->evlist); 223 224 /* Open the directory data. */ 225 if (data->is_dir && perf_data__open_dir(data)) 226 goto out_delete; 227 } 228 } else { 229 session->machines.host.env = &perf_env; 230 } 231 232 session->machines.host.single_address_space = 233 perf_env__single_address_space(session->machines.host.env); 234 235 if (!data || perf_data__is_write(data)) { 236 /* 237 * In O_RDONLY mode this will be performed when reading the 238 * kernel MMAP event, in perf_event__process_mmap(). 239 */ 240 if (perf_session__create_kernel_maps(session) < 0) 241 pr_warning("Cannot read kernel map\n"); 242 } 243 244 /* 245 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is 246 * processed, so perf_evlist__sample_id_all is not meaningful here. 247 */ 248 if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps && 249 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { 250 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 251 tool->ordered_events = false; 252 } 253 254 return session; 255 256 out_delete: 257 perf_session__delete(session); 258 out: 259 return NULL; 260 } 261 262 static void perf_session__delete_threads(struct perf_session *session) 263 { 264 machine__delete_threads(&session->machines.host); 265 } 266 267 static void perf_session__release_decomp_events(struct perf_session *session) 268 { 269 struct decomp *next, *decomp; 270 size_t mmap_len; 271 next = session->decomp; 272 do { 273 decomp = next; 274 if (decomp == NULL) 275 break; 276 next = decomp->next; 277 mmap_len = decomp->mmap_len; 278 munmap(decomp, mmap_len); 279 } while (1); 280 } 281 282 void perf_session__delete(struct perf_session *session) 283 { 284 if (session == NULL) 285 return; 286 auxtrace__free(session); 287 auxtrace_index__free(&session->auxtrace_index); 288 perf_session__destroy_kernel_maps(session); 289 perf_session__delete_threads(session); 290 perf_session__release_decomp_events(session); 291 perf_env__exit(&session->header.env); 292 machines__exit(&session->machines); 293 if (session->data) 294 perf_data__close(session->data); 295 free(session); 296 } 297 298 static int process_event_synth_tracing_data_stub(struct perf_session *session 299 __maybe_unused, 300 union perf_event *event 301 __maybe_unused) 302 { 303 dump_printf(": unhandled!\n"); 304 return 0; 305 } 306 307 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused, 308 union perf_event *event __maybe_unused, 309 struct evlist **pevlist 310 __maybe_unused) 311 { 312 dump_printf(": unhandled!\n"); 313 return 0; 314 } 315 316 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused, 317 union perf_event *event __maybe_unused, 318 struct evlist **pevlist 319 __maybe_unused) 320 { 321 if (dump_trace) 322 perf_event__fprintf_event_update(event, stdout); 323 324 dump_printf(": unhandled!\n"); 325 return 0; 326 } 327 328 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, 329 union perf_event *event __maybe_unused, 330 struct perf_sample *sample __maybe_unused, 331 struct evsel *evsel __maybe_unused, 332 struct machine *machine __maybe_unused) 333 { 334 dump_printf(": unhandled!\n"); 335 return 0; 336 } 337 338 static int process_event_stub(struct perf_tool *tool __maybe_unused, 339 union perf_event *event __maybe_unused, 340 struct perf_sample *sample __maybe_unused, 341 struct machine *machine __maybe_unused) 342 { 343 dump_printf(": unhandled!\n"); 344 return 0; 345 } 346 347 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, 348 union perf_event *event __maybe_unused, 349 struct ordered_events *oe __maybe_unused) 350 { 351 dump_printf(": unhandled!\n"); 352 return 0; 353 } 354 355 static int process_finished_round(struct perf_tool *tool, 356 union perf_event *event, 357 struct ordered_events *oe); 358 359 static int skipn(int fd, off_t n) 360 { 361 char buf[4096]; 362 ssize_t ret; 363 364 while (n > 0) { 365 ret = read(fd, buf, min(n, (off_t)sizeof(buf))); 366 if (ret <= 0) 367 return ret; 368 n -= ret; 369 } 370 371 return 0; 372 } 373 374 static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused, 375 union perf_event *event) 376 { 377 dump_printf(": unhandled!\n"); 378 if (perf_data__is_pipe(session->data)) 379 skipn(perf_data__fd(session->data), event->auxtrace.size); 380 return event->auxtrace.size; 381 } 382 383 static int process_event_op2_stub(struct perf_session *session __maybe_unused, 384 union perf_event *event __maybe_unused) 385 { 386 dump_printf(": unhandled!\n"); 387 return 0; 388 } 389 390 391 static 392 int process_event_thread_map_stub(struct perf_session *session __maybe_unused, 393 union perf_event *event __maybe_unused) 394 { 395 if (dump_trace) 396 perf_event__fprintf_thread_map(event, stdout); 397 398 dump_printf(": unhandled!\n"); 399 return 0; 400 } 401 402 static 403 int process_event_cpu_map_stub(struct perf_session *session __maybe_unused, 404 union perf_event *event __maybe_unused) 405 { 406 if (dump_trace) 407 perf_event__fprintf_cpu_map(event, stdout); 408 409 dump_printf(": unhandled!\n"); 410 return 0; 411 } 412 413 static 414 int process_event_stat_config_stub(struct perf_session *session __maybe_unused, 415 union perf_event *event __maybe_unused) 416 { 417 if (dump_trace) 418 perf_event__fprintf_stat_config(event, stdout); 419 420 dump_printf(": unhandled!\n"); 421 return 0; 422 } 423 424 static int process_stat_stub(struct perf_session *perf_session __maybe_unused, 425 union perf_event *event) 426 { 427 if (dump_trace) 428 perf_event__fprintf_stat(event, stdout); 429 430 dump_printf(": unhandled!\n"); 431 return 0; 432 } 433 434 static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused, 435 union perf_event *event) 436 { 437 if (dump_trace) 438 perf_event__fprintf_stat_round(event, stdout); 439 440 dump_printf(": unhandled!\n"); 441 return 0; 442 } 443 444 static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused, 445 union perf_event *event __maybe_unused, 446 u64 file_offset __maybe_unused) 447 { 448 dump_printf(": unhandled!\n"); 449 return 0; 450 } 451 452 void perf_tool__fill_defaults(struct perf_tool *tool) 453 { 454 if (tool->sample == NULL) 455 tool->sample = process_event_sample_stub; 456 if (tool->mmap == NULL) 457 tool->mmap = process_event_stub; 458 if (tool->mmap2 == NULL) 459 tool->mmap2 = process_event_stub; 460 if (tool->comm == NULL) 461 tool->comm = process_event_stub; 462 if (tool->namespaces == NULL) 463 tool->namespaces = process_event_stub; 464 if (tool->fork == NULL) 465 tool->fork = process_event_stub; 466 if (tool->exit == NULL) 467 tool->exit = process_event_stub; 468 if (tool->lost == NULL) 469 tool->lost = perf_event__process_lost; 470 if (tool->lost_samples == NULL) 471 tool->lost_samples = perf_event__process_lost_samples; 472 if (tool->aux == NULL) 473 tool->aux = perf_event__process_aux; 474 if (tool->itrace_start == NULL) 475 tool->itrace_start = perf_event__process_itrace_start; 476 if (tool->context_switch == NULL) 477 tool->context_switch = perf_event__process_switch; 478 if (tool->ksymbol == NULL) 479 tool->ksymbol = perf_event__process_ksymbol; 480 if (tool->bpf == NULL) 481 tool->bpf = perf_event__process_bpf; 482 if (tool->read == NULL) 483 tool->read = process_event_sample_stub; 484 if (tool->throttle == NULL) 485 tool->throttle = process_event_stub; 486 if (tool->unthrottle == NULL) 487 tool->unthrottle = process_event_stub; 488 if (tool->attr == NULL) 489 tool->attr = process_event_synth_attr_stub; 490 if (tool->event_update == NULL) 491 tool->event_update = process_event_synth_event_update_stub; 492 if (tool->tracing_data == NULL) 493 tool->tracing_data = process_event_synth_tracing_data_stub; 494 if (tool->build_id == NULL) 495 tool->build_id = process_event_op2_stub; 496 if (tool->finished_round == NULL) { 497 if (tool->ordered_events) 498 tool->finished_round = process_finished_round; 499 else 500 tool->finished_round = process_finished_round_stub; 501 } 502 if (tool->id_index == NULL) 503 tool->id_index = process_event_op2_stub; 504 if (tool->auxtrace_info == NULL) 505 tool->auxtrace_info = process_event_op2_stub; 506 if (tool->auxtrace == NULL) 507 tool->auxtrace = process_event_auxtrace_stub; 508 if (tool->auxtrace_error == NULL) 509 tool->auxtrace_error = process_event_op2_stub; 510 if (tool->thread_map == NULL) 511 tool->thread_map = process_event_thread_map_stub; 512 if (tool->cpu_map == NULL) 513 tool->cpu_map = process_event_cpu_map_stub; 514 if (tool->stat_config == NULL) 515 tool->stat_config = process_event_stat_config_stub; 516 if (tool->stat == NULL) 517 tool->stat = process_stat_stub; 518 if (tool->stat_round == NULL) 519 tool->stat_round = process_stat_round_stub; 520 if (tool->time_conv == NULL) 521 tool->time_conv = process_event_op2_stub; 522 if (tool->feature == NULL) 523 tool->feature = process_event_op2_stub; 524 if (tool->compressed == NULL) 525 tool->compressed = perf_session__process_compressed_event; 526 } 527 528 static void swap_sample_id_all(union perf_event *event, void *data) 529 { 530 void *end = (void *) event + event->header.size; 531 int size = end - data; 532 533 BUG_ON(size % sizeof(u64)); 534 mem_bswap_64(data, size); 535 } 536 537 static void perf_event__all64_swap(union perf_event *event, 538 bool sample_id_all __maybe_unused) 539 { 540 struct perf_event_header *hdr = &event->header; 541 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 542 } 543 544 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) 545 { 546 event->comm.pid = bswap_32(event->comm.pid); 547 event->comm.tid = bswap_32(event->comm.tid); 548 549 if (sample_id_all) { 550 void *data = &event->comm.comm; 551 552 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 553 swap_sample_id_all(event, data); 554 } 555 } 556 557 static void perf_event__mmap_swap(union perf_event *event, 558 bool sample_id_all) 559 { 560 event->mmap.pid = bswap_32(event->mmap.pid); 561 event->mmap.tid = bswap_32(event->mmap.tid); 562 event->mmap.start = bswap_64(event->mmap.start); 563 event->mmap.len = bswap_64(event->mmap.len); 564 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 565 566 if (sample_id_all) { 567 void *data = &event->mmap.filename; 568 569 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 570 swap_sample_id_all(event, data); 571 } 572 } 573 574 static void perf_event__mmap2_swap(union perf_event *event, 575 bool sample_id_all) 576 { 577 event->mmap2.pid = bswap_32(event->mmap2.pid); 578 event->mmap2.tid = bswap_32(event->mmap2.tid); 579 event->mmap2.start = bswap_64(event->mmap2.start); 580 event->mmap2.len = bswap_64(event->mmap2.len); 581 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); 582 event->mmap2.maj = bswap_32(event->mmap2.maj); 583 event->mmap2.min = bswap_32(event->mmap2.min); 584 event->mmap2.ino = bswap_64(event->mmap2.ino); 585 586 if (sample_id_all) { 587 void *data = &event->mmap2.filename; 588 589 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 590 swap_sample_id_all(event, data); 591 } 592 } 593 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 594 { 595 event->fork.pid = bswap_32(event->fork.pid); 596 event->fork.tid = bswap_32(event->fork.tid); 597 event->fork.ppid = bswap_32(event->fork.ppid); 598 event->fork.ptid = bswap_32(event->fork.ptid); 599 event->fork.time = bswap_64(event->fork.time); 600 601 if (sample_id_all) 602 swap_sample_id_all(event, &event->fork + 1); 603 } 604 605 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 606 { 607 event->read.pid = bswap_32(event->read.pid); 608 event->read.tid = bswap_32(event->read.tid); 609 event->read.value = bswap_64(event->read.value); 610 event->read.time_enabled = bswap_64(event->read.time_enabled); 611 event->read.time_running = bswap_64(event->read.time_running); 612 event->read.id = bswap_64(event->read.id); 613 614 if (sample_id_all) 615 swap_sample_id_all(event, &event->read + 1); 616 } 617 618 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all) 619 { 620 event->aux.aux_offset = bswap_64(event->aux.aux_offset); 621 event->aux.aux_size = bswap_64(event->aux.aux_size); 622 event->aux.flags = bswap_64(event->aux.flags); 623 624 if (sample_id_all) 625 swap_sample_id_all(event, &event->aux + 1); 626 } 627 628 static void perf_event__itrace_start_swap(union perf_event *event, 629 bool sample_id_all) 630 { 631 event->itrace_start.pid = bswap_32(event->itrace_start.pid); 632 event->itrace_start.tid = bswap_32(event->itrace_start.tid); 633 634 if (sample_id_all) 635 swap_sample_id_all(event, &event->itrace_start + 1); 636 } 637 638 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all) 639 { 640 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { 641 event->context_switch.next_prev_pid = 642 bswap_32(event->context_switch.next_prev_pid); 643 event->context_switch.next_prev_tid = 644 bswap_32(event->context_switch.next_prev_tid); 645 } 646 647 if (sample_id_all) 648 swap_sample_id_all(event, &event->context_switch + 1); 649 } 650 651 static void perf_event__throttle_swap(union perf_event *event, 652 bool sample_id_all) 653 { 654 event->throttle.time = bswap_64(event->throttle.time); 655 event->throttle.id = bswap_64(event->throttle.id); 656 event->throttle.stream_id = bswap_64(event->throttle.stream_id); 657 658 if (sample_id_all) 659 swap_sample_id_all(event, &event->throttle + 1); 660 } 661 662 static void perf_event__namespaces_swap(union perf_event *event, 663 bool sample_id_all) 664 { 665 u64 i; 666 667 event->namespaces.pid = bswap_32(event->namespaces.pid); 668 event->namespaces.tid = bswap_32(event->namespaces.tid); 669 event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces); 670 671 for (i = 0; i < event->namespaces.nr_namespaces; i++) { 672 struct perf_ns_link_info *ns = &event->namespaces.link_info[i]; 673 674 ns->dev = bswap_64(ns->dev); 675 ns->ino = bswap_64(ns->ino); 676 } 677 678 if (sample_id_all) 679 swap_sample_id_all(event, &event->namespaces.link_info[i]); 680 } 681 682 static u8 revbyte(u8 b) 683 { 684 int rev = (b >> 4) | ((b & 0xf) << 4); 685 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 686 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 687 return (u8) rev; 688 } 689 690 /* 691 * XXX this is hack in attempt to carry flags bitfield 692 * through endian village. ABI says: 693 * 694 * Bit-fields are allocated from right to left (least to most significant) 695 * on little-endian implementations and from left to right (most to least 696 * significant) on big-endian implementations. 697 * 698 * The above seems to be byte specific, so we need to reverse each 699 * byte of the bitfield. 'Internet' also says this might be implementation 700 * specific and we probably need proper fix and carry perf_event_attr 701 * bitfield flags in separate data file FEAT_ section. Thought this seems 702 * to work for now. 703 */ 704 static void swap_bitfield(u8 *p, unsigned len) 705 { 706 unsigned i; 707 708 for (i = 0; i < len; i++) { 709 *p = revbyte(*p); 710 p++; 711 } 712 } 713 714 /* exported for swapping attributes in file header */ 715 void perf_event__attr_swap(struct perf_event_attr *attr) 716 { 717 attr->type = bswap_32(attr->type); 718 attr->size = bswap_32(attr->size); 719 720 #define bswap_safe(f, n) \ 721 (attr->size > (offsetof(struct perf_event_attr, f) + \ 722 sizeof(attr->f) * (n))) 723 #define bswap_field(f, sz) \ 724 do { \ 725 if (bswap_safe(f, 0)) \ 726 attr->f = bswap_##sz(attr->f); \ 727 } while(0) 728 #define bswap_field_16(f) bswap_field(f, 16) 729 #define bswap_field_32(f) bswap_field(f, 32) 730 #define bswap_field_64(f) bswap_field(f, 64) 731 732 bswap_field_64(config); 733 bswap_field_64(sample_period); 734 bswap_field_64(sample_type); 735 bswap_field_64(read_format); 736 bswap_field_32(wakeup_events); 737 bswap_field_32(bp_type); 738 bswap_field_64(bp_addr); 739 bswap_field_64(bp_len); 740 bswap_field_64(branch_sample_type); 741 bswap_field_64(sample_regs_user); 742 bswap_field_32(sample_stack_user); 743 bswap_field_32(aux_watermark); 744 bswap_field_16(sample_max_stack); 745 746 /* 747 * After read_format are bitfields. Check read_format because 748 * we are unable to use offsetof on bitfield. 749 */ 750 if (bswap_safe(read_format, 1)) 751 swap_bitfield((u8 *) (&attr->read_format + 1), 752 sizeof(u64)); 753 #undef bswap_field_64 754 #undef bswap_field_32 755 #undef bswap_field 756 #undef bswap_safe 757 } 758 759 static void perf_event__hdr_attr_swap(union perf_event *event, 760 bool sample_id_all __maybe_unused) 761 { 762 size_t size; 763 764 perf_event__attr_swap(&event->attr.attr); 765 766 size = event->header.size; 767 size -= (void *)&event->attr.id - (void *)event; 768 mem_bswap_64(event->attr.id, size); 769 } 770 771 static void perf_event__event_update_swap(union perf_event *event, 772 bool sample_id_all __maybe_unused) 773 { 774 event->event_update.type = bswap_64(event->event_update.type); 775 event->event_update.id = bswap_64(event->event_update.id); 776 } 777 778 static void perf_event__event_type_swap(union perf_event *event, 779 bool sample_id_all __maybe_unused) 780 { 781 event->event_type.event_type.event_id = 782 bswap_64(event->event_type.event_type.event_id); 783 } 784 785 static void perf_event__tracing_data_swap(union perf_event *event, 786 bool sample_id_all __maybe_unused) 787 { 788 event->tracing_data.size = bswap_32(event->tracing_data.size); 789 } 790 791 static void perf_event__auxtrace_info_swap(union perf_event *event, 792 bool sample_id_all __maybe_unused) 793 { 794 size_t size; 795 796 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); 797 798 size = event->header.size; 799 size -= (void *)&event->auxtrace_info.priv - (void *)event; 800 mem_bswap_64(event->auxtrace_info.priv, size); 801 } 802 803 static void perf_event__auxtrace_swap(union perf_event *event, 804 bool sample_id_all __maybe_unused) 805 { 806 event->auxtrace.size = bswap_64(event->auxtrace.size); 807 event->auxtrace.offset = bswap_64(event->auxtrace.offset); 808 event->auxtrace.reference = bswap_64(event->auxtrace.reference); 809 event->auxtrace.idx = bswap_32(event->auxtrace.idx); 810 event->auxtrace.tid = bswap_32(event->auxtrace.tid); 811 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); 812 } 813 814 static void perf_event__auxtrace_error_swap(union perf_event *event, 815 bool sample_id_all __maybe_unused) 816 { 817 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); 818 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); 819 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); 820 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); 821 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); 822 event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt); 823 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); 824 if (event->auxtrace_error.fmt) 825 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time); 826 } 827 828 static void perf_event__thread_map_swap(union perf_event *event, 829 bool sample_id_all __maybe_unused) 830 { 831 unsigned i; 832 833 event->thread_map.nr = bswap_64(event->thread_map.nr); 834 835 for (i = 0; i < event->thread_map.nr; i++) 836 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid); 837 } 838 839 static void perf_event__cpu_map_swap(union perf_event *event, 840 bool sample_id_all __maybe_unused) 841 { 842 struct perf_record_cpu_map_data *data = &event->cpu_map.data; 843 struct cpu_map_entries *cpus; 844 struct perf_record_record_cpu_map *mask; 845 unsigned i; 846 847 data->type = bswap_64(data->type); 848 849 switch (data->type) { 850 case PERF_CPU_MAP__CPUS: 851 cpus = (struct cpu_map_entries *)data->data; 852 853 cpus->nr = bswap_16(cpus->nr); 854 855 for (i = 0; i < cpus->nr; i++) 856 cpus->cpu[i] = bswap_16(cpus->cpu[i]); 857 break; 858 case PERF_CPU_MAP__MASK: 859 mask = (struct perf_record_record_cpu_map *)data->data; 860 861 mask->nr = bswap_16(mask->nr); 862 mask->long_size = bswap_16(mask->long_size); 863 864 switch (mask->long_size) { 865 case 4: mem_bswap_32(&mask->mask, mask->nr); break; 866 case 8: mem_bswap_64(&mask->mask, mask->nr); break; 867 default: 868 pr_err("cpu_map swap: unsupported long size\n"); 869 } 870 default: 871 break; 872 } 873 } 874 875 static void perf_event__stat_config_swap(union perf_event *event, 876 bool sample_id_all __maybe_unused) 877 { 878 u64 size; 879 880 size = event->stat_config.nr * sizeof(event->stat_config.data[0]); 881 size += 1; /* nr item itself */ 882 mem_bswap_64(&event->stat_config.nr, size); 883 } 884 885 static void perf_event__stat_swap(union perf_event *event, 886 bool sample_id_all __maybe_unused) 887 { 888 event->stat.id = bswap_64(event->stat.id); 889 event->stat.thread = bswap_32(event->stat.thread); 890 event->stat.cpu = bswap_32(event->stat.cpu); 891 event->stat.val = bswap_64(event->stat.val); 892 event->stat.ena = bswap_64(event->stat.ena); 893 event->stat.run = bswap_64(event->stat.run); 894 } 895 896 static void perf_event__stat_round_swap(union perf_event *event, 897 bool sample_id_all __maybe_unused) 898 { 899 event->stat_round.type = bswap_64(event->stat_round.type); 900 event->stat_round.time = bswap_64(event->stat_round.time); 901 } 902 903 typedef void (*perf_event__swap_op)(union perf_event *event, 904 bool sample_id_all); 905 906 static perf_event__swap_op perf_event__swap_ops[] = { 907 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 908 [PERF_RECORD_MMAP2] = perf_event__mmap2_swap, 909 [PERF_RECORD_COMM] = perf_event__comm_swap, 910 [PERF_RECORD_FORK] = perf_event__task_swap, 911 [PERF_RECORD_EXIT] = perf_event__task_swap, 912 [PERF_RECORD_LOST] = perf_event__all64_swap, 913 [PERF_RECORD_READ] = perf_event__read_swap, 914 [PERF_RECORD_THROTTLE] = perf_event__throttle_swap, 915 [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap, 916 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 917 [PERF_RECORD_AUX] = perf_event__aux_swap, 918 [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap, 919 [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap, 920 [PERF_RECORD_SWITCH] = perf_event__switch_swap, 921 [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, 922 [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap, 923 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 924 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 925 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 926 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 927 [PERF_RECORD_ID_INDEX] = perf_event__all64_swap, 928 [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap, 929 [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap, 930 [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap, 931 [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap, 932 [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap, 933 [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap, 934 [PERF_RECORD_STAT] = perf_event__stat_swap, 935 [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap, 936 [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap, 937 [PERF_RECORD_TIME_CONV] = perf_event__all64_swap, 938 [PERF_RECORD_HEADER_MAX] = NULL, 939 }; 940 941 /* 942 * When perf record finishes a pass on every buffers, it records this pseudo 943 * event. 944 * We record the max timestamp t found in the pass n. 945 * Assuming these timestamps are monotonic across cpus, we know that if 946 * a buffer still has events with timestamps below t, they will be all 947 * available and then read in the pass n + 1. 948 * Hence when we start to read the pass n + 2, we can safely flush every 949 * events with timestamps below t. 950 * 951 * ============ PASS n ================= 952 * CPU 0 | CPU 1 953 * | 954 * cnt1 timestamps | cnt2 timestamps 955 * 1 | 2 956 * 2 | 3 957 * - | 4 <--- max recorded 958 * 959 * ============ PASS n + 1 ============== 960 * CPU 0 | CPU 1 961 * | 962 * cnt1 timestamps | cnt2 timestamps 963 * 3 | 5 964 * 4 | 6 965 * 5 | 7 <---- max recorded 966 * 967 * Flush every events below timestamp 4 968 * 969 * ============ PASS n + 2 ============== 970 * CPU 0 | CPU 1 971 * | 972 * cnt1 timestamps | cnt2 timestamps 973 * 6 | 8 974 * 7 | 9 975 * - | 10 976 * 977 * Flush every events below timestamp 7 978 * etc... 979 */ 980 static int process_finished_round(struct perf_tool *tool __maybe_unused, 981 union perf_event *event __maybe_unused, 982 struct ordered_events *oe) 983 { 984 if (dump_trace) 985 fprintf(stdout, "\n"); 986 return ordered_events__flush(oe, OE_FLUSH__ROUND); 987 } 988 989 int perf_session__queue_event(struct perf_session *s, union perf_event *event, 990 u64 timestamp, u64 file_offset) 991 { 992 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset); 993 } 994 995 static void callchain__lbr_callstack_printf(struct perf_sample *sample) 996 { 997 struct ip_callchain *callchain = sample->callchain; 998 struct branch_stack *lbr_stack = sample->branch_stack; 999 u64 kernel_callchain_nr = callchain->nr; 1000 unsigned int i; 1001 1002 for (i = 0; i < kernel_callchain_nr; i++) { 1003 if (callchain->ips[i] == PERF_CONTEXT_USER) 1004 break; 1005 } 1006 1007 if ((i != kernel_callchain_nr) && lbr_stack->nr) { 1008 u64 total_nr; 1009 /* 1010 * LBR callstack can only get user call chain, 1011 * i is kernel call chain number, 1012 * 1 is PERF_CONTEXT_USER. 1013 * 1014 * The user call chain is stored in LBR registers. 1015 * LBR are pair registers. The caller is stored 1016 * in "from" register, while the callee is stored 1017 * in "to" register. 1018 * For example, there is a call stack 1019 * "A"->"B"->"C"->"D". 1020 * The LBR registers will recorde like 1021 * "C"->"D", "B"->"C", "A"->"B". 1022 * So only the first "to" register and all "from" 1023 * registers are needed to construct the whole stack. 1024 */ 1025 total_nr = i + 1 + lbr_stack->nr + 1; 1026 kernel_callchain_nr = i + 1; 1027 1028 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr); 1029 1030 for (i = 0; i < kernel_callchain_nr; i++) 1031 printf("..... %2d: %016" PRIx64 "\n", 1032 i, callchain->ips[i]); 1033 1034 printf("..... %2d: %016" PRIx64 "\n", 1035 (int)(kernel_callchain_nr), lbr_stack->entries[0].to); 1036 for (i = 0; i < lbr_stack->nr; i++) 1037 printf("..... %2d: %016" PRIx64 "\n", 1038 (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from); 1039 } 1040 } 1041 1042 static void callchain__printf(struct evsel *evsel, 1043 struct perf_sample *sample) 1044 { 1045 unsigned int i; 1046 struct ip_callchain *callchain = sample->callchain; 1047 1048 if (perf_evsel__has_branch_callstack(evsel)) 1049 callchain__lbr_callstack_printf(sample); 1050 1051 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr); 1052 1053 for (i = 0; i < callchain->nr; i++) 1054 printf("..... %2d: %016" PRIx64 "\n", 1055 i, callchain->ips[i]); 1056 } 1057 1058 static void branch_stack__printf(struct perf_sample *sample, bool callstack) 1059 { 1060 uint64_t i; 1061 1062 printf("%s: nr:%" PRIu64 "\n", 1063 !callstack ? "... branch stack" : "... branch callstack", 1064 sample->branch_stack->nr); 1065 1066 for (i = 0; i < sample->branch_stack->nr; i++) { 1067 struct branch_entry *e = &sample->branch_stack->entries[i]; 1068 1069 if (!callstack) { 1070 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n", 1071 i, e->from, e->to, 1072 (unsigned short)e->flags.cycles, 1073 e->flags.mispred ? "M" : " ", 1074 e->flags.predicted ? "P" : " ", 1075 e->flags.abort ? "A" : " ", 1076 e->flags.in_tx ? "T" : " ", 1077 (unsigned)e->flags.reserved); 1078 } else { 1079 printf("..... %2"PRIu64": %016" PRIx64 "\n", 1080 i, i > 0 ? e->from : e->to); 1081 } 1082 } 1083 } 1084 1085 static void regs_dump__printf(u64 mask, u64 *regs) 1086 { 1087 unsigned rid, i = 0; 1088 1089 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { 1090 u64 val = regs[i++]; 1091 1092 printf(".... %-5s 0x%" PRIx64 "\n", 1093 perf_reg_name(rid), val); 1094 } 1095 } 1096 1097 static const char *regs_abi[] = { 1098 [PERF_SAMPLE_REGS_ABI_NONE] = "none", 1099 [PERF_SAMPLE_REGS_ABI_32] = "32-bit", 1100 [PERF_SAMPLE_REGS_ABI_64] = "64-bit", 1101 }; 1102 1103 static inline const char *regs_dump_abi(struct regs_dump *d) 1104 { 1105 if (d->abi > PERF_SAMPLE_REGS_ABI_64) 1106 return "unknown"; 1107 1108 return regs_abi[d->abi]; 1109 } 1110 1111 static void regs__printf(const char *type, struct regs_dump *regs) 1112 { 1113 u64 mask = regs->mask; 1114 1115 printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n", 1116 type, 1117 mask, 1118 regs_dump_abi(regs)); 1119 1120 regs_dump__printf(mask, regs->regs); 1121 } 1122 1123 static void regs_user__printf(struct perf_sample *sample) 1124 { 1125 struct regs_dump *user_regs = &sample->user_regs; 1126 1127 if (user_regs->regs) 1128 regs__printf("user", user_regs); 1129 } 1130 1131 static void regs_intr__printf(struct perf_sample *sample) 1132 { 1133 struct regs_dump *intr_regs = &sample->intr_regs; 1134 1135 if (intr_regs->regs) 1136 regs__printf("intr", intr_regs); 1137 } 1138 1139 static void stack_user__printf(struct stack_dump *dump) 1140 { 1141 printf("... ustack: size %" PRIu64 ", offset 0x%x\n", 1142 dump->size, dump->offset); 1143 } 1144 1145 static void perf_evlist__print_tstamp(struct evlist *evlist, 1146 union perf_event *event, 1147 struct perf_sample *sample) 1148 { 1149 u64 sample_type = __perf_evlist__combined_sample_type(evlist); 1150 1151 if (event->header.type != PERF_RECORD_SAMPLE && 1152 !perf_evlist__sample_id_all(evlist)) { 1153 fputs("-1 -1 ", stdout); 1154 return; 1155 } 1156 1157 if ((sample_type & PERF_SAMPLE_CPU)) 1158 printf("%u ", sample->cpu); 1159 1160 if (sample_type & PERF_SAMPLE_TIME) 1161 printf("%" PRIu64 " ", sample->time); 1162 } 1163 1164 static void sample_read__printf(struct perf_sample *sample, u64 read_format) 1165 { 1166 printf("... sample_read:\n"); 1167 1168 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1169 printf("...... time enabled %016" PRIx64 "\n", 1170 sample->read.time_enabled); 1171 1172 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1173 printf("...... time running %016" PRIx64 "\n", 1174 sample->read.time_running); 1175 1176 if (read_format & PERF_FORMAT_GROUP) { 1177 u64 i; 1178 1179 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); 1180 1181 for (i = 0; i < sample->read.group.nr; i++) { 1182 struct sample_read_value *value; 1183 1184 value = &sample->read.group.values[i]; 1185 printf("..... id %016" PRIx64 1186 ", value %016" PRIx64 "\n", 1187 value->id, value->value); 1188 } 1189 } else 1190 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n", 1191 sample->read.one.id, sample->read.one.value); 1192 } 1193 1194 static void dump_event(struct evlist *evlist, union perf_event *event, 1195 u64 file_offset, struct perf_sample *sample) 1196 { 1197 if (!dump_trace) 1198 return; 1199 1200 printf("\n%#" PRIx64 " [%#x]: event: %d\n", 1201 file_offset, event->header.size, event->header.type); 1202 1203 trace_event(event); 1204 if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw) 1205 evlist->trace_event_sample_raw(evlist, event, sample); 1206 1207 if (sample) 1208 perf_evlist__print_tstamp(evlist, event, sample); 1209 1210 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 1211 event->header.size, perf_event__name(event->header.type)); 1212 } 1213 1214 static void dump_sample(struct evsel *evsel, union perf_event *event, 1215 struct perf_sample *sample) 1216 { 1217 u64 sample_type; 1218 1219 if (!dump_trace) 1220 return; 1221 1222 printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", 1223 event->header.misc, sample->pid, sample->tid, sample->ip, 1224 sample->period, sample->addr); 1225 1226 sample_type = evsel->core.attr.sample_type; 1227 1228 if (evsel__has_callchain(evsel)) 1229 callchain__printf(evsel, sample); 1230 1231 if (sample_type & PERF_SAMPLE_BRANCH_STACK) 1232 branch_stack__printf(sample, perf_evsel__has_branch_callstack(evsel)); 1233 1234 if (sample_type & PERF_SAMPLE_REGS_USER) 1235 regs_user__printf(sample); 1236 1237 if (sample_type & PERF_SAMPLE_REGS_INTR) 1238 regs_intr__printf(sample); 1239 1240 if (sample_type & PERF_SAMPLE_STACK_USER) 1241 stack_user__printf(&sample->user_stack); 1242 1243 if (sample_type & PERF_SAMPLE_WEIGHT) 1244 printf("... weight: %" PRIu64 "\n", sample->weight); 1245 1246 if (sample_type & PERF_SAMPLE_DATA_SRC) 1247 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); 1248 1249 if (sample_type & PERF_SAMPLE_PHYS_ADDR) 1250 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr); 1251 1252 if (sample_type & PERF_SAMPLE_TRANSACTION) 1253 printf("... transaction: %" PRIx64 "\n", sample->transaction); 1254 1255 if (sample_type & PERF_SAMPLE_READ) 1256 sample_read__printf(sample, evsel->core.attr.read_format); 1257 } 1258 1259 static void dump_read(struct evsel *evsel, union perf_event *event) 1260 { 1261 struct perf_record_read *read_event = &event->read; 1262 u64 read_format; 1263 1264 if (!dump_trace) 1265 return; 1266 1267 printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid, 1268 perf_evsel__name(evsel), 1269 event->read.value); 1270 1271 if (!evsel) 1272 return; 1273 1274 read_format = evsel->core.attr.read_format; 1275 1276 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1277 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled); 1278 1279 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1280 printf("... time running : %" PRI_lu64 "\n", read_event->time_running); 1281 1282 if (read_format & PERF_FORMAT_ID) 1283 printf("... id : %" PRI_lu64 "\n", read_event->id); 1284 } 1285 1286 static struct machine *machines__find_for_cpumode(struct machines *machines, 1287 union perf_event *event, 1288 struct perf_sample *sample) 1289 { 1290 struct machine *machine; 1291 1292 if (perf_guest && 1293 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || 1294 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) { 1295 u32 pid; 1296 1297 if (event->header.type == PERF_RECORD_MMAP 1298 || event->header.type == PERF_RECORD_MMAP2) 1299 pid = event->mmap.pid; 1300 else 1301 pid = sample->pid; 1302 1303 machine = machines__find(machines, pid); 1304 if (!machine) 1305 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID); 1306 return machine; 1307 } 1308 1309 return &machines->host; 1310 } 1311 1312 static int deliver_sample_value(struct evlist *evlist, 1313 struct perf_tool *tool, 1314 union perf_event *event, 1315 struct perf_sample *sample, 1316 struct sample_read_value *v, 1317 struct machine *machine) 1318 { 1319 struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id); 1320 1321 if (sid) { 1322 sample->id = v->id; 1323 sample->period = v->value - sid->period; 1324 sid->period = v->value; 1325 } 1326 1327 if (!sid || sid->evsel == NULL) { 1328 ++evlist->stats.nr_unknown_id; 1329 return 0; 1330 } 1331 1332 /* 1333 * There's no reason to deliver sample 1334 * for zero period, bail out. 1335 */ 1336 if (!sample->period) 1337 return 0; 1338 1339 return tool->sample(tool, event, sample, sid->evsel, machine); 1340 } 1341 1342 static int deliver_sample_group(struct evlist *evlist, 1343 struct perf_tool *tool, 1344 union perf_event *event, 1345 struct perf_sample *sample, 1346 struct machine *machine) 1347 { 1348 int ret = -EINVAL; 1349 u64 i; 1350 1351 for (i = 0; i < sample->read.group.nr; i++) { 1352 ret = deliver_sample_value(evlist, tool, event, sample, 1353 &sample->read.group.values[i], 1354 machine); 1355 if (ret) 1356 break; 1357 } 1358 1359 return ret; 1360 } 1361 1362 static int 1363 perf_evlist__deliver_sample(struct evlist *evlist, 1364 struct perf_tool *tool, 1365 union perf_event *event, 1366 struct perf_sample *sample, 1367 struct evsel *evsel, 1368 struct machine *machine) 1369 { 1370 /* We know evsel != NULL. */ 1371 u64 sample_type = evsel->core.attr.sample_type; 1372 u64 read_format = evsel->core.attr.read_format; 1373 1374 /* Standard sample delivery. */ 1375 if (!(sample_type & PERF_SAMPLE_READ)) 1376 return tool->sample(tool, event, sample, evsel, machine); 1377 1378 /* For PERF_SAMPLE_READ we have either single or group mode. */ 1379 if (read_format & PERF_FORMAT_GROUP) 1380 return deliver_sample_group(evlist, tool, event, sample, 1381 machine); 1382 else 1383 return deliver_sample_value(evlist, tool, event, sample, 1384 &sample->read.one, machine); 1385 } 1386 1387 static int machines__deliver_event(struct machines *machines, 1388 struct evlist *evlist, 1389 union perf_event *event, 1390 struct perf_sample *sample, 1391 struct perf_tool *tool, u64 file_offset) 1392 { 1393 struct evsel *evsel; 1394 struct machine *machine; 1395 1396 dump_event(evlist, event, file_offset, sample); 1397 1398 evsel = perf_evlist__id2evsel(evlist, sample->id); 1399 1400 machine = machines__find_for_cpumode(machines, event, sample); 1401 1402 switch (event->header.type) { 1403 case PERF_RECORD_SAMPLE: 1404 if (evsel == NULL) { 1405 ++evlist->stats.nr_unknown_id; 1406 return 0; 1407 } 1408 dump_sample(evsel, event, sample); 1409 if (machine == NULL) { 1410 ++evlist->stats.nr_unprocessable_samples; 1411 return 0; 1412 } 1413 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); 1414 case PERF_RECORD_MMAP: 1415 return tool->mmap(tool, event, sample, machine); 1416 case PERF_RECORD_MMAP2: 1417 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) 1418 ++evlist->stats.nr_proc_map_timeout; 1419 return tool->mmap2(tool, event, sample, machine); 1420 case PERF_RECORD_COMM: 1421 return tool->comm(tool, event, sample, machine); 1422 case PERF_RECORD_NAMESPACES: 1423 return tool->namespaces(tool, event, sample, machine); 1424 case PERF_RECORD_FORK: 1425 return tool->fork(tool, event, sample, machine); 1426 case PERF_RECORD_EXIT: 1427 return tool->exit(tool, event, sample, machine); 1428 case PERF_RECORD_LOST: 1429 if (tool->lost == perf_event__process_lost) 1430 evlist->stats.total_lost += event->lost.lost; 1431 return tool->lost(tool, event, sample, machine); 1432 case PERF_RECORD_LOST_SAMPLES: 1433 if (tool->lost_samples == perf_event__process_lost_samples) 1434 evlist->stats.total_lost_samples += event->lost_samples.lost; 1435 return tool->lost_samples(tool, event, sample, machine); 1436 case PERF_RECORD_READ: 1437 dump_read(evsel, event); 1438 return tool->read(tool, event, sample, evsel, machine); 1439 case PERF_RECORD_THROTTLE: 1440 return tool->throttle(tool, event, sample, machine); 1441 case PERF_RECORD_UNTHROTTLE: 1442 return tool->unthrottle(tool, event, sample, machine); 1443 case PERF_RECORD_AUX: 1444 if (tool->aux == perf_event__process_aux) { 1445 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) 1446 evlist->stats.total_aux_lost += 1; 1447 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL) 1448 evlist->stats.total_aux_partial += 1; 1449 } 1450 return tool->aux(tool, event, sample, machine); 1451 case PERF_RECORD_ITRACE_START: 1452 return tool->itrace_start(tool, event, sample, machine); 1453 case PERF_RECORD_SWITCH: 1454 case PERF_RECORD_SWITCH_CPU_WIDE: 1455 return tool->context_switch(tool, event, sample, machine); 1456 case PERF_RECORD_KSYMBOL: 1457 return tool->ksymbol(tool, event, sample, machine); 1458 case PERF_RECORD_BPF_EVENT: 1459 return tool->bpf(tool, event, sample, machine); 1460 default: 1461 ++evlist->stats.nr_unknown_events; 1462 return -1; 1463 } 1464 } 1465 1466 static int perf_session__deliver_event(struct perf_session *session, 1467 union perf_event *event, 1468 struct perf_tool *tool, 1469 u64 file_offset) 1470 { 1471 struct perf_sample sample; 1472 int ret; 1473 1474 ret = perf_evlist__parse_sample(session->evlist, event, &sample); 1475 if (ret) { 1476 pr_err("Can't parse sample, err = %d\n", ret); 1477 return ret; 1478 } 1479 1480 ret = auxtrace__process_event(session, event, &sample, tool); 1481 if (ret < 0) 1482 return ret; 1483 if (ret > 0) 1484 return 0; 1485 1486 return machines__deliver_event(&session->machines, session->evlist, 1487 event, &sample, tool, file_offset); 1488 } 1489 1490 static s64 perf_session__process_user_event(struct perf_session *session, 1491 union perf_event *event, 1492 u64 file_offset) 1493 { 1494 struct ordered_events *oe = &session->ordered_events; 1495 struct perf_tool *tool = session->tool; 1496 struct perf_sample sample = { .time = 0, }; 1497 int fd = perf_data__fd(session->data); 1498 int err; 1499 1500 if (event->header.type != PERF_RECORD_COMPRESSED || 1501 tool->compressed == perf_session__process_compressed_event_stub) 1502 dump_event(session->evlist, event, file_offset, &sample); 1503 1504 /* These events are processed right away */ 1505 switch (event->header.type) { 1506 case PERF_RECORD_HEADER_ATTR: 1507 err = tool->attr(tool, event, &session->evlist); 1508 if (err == 0) { 1509 perf_session__set_id_hdr_size(session); 1510 perf_session__set_comm_exec(session); 1511 } 1512 return err; 1513 case PERF_RECORD_EVENT_UPDATE: 1514 return tool->event_update(tool, event, &session->evlist); 1515 case PERF_RECORD_HEADER_EVENT_TYPE: 1516 /* 1517 * Depreceated, but we need to handle it for sake 1518 * of old data files create in pipe mode. 1519 */ 1520 return 0; 1521 case PERF_RECORD_HEADER_TRACING_DATA: 1522 /* setup for reading amidst mmap */ 1523 lseek(fd, file_offset, SEEK_SET); 1524 return tool->tracing_data(session, event); 1525 case PERF_RECORD_HEADER_BUILD_ID: 1526 return tool->build_id(session, event); 1527 case PERF_RECORD_FINISHED_ROUND: 1528 return tool->finished_round(tool, event, oe); 1529 case PERF_RECORD_ID_INDEX: 1530 return tool->id_index(session, event); 1531 case PERF_RECORD_AUXTRACE_INFO: 1532 return tool->auxtrace_info(session, event); 1533 case PERF_RECORD_AUXTRACE: 1534 /* setup for reading amidst mmap */ 1535 lseek(fd, file_offset + event->header.size, SEEK_SET); 1536 return tool->auxtrace(session, event); 1537 case PERF_RECORD_AUXTRACE_ERROR: 1538 perf_session__auxtrace_error_inc(session, event); 1539 return tool->auxtrace_error(session, event); 1540 case PERF_RECORD_THREAD_MAP: 1541 return tool->thread_map(session, event); 1542 case PERF_RECORD_CPU_MAP: 1543 return tool->cpu_map(session, event); 1544 case PERF_RECORD_STAT_CONFIG: 1545 return tool->stat_config(session, event); 1546 case PERF_RECORD_STAT: 1547 return tool->stat(session, event); 1548 case PERF_RECORD_STAT_ROUND: 1549 return tool->stat_round(session, event); 1550 case PERF_RECORD_TIME_CONV: 1551 session->time_conv = event->time_conv; 1552 return tool->time_conv(session, event); 1553 case PERF_RECORD_HEADER_FEATURE: 1554 return tool->feature(session, event); 1555 case PERF_RECORD_COMPRESSED: 1556 err = tool->compressed(session, event, file_offset); 1557 if (err) 1558 dump_event(session->evlist, event, file_offset, &sample); 1559 return err; 1560 default: 1561 return -EINVAL; 1562 } 1563 } 1564 1565 int perf_session__deliver_synth_event(struct perf_session *session, 1566 union perf_event *event, 1567 struct perf_sample *sample) 1568 { 1569 struct evlist *evlist = session->evlist; 1570 struct perf_tool *tool = session->tool; 1571 1572 events_stats__inc(&evlist->stats, event->header.type); 1573 1574 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1575 return perf_session__process_user_event(session, event, 0); 1576 1577 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); 1578 } 1579 1580 static void event_swap(union perf_event *event, bool sample_id_all) 1581 { 1582 perf_event__swap_op swap; 1583 1584 swap = perf_event__swap_ops[event->header.type]; 1585 if (swap) 1586 swap(event, sample_id_all); 1587 } 1588 1589 int perf_session__peek_event(struct perf_session *session, off_t file_offset, 1590 void *buf, size_t buf_sz, 1591 union perf_event **event_ptr, 1592 struct perf_sample *sample) 1593 { 1594 union perf_event *event; 1595 size_t hdr_sz, rest; 1596 int fd; 1597 1598 if (session->one_mmap && !session->header.needs_swap) { 1599 event = file_offset - session->one_mmap_offset + 1600 session->one_mmap_addr; 1601 goto out_parse_sample; 1602 } 1603 1604 if (perf_data__is_pipe(session->data)) 1605 return -1; 1606 1607 fd = perf_data__fd(session->data); 1608 hdr_sz = sizeof(struct perf_event_header); 1609 1610 if (buf_sz < hdr_sz) 1611 return -1; 1612 1613 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || 1614 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz) 1615 return -1; 1616 1617 event = (union perf_event *)buf; 1618 1619 if (session->header.needs_swap) 1620 perf_event_header__bswap(&event->header); 1621 1622 if (event->header.size < hdr_sz || event->header.size > buf_sz) 1623 return -1; 1624 1625 rest = event->header.size - hdr_sz; 1626 1627 if (readn(fd, buf, rest) != (ssize_t)rest) 1628 return -1; 1629 1630 if (session->header.needs_swap) 1631 event_swap(event, perf_evlist__sample_id_all(session->evlist)); 1632 1633 out_parse_sample: 1634 1635 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && 1636 perf_evlist__parse_sample(session->evlist, event, sample)) 1637 return -1; 1638 1639 *event_ptr = event; 1640 1641 return 0; 1642 } 1643 1644 static s64 perf_session__process_event(struct perf_session *session, 1645 union perf_event *event, u64 file_offset) 1646 { 1647 struct evlist *evlist = session->evlist; 1648 struct perf_tool *tool = session->tool; 1649 int ret; 1650 1651 if (session->header.needs_swap) 1652 event_swap(event, perf_evlist__sample_id_all(evlist)); 1653 1654 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1655 return -EINVAL; 1656 1657 events_stats__inc(&evlist->stats, event->header.type); 1658 1659 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1660 return perf_session__process_user_event(session, event, file_offset); 1661 1662 if (tool->ordered_events) { 1663 u64 timestamp = -1ULL; 1664 1665 ret = perf_evlist__parse_sample_timestamp(evlist, event, ×tamp); 1666 if (ret && ret != -1) 1667 return ret; 1668 1669 ret = perf_session__queue_event(session, event, timestamp, file_offset); 1670 if (ret != -ETIME) 1671 return ret; 1672 } 1673 1674 return perf_session__deliver_event(session, event, tool, file_offset); 1675 } 1676 1677 void perf_event_header__bswap(struct perf_event_header *hdr) 1678 { 1679 hdr->type = bswap_32(hdr->type); 1680 hdr->misc = bswap_16(hdr->misc); 1681 hdr->size = bswap_16(hdr->size); 1682 } 1683 1684 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 1685 { 1686 return machine__findnew_thread(&session->machines.host, -1, pid); 1687 } 1688 1689 /* 1690 * Threads are identified by pid and tid, and the idle task has pid == tid == 0. 1691 * So here a single thread is created for that, but actually there is a separate 1692 * idle task per cpu, so there should be one 'struct thread' per cpu, but there 1693 * is only 1. That causes problems for some tools, requiring workarounds. For 1694 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu(). 1695 */ 1696 int perf_session__register_idle_thread(struct perf_session *session) 1697 { 1698 struct thread *thread; 1699 int err = 0; 1700 1701 thread = machine__findnew_thread(&session->machines.host, 0, 0); 1702 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) { 1703 pr_err("problem inserting idle task.\n"); 1704 err = -1; 1705 } 1706 1707 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) { 1708 pr_err("problem inserting idle task.\n"); 1709 err = -1; 1710 } 1711 1712 /* machine__findnew_thread() got the thread, so put it */ 1713 thread__put(thread); 1714 return err; 1715 } 1716 1717 static void 1718 perf_session__warn_order(const struct perf_session *session) 1719 { 1720 const struct ordered_events *oe = &session->ordered_events; 1721 struct evsel *evsel; 1722 bool should_warn = true; 1723 1724 evlist__for_each_entry(session->evlist, evsel) { 1725 if (evsel->core.attr.write_backward) 1726 should_warn = false; 1727 } 1728 1729 if (!should_warn) 1730 return; 1731 if (oe->nr_unordered_events != 0) 1732 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events); 1733 } 1734 1735 static void perf_session__warn_about_errors(const struct perf_session *session) 1736 { 1737 const struct events_stats *stats = &session->evlist->stats; 1738 1739 if (session->tool->lost == perf_event__process_lost && 1740 stats->nr_events[PERF_RECORD_LOST] != 0) { 1741 ui__warning("Processed %d events and lost %d chunks!\n\n" 1742 "Check IO/CPU overload!\n\n", 1743 stats->nr_events[0], 1744 stats->nr_events[PERF_RECORD_LOST]); 1745 } 1746 1747 if (session->tool->lost_samples == perf_event__process_lost_samples) { 1748 double drop_rate; 1749 1750 drop_rate = (double)stats->total_lost_samples / 1751 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples); 1752 if (drop_rate > 0.05) { 1753 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n", 1754 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples, 1755 drop_rate * 100.0); 1756 } 1757 } 1758 1759 if (session->tool->aux == perf_event__process_aux && 1760 stats->total_aux_lost != 0) { 1761 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n", 1762 stats->total_aux_lost, 1763 stats->nr_events[PERF_RECORD_AUX]); 1764 } 1765 1766 if (session->tool->aux == perf_event__process_aux && 1767 stats->total_aux_partial != 0) { 1768 bool vmm_exclusive = false; 1769 1770 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive", 1771 &vmm_exclusive); 1772 1773 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n" 1774 "Are you running a KVM guest in the background?%s\n\n", 1775 stats->total_aux_partial, 1776 stats->nr_events[PERF_RECORD_AUX], 1777 vmm_exclusive ? 1778 "\nReloading kvm_intel module with vmm_exclusive=0\n" 1779 "will reduce the gaps to only guest's timeslices." : 1780 ""); 1781 } 1782 1783 if (stats->nr_unknown_events != 0) { 1784 ui__warning("Found %u unknown events!\n\n" 1785 "Is this an older tool processing a perf.data " 1786 "file generated by a more recent tool?\n\n" 1787 "If that is not the case, consider " 1788 "reporting to linux-kernel@vger.kernel.org.\n\n", 1789 stats->nr_unknown_events); 1790 } 1791 1792 if (stats->nr_unknown_id != 0) { 1793 ui__warning("%u samples with id not present in the header\n", 1794 stats->nr_unknown_id); 1795 } 1796 1797 if (stats->nr_invalid_chains != 0) { 1798 ui__warning("Found invalid callchains!\n\n" 1799 "%u out of %u events were discarded for this reason.\n\n" 1800 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1801 stats->nr_invalid_chains, 1802 stats->nr_events[PERF_RECORD_SAMPLE]); 1803 } 1804 1805 if (stats->nr_unprocessable_samples != 0) { 1806 ui__warning("%u unprocessable samples recorded.\n" 1807 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1808 stats->nr_unprocessable_samples); 1809 } 1810 1811 perf_session__warn_order(session); 1812 1813 events_stats__auxtrace_error_warn(stats); 1814 1815 if (stats->nr_proc_map_timeout != 0) { 1816 ui__warning("%d map information files for pre-existing threads were\n" 1817 "not processed, if there are samples for addresses they\n" 1818 "will not be resolved, you may find out which are these\n" 1819 "threads by running with -v and redirecting the output\n" 1820 "to a file.\n" 1821 "The time limit to process proc map is too short?\n" 1822 "Increase it by --proc-map-timeout\n", 1823 stats->nr_proc_map_timeout); 1824 } 1825 } 1826 1827 static int perf_session__flush_thread_stack(struct thread *thread, 1828 void *p __maybe_unused) 1829 { 1830 return thread_stack__flush(thread); 1831 } 1832 1833 static int perf_session__flush_thread_stacks(struct perf_session *session) 1834 { 1835 return machines__for_each_thread(&session->machines, 1836 perf_session__flush_thread_stack, 1837 NULL); 1838 } 1839 1840 volatile int session_done; 1841 1842 static int __perf_session__process_decomp_events(struct perf_session *session); 1843 1844 static int __perf_session__process_pipe_events(struct perf_session *session) 1845 { 1846 struct ordered_events *oe = &session->ordered_events; 1847 struct perf_tool *tool = session->tool; 1848 int fd = perf_data__fd(session->data); 1849 union perf_event *event; 1850 uint32_t size, cur_size = 0; 1851 void *buf = NULL; 1852 s64 skip = 0; 1853 u64 head; 1854 ssize_t err; 1855 void *p; 1856 1857 perf_tool__fill_defaults(tool); 1858 1859 head = 0; 1860 cur_size = sizeof(union perf_event); 1861 1862 buf = malloc(cur_size); 1863 if (!buf) 1864 return -errno; 1865 ordered_events__set_copy_on_queue(oe, true); 1866 more: 1867 event = buf; 1868 err = readn(fd, event, sizeof(struct perf_event_header)); 1869 if (err <= 0) { 1870 if (err == 0) 1871 goto done; 1872 1873 pr_err("failed to read event header\n"); 1874 goto out_err; 1875 } 1876 1877 if (session->header.needs_swap) 1878 perf_event_header__bswap(&event->header); 1879 1880 size = event->header.size; 1881 if (size < sizeof(struct perf_event_header)) { 1882 pr_err("bad event header size\n"); 1883 goto out_err; 1884 } 1885 1886 if (size > cur_size) { 1887 void *new = realloc(buf, size); 1888 if (!new) { 1889 pr_err("failed to allocate memory to read event\n"); 1890 goto out_err; 1891 } 1892 buf = new; 1893 cur_size = size; 1894 event = buf; 1895 } 1896 p = event; 1897 p += sizeof(struct perf_event_header); 1898 1899 if (size - sizeof(struct perf_event_header)) { 1900 err = readn(fd, p, size - sizeof(struct perf_event_header)); 1901 if (err <= 0) { 1902 if (err == 0) { 1903 pr_err("unexpected end of event stream\n"); 1904 goto done; 1905 } 1906 1907 pr_err("failed to read event data\n"); 1908 goto out_err; 1909 } 1910 } 1911 1912 if ((skip = perf_session__process_event(session, event, head)) < 0) { 1913 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1914 head, event->header.size, event->header.type); 1915 err = -EINVAL; 1916 goto out_err; 1917 } 1918 1919 head += size; 1920 1921 if (skip > 0) 1922 head += skip; 1923 1924 err = __perf_session__process_decomp_events(session); 1925 if (err) 1926 goto out_err; 1927 1928 if (!session_done()) 1929 goto more; 1930 done: 1931 /* do the final flush for ordered samples */ 1932 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 1933 if (err) 1934 goto out_err; 1935 err = auxtrace__flush_events(session, tool); 1936 if (err) 1937 goto out_err; 1938 err = perf_session__flush_thread_stacks(session); 1939 out_err: 1940 free(buf); 1941 if (!tool->no_warn) 1942 perf_session__warn_about_errors(session); 1943 ordered_events__free(&session->ordered_events); 1944 auxtrace__free_events(session); 1945 return err; 1946 } 1947 1948 static union perf_event * 1949 fetch_mmaped_event(struct perf_session *session, 1950 u64 head, size_t mmap_size, char *buf) 1951 { 1952 union perf_event *event; 1953 1954 /* 1955 * Ensure we have enough space remaining to read 1956 * the size of the event in the headers. 1957 */ 1958 if (head + sizeof(event->header) > mmap_size) 1959 return NULL; 1960 1961 event = (union perf_event *)(buf + head); 1962 1963 if (session->header.needs_swap) 1964 perf_event_header__bswap(&event->header); 1965 1966 if (head + event->header.size > mmap_size) { 1967 /* We're not fetching the event so swap back again */ 1968 if (session->header.needs_swap) 1969 perf_event_header__bswap(&event->header); 1970 pr_debug("%s: head=%#" PRIx64 " event->header_size=%#x, mmap_size=%#zx: fuzzed perf.data?\n", 1971 __func__, head, event->header.size, mmap_size); 1972 return ERR_PTR(-EINVAL); 1973 } 1974 1975 return event; 1976 } 1977 1978 static int __perf_session__process_decomp_events(struct perf_session *session) 1979 { 1980 s64 skip; 1981 u64 size, file_pos = 0; 1982 struct decomp *decomp = session->decomp_last; 1983 1984 if (!decomp) 1985 return 0; 1986 1987 while (decomp->head < decomp->size && !session_done()) { 1988 union perf_event *event = fetch_mmaped_event(session, decomp->head, decomp->size, decomp->data); 1989 1990 if (IS_ERR(event)) 1991 return PTR_ERR(event); 1992 1993 if (!event) 1994 break; 1995 1996 size = event->header.size; 1997 1998 if (size < sizeof(struct perf_event_header) || 1999 (skip = perf_session__process_event(session, event, file_pos)) < 0) { 2000 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 2001 decomp->file_pos + decomp->head, event->header.size, event->header.type); 2002 return -EINVAL; 2003 } 2004 2005 if (skip) 2006 size += skip; 2007 2008 decomp->head += size; 2009 } 2010 2011 return 0; 2012 } 2013 2014 /* 2015 * On 64bit we can mmap the data file in one go. No need for tiny mmap 2016 * slices. On 32bit we use 32MB. 2017 */ 2018 #if BITS_PER_LONG == 64 2019 #define MMAP_SIZE ULLONG_MAX 2020 #define NUM_MMAPS 1 2021 #else 2022 #define MMAP_SIZE (32 * 1024 * 1024ULL) 2023 #define NUM_MMAPS 128 2024 #endif 2025 2026 struct reader; 2027 2028 typedef s64 (*reader_cb_t)(struct perf_session *session, 2029 union perf_event *event, 2030 u64 file_offset); 2031 2032 struct reader { 2033 int fd; 2034 u64 data_size; 2035 u64 data_offset; 2036 reader_cb_t process; 2037 }; 2038 2039 static int 2040 reader__process_events(struct reader *rd, struct perf_session *session, 2041 struct ui_progress *prog) 2042 { 2043 u64 data_size = rd->data_size; 2044 u64 head, page_offset, file_offset, file_pos, size; 2045 int err = 0, mmap_prot, mmap_flags, map_idx = 0; 2046 size_t mmap_size; 2047 char *buf, *mmaps[NUM_MMAPS]; 2048 union perf_event *event; 2049 s64 skip; 2050 2051 page_offset = page_size * (rd->data_offset / page_size); 2052 file_offset = page_offset; 2053 head = rd->data_offset - page_offset; 2054 2055 ui_progress__init_size(prog, data_size, "Processing events..."); 2056 2057 data_size += rd->data_offset; 2058 2059 mmap_size = MMAP_SIZE; 2060 if (mmap_size > data_size) { 2061 mmap_size = data_size; 2062 session->one_mmap = true; 2063 } 2064 2065 memset(mmaps, 0, sizeof(mmaps)); 2066 2067 mmap_prot = PROT_READ; 2068 mmap_flags = MAP_SHARED; 2069 2070 if (session->header.needs_swap) { 2071 mmap_prot |= PROT_WRITE; 2072 mmap_flags = MAP_PRIVATE; 2073 } 2074 remap: 2075 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd, 2076 file_offset); 2077 if (buf == MAP_FAILED) { 2078 pr_err("failed to mmap file\n"); 2079 err = -errno; 2080 goto out; 2081 } 2082 mmaps[map_idx] = buf; 2083 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 2084 file_pos = file_offset + head; 2085 if (session->one_mmap) { 2086 session->one_mmap_addr = buf; 2087 session->one_mmap_offset = file_offset; 2088 } 2089 2090 more: 2091 event = fetch_mmaped_event(session, head, mmap_size, buf); 2092 if (IS_ERR(event)) 2093 return PTR_ERR(event); 2094 2095 if (!event) { 2096 if (mmaps[map_idx]) { 2097 munmap(mmaps[map_idx], mmap_size); 2098 mmaps[map_idx] = NULL; 2099 } 2100 2101 page_offset = page_size * (head / page_size); 2102 file_offset += page_offset; 2103 head -= page_offset; 2104 goto remap; 2105 } 2106 2107 size = event->header.size; 2108 2109 skip = -EINVAL; 2110 2111 if (size < sizeof(struct perf_event_header) || 2112 (skip = rd->process(session, event, file_pos)) < 0) { 2113 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n", 2114 file_offset + head, event->header.size, 2115 event->header.type, strerror(-skip)); 2116 err = skip; 2117 goto out; 2118 } 2119 2120 if (skip) 2121 size += skip; 2122 2123 head += size; 2124 file_pos += size; 2125 2126 err = __perf_session__process_decomp_events(session); 2127 if (err) 2128 goto out; 2129 2130 ui_progress__update(prog, size); 2131 2132 if (session_done()) 2133 goto out; 2134 2135 if (file_pos < data_size) 2136 goto more; 2137 2138 out: 2139 return err; 2140 } 2141 2142 static s64 process_simple(struct perf_session *session, 2143 union perf_event *event, 2144 u64 file_offset) 2145 { 2146 return perf_session__process_event(session, event, file_offset); 2147 } 2148 2149 static int __perf_session__process_events(struct perf_session *session) 2150 { 2151 struct reader rd = { 2152 .fd = perf_data__fd(session->data), 2153 .data_size = session->header.data_size, 2154 .data_offset = session->header.data_offset, 2155 .process = process_simple, 2156 }; 2157 struct ordered_events *oe = &session->ordered_events; 2158 struct perf_tool *tool = session->tool; 2159 struct ui_progress prog; 2160 int err; 2161 2162 perf_tool__fill_defaults(tool); 2163 2164 if (rd.data_size == 0) 2165 return -1; 2166 2167 ui_progress__init_size(&prog, rd.data_size, "Processing events..."); 2168 2169 err = reader__process_events(&rd, session, &prog); 2170 if (err) 2171 goto out_err; 2172 /* do the final flush for ordered samples */ 2173 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 2174 if (err) 2175 goto out_err; 2176 err = auxtrace__flush_events(session, tool); 2177 if (err) 2178 goto out_err; 2179 err = perf_session__flush_thread_stacks(session); 2180 out_err: 2181 ui_progress__finish(); 2182 if (!tool->no_warn) 2183 perf_session__warn_about_errors(session); 2184 /* 2185 * We may switching perf.data output, make ordered_events 2186 * reusable. 2187 */ 2188 ordered_events__reinit(&session->ordered_events); 2189 auxtrace__free_events(session); 2190 session->one_mmap = false; 2191 return err; 2192 } 2193 2194 int perf_session__process_events(struct perf_session *session) 2195 { 2196 if (perf_session__register_idle_thread(session) < 0) 2197 return -ENOMEM; 2198 2199 if (perf_data__is_pipe(session->data)) 2200 return __perf_session__process_pipe_events(session); 2201 2202 return __perf_session__process_events(session); 2203 } 2204 2205 bool perf_session__has_traces(struct perf_session *session, const char *msg) 2206 { 2207 struct evsel *evsel; 2208 2209 evlist__for_each_entry(session->evlist, evsel) { 2210 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) 2211 return true; 2212 } 2213 2214 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 2215 return false; 2216 } 2217 2218 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr) 2219 { 2220 char *bracket; 2221 struct ref_reloc_sym *ref; 2222 struct kmap *kmap; 2223 2224 ref = zalloc(sizeof(struct ref_reloc_sym)); 2225 if (ref == NULL) 2226 return -ENOMEM; 2227 2228 ref->name = strdup(symbol_name); 2229 if (ref->name == NULL) { 2230 free(ref); 2231 return -ENOMEM; 2232 } 2233 2234 bracket = strchr(ref->name, ']'); 2235 if (bracket) 2236 *bracket = '\0'; 2237 2238 ref->addr = addr; 2239 2240 kmap = map__kmap(map); 2241 if (kmap) 2242 kmap->ref_reloc_sym = ref; 2243 2244 return 0; 2245 } 2246 2247 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp) 2248 { 2249 return machines__fprintf_dsos(&session->machines, fp); 2250 } 2251 2252 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, 2253 bool (skip)(struct dso *dso, int parm), int parm) 2254 { 2255 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm); 2256 } 2257 2258 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 2259 { 2260 size_t ret; 2261 const char *msg = ""; 2262 2263 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) 2264 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)"; 2265 2266 ret = fprintf(fp, "\nAggregated stats:%s\n", msg); 2267 2268 ret += events_stats__fprintf(&session->evlist->stats, fp); 2269 return ret; 2270 } 2271 2272 size_t perf_session__fprintf(struct perf_session *session, FILE *fp) 2273 { 2274 /* 2275 * FIXME: Here we have to actually print all the machines in this 2276 * session, not just the host... 2277 */ 2278 return machine__fprintf(&session->machines.host, fp); 2279 } 2280 2281 struct evsel *perf_session__find_first_evtype(struct perf_session *session, 2282 unsigned int type) 2283 { 2284 struct evsel *pos; 2285 2286 evlist__for_each_entry(session->evlist, pos) { 2287 if (pos->core.attr.type == type) 2288 return pos; 2289 } 2290 return NULL; 2291 } 2292 2293 int perf_session__cpu_bitmap(struct perf_session *session, 2294 const char *cpu_list, unsigned long *cpu_bitmap) 2295 { 2296 int i, err = -1; 2297 struct perf_cpu_map *map; 2298 int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS); 2299 2300 for (i = 0; i < PERF_TYPE_MAX; ++i) { 2301 struct evsel *evsel; 2302 2303 evsel = perf_session__find_first_evtype(session, i); 2304 if (!evsel) 2305 continue; 2306 2307 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) { 2308 pr_err("File does not contain CPU events. " 2309 "Remove -C option to proceed.\n"); 2310 return -1; 2311 } 2312 } 2313 2314 map = perf_cpu_map__new(cpu_list); 2315 if (map == NULL) { 2316 pr_err("Invalid cpu_list\n"); 2317 return -1; 2318 } 2319 2320 for (i = 0; i < map->nr; i++) { 2321 int cpu = map->map[i]; 2322 2323 if (cpu >= nr_cpus) { 2324 pr_err("Requested CPU %d too large. " 2325 "Consider raising MAX_NR_CPUS\n", cpu); 2326 goto out_delete_map; 2327 } 2328 2329 set_bit(cpu, cpu_bitmap); 2330 } 2331 2332 err = 0; 2333 2334 out_delete_map: 2335 perf_cpu_map__put(map); 2336 return err; 2337 } 2338 2339 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 2340 bool full) 2341 { 2342 if (session == NULL || fp == NULL) 2343 return; 2344 2345 fprintf(fp, "# ========\n"); 2346 perf_header__fprintf_info(session, fp, full); 2347 fprintf(fp, "# ========\n#\n"); 2348 } 2349 2350 2351 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 2352 const struct evsel_str_handler *assocs, 2353 size_t nr_assocs) 2354 { 2355 struct evsel *evsel; 2356 size_t i; 2357 int err; 2358 2359 for (i = 0; i < nr_assocs; i++) { 2360 /* 2361 * Adding a handler for an event not in the session, 2362 * just ignore it. 2363 */ 2364 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name); 2365 if (evsel == NULL) 2366 continue; 2367 2368 err = -EEXIST; 2369 if (evsel->handler != NULL) 2370 goto out; 2371 evsel->handler = assocs[i].handler; 2372 } 2373 2374 err = 0; 2375 out: 2376 return err; 2377 } 2378 2379 int perf_event__process_id_index(struct perf_session *session, 2380 union perf_event *event) 2381 { 2382 struct evlist *evlist = session->evlist; 2383 struct perf_record_id_index *ie = &event->id_index; 2384 size_t i, nr, max_nr; 2385 2386 max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) / 2387 sizeof(struct id_index_entry); 2388 nr = ie->nr; 2389 if (nr > max_nr) 2390 return -EINVAL; 2391 2392 if (dump_trace) 2393 fprintf(stdout, " nr: %zu\n", nr); 2394 2395 for (i = 0; i < nr; i++) { 2396 struct id_index_entry *e = &ie->entries[i]; 2397 struct perf_sample_id *sid; 2398 2399 if (dump_trace) { 2400 fprintf(stdout, " ... id: %"PRI_lu64, e->id); 2401 fprintf(stdout, " idx: %"PRI_lu64, e->idx); 2402 fprintf(stdout, " cpu: %"PRI_ld64, e->cpu); 2403 fprintf(stdout, " tid: %"PRI_ld64"\n", e->tid); 2404 } 2405 2406 sid = perf_evlist__id2sid(evlist, e->id); 2407 if (!sid) 2408 return -ENOENT; 2409 sid->idx = e->idx; 2410 sid->cpu = e->cpu; 2411 sid->tid = e->tid; 2412 } 2413 return 0; 2414 } 2415 2416 int perf_event__synthesize_id_index(struct perf_tool *tool, 2417 perf_event__handler_t process, 2418 struct evlist *evlist, 2419 struct machine *machine) 2420 { 2421 union perf_event *ev; 2422 struct evsel *evsel; 2423 size_t nr = 0, i = 0, sz, max_nr, n; 2424 int err; 2425 2426 pr_debug2("Synthesizing id index\n"); 2427 2428 max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / 2429 sizeof(struct id_index_entry); 2430 2431 evlist__for_each_entry(evlist, evsel) 2432 nr += evsel->ids; 2433 2434 n = nr > max_nr ? max_nr : nr; 2435 sz = sizeof(struct perf_record_id_index) + n * sizeof(struct id_index_entry); 2436 ev = zalloc(sz); 2437 if (!ev) 2438 return -ENOMEM; 2439 2440 ev->id_index.header.type = PERF_RECORD_ID_INDEX; 2441 ev->id_index.header.size = sz; 2442 ev->id_index.nr = n; 2443 2444 evlist__for_each_entry(evlist, evsel) { 2445 u32 j; 2446 2447 for (j = 0; j < evsel->ids; j++) { 2448 struct id_index_entry *e; 2449 struct perf_sample_id *sid; 2450 2451 if (i >= n) { 2452 err = process(tool, ev, NULL, machine); 2453 if (err) 2454 goto out_err; 2455 nr -= n; 2456 i = 0; 2457 } 2458 2459 e = &ev->id_index.entries[i++]; 2460 2461 e->id = evsel->id[j]; 2462 2463 sid = perf_evlist__id2sid(evlist, e->id); 2464 if (!sid) { 2465 free(ev); 2466 return -ENOENT; 2467 } 2468 2469 e->idx = sid->idx; 2470 e->cpu = sid->cpu; 2471 e->tid = sid->tid; 2472 } 2473 } 2474 2475 sz = sizeof(struct perf_record_id_index) + nr * sizeof(struct id_index_entry); 2476 ev->id_index.header.size = sz; 2477 ev->id_index.nr = nr; 2478 2479 err = process(tool, ev, NULL, machine); 2480 out_err: 2481 free(ev); 2482 2483 return err; 2484 } 2485