1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <inttypes.h> 4 #include <linux/kernel.h> 5 #include <traceevent/event-parse.h> 6 #include <api/fs/fs.h> 7 8 #include <byteswap.h> 9 #include <unistd.h> 10 #include <sys/types.h> 11 #include <sys/mman.h> 12 13 #include "evlist.h" 14 #include "evsel.h" 15 #include "memswap.h" 16 #include "map.h" 17 #include "session.h" 18 #include "tool.h" 19 #include "sort.h" 20 #include "util.h" 21 #include "cpumap.h" 22 #include "perf_regs.h" 23 #include "asm/bug.h" 24 #include "auxtrace.h" 25 #include "thread.h" 26 #include "thread-stack.h" 27 #include "sample-raw.h" 28 #include "stat.h" 29 #include "arch/common.h" 30 31 static int perf_session__deliver_event(struct perf_session *session, 32 union perf_event *event, 33 struct perf_tool *tool, 34 u64 file_offset); 35 36 static int perf_session__open(struct perf_session *session) 37 { 38 struct perf_data *data = session->data; 39 40 if (perf_session__read_header(session) < 0) { 41 pr_err("incompatible file format (rerun with -v to learn more)\n"); 42 return -1; 43 } 44 45 if (perf_data__is_pipe(data)) 46 return 0; 47 48 if (perf_header__has_feat(&session->header, HEADER_STAT)) 49 return 0; 50 51 if (!perf_evlist__valid_sample_type(session->evlist)) { 52 pr_err("non matching sample_type\n"); 53 return -1; 54 } 55 56 if (!perf_evlist__valid_sample_id_all(session->evlist)) { 57 pr_err("non matching sample_id_all\n"); 58 return -1; 59 } 60 61 if (!perf_evlist__valid_read_format(session->evlist)) { 62 pr_err("non matching read_format\n"); 63 return -1; 64 } 65 66 return 0; 67 } 68 69 void perf_session__set_id_hdr_size(struct perf_session *session) 70 { 71 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 72 73 machines__set_id_hdr_size(&session->machines, id_hdr_size); 74 } 75 76 int perf_session__create_kernel_maps(struct perf_session *session) 77 { 78 int ret = machine__create_kernel_maps(&session->machines.host); 79 80 if (ret >= 0) 81 ret = machines__create_guest_kernel_maps(&session->machines); 82 return ret; 83 } 84 85 static void perf_session__destroy_kernel_maps(struct perf_session *session) 86 { 87 machines__destroy_kernel_maps(&session->machines); 88 } 89 90 static bool perf_session__has_comm_exec(struct perf_session *session) 91 { 92 struct perf_evsel *evsel; 93 94 evlist__for_each_entry(session->evlist, evsel) { 95 if (evsel->attr.comm_exec) 96 return true; 97 } 98 99 return false; 100 } 101 102 static void perf_session__set_comm_exec(struct perf_session *session) 103 { 104 bool comm_exec = perf_session__has_comm_exec(session); 105 106 machines__set_comm_exec(&session->machines, comm_exec); 107 } 108 109 static int ordered_events__deliver_event(struct ordered_events *oe, 110 struct ordered_event *event) 111 { 112 struct perf_session *session = container_of(oe, struct perf_session, 113 ordered_events); 114 115 return perf_session__deliver_event(session, event->event, 116 session->tool, event->file_offset); 117 } 118 119 struct perf_session *perf_session__new(struct perf_data *data, 120 bool repipe, struct perf_tool *tool) 121 { 122 struct perf_session *session = zalloc(sizeof(*session)); 123 124 if (!session) 125 goto out; 126 127 session->repipe = repipe; 128 session->tool = tool; 129 INIT_LIST_HEAD(&session->auxtrace_index); 130 machines__init(&session->machines); 131 ordered_events__init(&session->ordered_events, 132 ordered_events__deliver_event, NULL); 133 134 if (data) { 135 if (perf_data__open(data)) 136 goto out_delete; 137 138 session->data = data; 139 140 if (perf_data__is_read(data)) { 141 if (perf_session__open(session) < 0) 142 goto out_close; 143 144 /* 145 * set session attributes that are present in perf.data 146 * but not in pipe-mode. 147 */ 148 if (!data->is_pipe) { 149 perf_session__set_id_hdr_size(session); 150 perf_session__set_comm_exec(session); 151 } 152 153 perf_evlist__init_trace_event_sample_raw(session->evlist); 154 } 155 } else { 156 session->machines.host.env = &perf_env; 157 } 158 159 session->machines.host.single_address_space = 160 perf_env__single_address_space(session->machines.host.env); 161 162 if (!data || perf_data__is_write(data)) { 163 /* 164 * In O_RDONLY mode this will be performed when reading the 165 * kernel MMAP event, in perf_event__process_mmap(). 166 */ 167 if (perf_session__create_kernel_maps(session) < 0) 168 pr_warning("Cannot read kernel map\n"); 169 } 170 171 /* 172 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is 173 * processed, so perf_evlist__sample_id_all is not meaningful here. 174 */ 175 if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps && 176 tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) { 177 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 178 tool->ordered_events = false; 179 } 180 181 return session; 182 183 out_close: 184 perf_data__close(data); 185 out_delete: 186 perf_session__delete(session); 187 out: 188 return NULL; 189 } 190 191 static void perf_session__delete_threads(struct perf_session *session) 192 { 193 machine__delete_threads(&session->machines.host); 194 } 195 196 void perf_session__delete(struct perf_session *session) 197 { 198 if (session == NULL) 199 return; 200 auxtrace__free(session); 201 auxtrace_index__free(&session->auxtrace_index); 202 perf_session__destroy_kernel_maps(session); 203 perf_session__delete_threads(session); 204 perf_env__exit(&session->header.env); 205 machines__exit(&session->machines); 206 if (session->data) 207 perf_data__close(session->data); 208 free(session); 209 } 210 211 static int process_event_synth_tracing_data_stub(struct perf_session *session 212 __maybe_unused, 213 union perf_event *event 214 __maybe_unused) 215 { 216 dump_printf(": unhandled!\n"); 217 return 0; 218 } 219 220 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused, 221 union perf_event *event __maybe_unused, 222 struct perf_evlist **pevlist 223 __maybe_unused) 224 { 225 dump_printf(": unhandled!\n"); 226 return 0; 227 } 228 229 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused, 230 union perf_event *event __maybe_unused, 231 struct perf_evlist **pevlist 232 __maybe_unused) 233 { 234 if (dump_trace) 235 perf_event__fprintf_event_update(event, stdout); 236 237 dump_printf(": unhandled!\n"); 238 return 0; 239 } 240 241 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, 242 union perf_event *event __maybe_unused, 243 struct perf_sample *sample __maybe_unused, 244 struct perf_evsel *evsel __maybe_unused, 245 struct machine *machine __maybe_unused) 246 { 247 dump_printf(": unhandled!\n"); 248 return 0; 249 } 250 251 static int process_event_stub(struct perf_tool *tool __maybe_unused, 252 union perf_event *event __maybe_unused, 253 struct perf_sample *sample __maybe_unused, 254 struct machine *machine __maybe_unused) 255 { 256 dump_printf(": unhandled!\n"); 257 return 0; 258 } 259 260 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, 261 union perf_event *event __maybe_unused, 262 struct ordered_events *oe __maybe_unused) 263 { 264 dump_printf(": unhandled!\n"); 265 return 0; 266 } 267 268 static int process_finished_round(struct perf_tool *tool, 269 union perf_event *event, 270 struct ordered_events *oe); 271 272 static int skipn(int fd, off_t n) 273 { 274 char buf[4096]; 275 ssize_t ret; 276 277 while (n > 0) { 278 ret = read(fd, buf, min(n, (off_t)sizeof(buf))); 279 if (ret <= 0) 280 return ret; 281 n -= ret; 282 } 283 284 return 0; 285 } 286 287 static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused, 288 union perf_event *event) 289 { 290 dump_printf(": unhandled!\n"); 291 if (perf_data__is_pipe(session->data)) 292 skipn(perf_data__fd(session->data), event->auxtrace.size); 293 return event->auxtrace.size; 294 } 295 296 static int process_event_op2_stub(struct perf_session *session __maybe_unused, 297 union perf_event *event __maybe_unused) 298 { 299 dump_printf(": unhandled!\n"); 300 return 0; 301 } 302 303 304 static 305 int process_event_thread_map_stub(struct perf_session *session __maybe_unused, 306 union perf_event *event __maybe_unused) 307 { 308 if (dump_trace) 309 perf_event__fprintf_thread_map(event, stdout); 310 311 dump_printf(": unhandled!\n"); 312 return 0; 313 } 314 315 static 316 int process_event_cpu_map_stub(struct perf_session *session __maybe_unused, 317 union perf_event *event __maybe_unused) 318 { 319 if (dump_trace) 320 perf_event__fprintf_cpu_map(event, stdout); 321 322 dump_printf(": unhandled!\n"); 323 return 0; 324 } 325 326 static 327 int process_event_stat_config_stub(struct perf_session *session __maybe_unused, 328 union perf_event *event __maybe_unused) 329 { 330 if (dump_trace) 331 perf_event__fprintf_stat_config(event, stdout); 332 333 dump_printf(": unhandled!\n"); 334 return 0; 335 } 336 337 static int process_stat_stub(struct perf_session *perf_session __maybe_unused, 338 union perf_event *event) 339 { 340 if (dump_trace) 341 perf_event__fprintf_stat(event, stdout); 342 343 dump_printf(": unhandled!\n"); 344 return 0; 345 } 346 347 static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused, 348 union perf_event *event) 349 { 350 if (dump_trace) 351 perf_event__fprintf_stat_round(event, stdout); 352 353 dump_printf(": unhandled!\n"); 354 return 0; 355 } 356 357 void perf_tool__fill_defaults(struct perf_tool *tool) 358 { 359 if (tool->sample == NULL) 360 tool->sample = process_event_sample_stub; 361 if (tool->mmap == NULL) 362 tool->mmap = process_event_stub; 363 if (tool->mmap2 == NULL) 364 tool->mmap2 = process_event_stub; 365 if (tool->comm == NULL) 366 tool->comm = process_event_stub; 367 if (tool->namespaces == NULL) 368 tool->namespaces = process_event_stub; 369 if (tool->fork == NULL) 370 tool->fork = process_event_stub; 371 if (tool->exit == NULL) 372 tool->exit = process_event_stub; 373 if (tool->lost == NULL) 374 tool->lost = perf_event__process_lost; 375 if (tool->lost_samples == NULL) 376 tool->lost_samples = perf_event__process_lost_samples; 377 if (tool->aux == NULL) 378 tool->aux = perf_event__process_aux; 379 if (tool->itrace_start == NULL) 380 tool->itrace_start = perf_event__process_itrace_start; 381 if (tool->context_switch == NULL) 382 tool->context_switch = perf_event__process_switch; 383 if (tool->ksymbol == NULL) 384 tool->ksymbol = perf_event__process_ksymbol; 385 if (tool->bpf_event == NULL) 386 tool->bpf_event = perf_event__process_bpf_event; 387 if (tool->read == NULL) 388 tool->read = process_event_sample_stub; 389 if (tool->throttle == NULL) 390 tool->throttle = process_event_stub; 391 if (tool->unthrottle == NULL) 392 tool->unthrottle = process_event_stub; 393 if (tool->attr == NULL) 394 tool->attr = process_event_synth_attr_stub; 395 if (tool->event_update == NULL) 396 tool->event_update = process_event_synth_event_update_stub; 397 if (tool->tracing_data == NULL) 398 tool->tracing_data = process_event_synth_tracing_data_stub; 399 if (tool->build_id == NULL) 400 tool->build_id = process_event_op2_stub; 401 if (tool->finished_round == NULL) { 402 if (tool->ordered_events) 403 tool->finished_round = process_finished_round; 404 else 405 tool->finished_round = process_finished_round_stub; 406 } 407 if (tool->id_index == NULL) 408 tool->id_index = process_event_op2_stub; 409 if (tool->auxtrace_info == NULL) 410 tool->auxtrace_info = process_event_op2_stub; 411 if (tool->auxtrace == NULL) 412 tool->auxtrace = process_event_auxtrace_stub; 413 if (tool->auxtrace_error == NULL) 414 tool->auxtrace_error = process_event_op2_stub; 415 if (tool->thread_map == NULL) 416 tool->thread_map = process_event_thread_map_stub; 417 if (tool->cpu_map == NULL) 418 tool->cpu_map = process_event_cpu_map_stub; 419 if (tool->stat_config == NULL) 420 tool->stat_config = process_event_stat_config_stub; 421 if (tool->stat == NULL) 422 tool->stat = process_stat_stub; 423 if (tool->stat_round == NULL) 424 tool->stat_round = process_stat_round_stub; 425 if (tool->time_conv == NULL) 426 tool->time_conv = process_event_op2_stub; 427 if (tool->feature == NULL) 428 tool->feature = process_event_op2_stub; 429 } 430 431 static void swap_sample_id_all(union perf_event *event, void *data) 432 { 433 void *end = (void *) event + event->header.size; 434 int size = end - data; 435 436 BUG_ON(size % sizeof(u64)); 437 mem_bswap_64(data, size); 438 } 439 440 static void perf_event__all64_swap(union perf_event *event, 441 bool sample_id_all __maybe_unused) 442 { 443 struct perf_event_header *hdr = &event->header; 444 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 445 } 446 447 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) 448 { 449 event->comm.pid = bswap_32(event->comm.pid); 450 event->comm.tid = bswap_32(event->comm.tid); 451 452 if (sample_id_all) { 453 void *data = &event->comm.comm; 454 455 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 456 swap_sample_id_all(event, data); 457 } 458 } 459 460 static void perf_event__mmap_swap(union perf_event *event, 461 bool sample_id_all) 462 { 463 event->mmap.pid = bswap_32(event->mmap.pid); 464 event->mmap.tid = bswap_32(event->mmap.tid); 465 event->mmap.start = bswap_64(event->mmap.start); 466 event->mmap.len = bswap_64(event->mmap.len); 467 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 468 469 if (sample_id_all) { 470 void *data = &event->mmap.filename; 471 472 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 473 swap_sample_id_all(event, data); 474 } 475 } 476 477 static void perf_event__mmap2_swap(union perf_event *event, 478 bool sample_id_all) 479 { 480 event->mmap2.pid = bswap_32(event->mmap2.pid); 481 event->mmap2.tid = bswap_32(event->mmap2.tid); 482 event->mmap2.start = bswap_64(event->mmap2.start); 483 event->mmap2.len = bswap_64(event->mmap2.len); 484 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff); 485 event->mmap2.maj = bswap_32(event->mmap2.maj); 486 event->mmap2.min = bswap_32(event->mmap2.min); 487 event->mmap2.ino = bswap_64(event->mmap2.ino); 488 489 if (sample_id_all) { 490 void *data = &event->mmap2.filename; 491 492 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 493 swap_sample_id_all(event, data); 494 } 495 } 496 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 497 { 498 event->fork.pid = bswap_32(event->fork.pid); 499 event->fork.tid = bswap_32(event->fork.tid); 500 event->fork.ppid = bswap_32(event->fork.ppid); 501 event->fork.ptid = bswap_32(event->fork.ptid); 502 event->fork.time = bswap_64(event->fork.time); 503 504 if (sample_id_all) 505 swap_sample_id_all(event, &event->fork + 1); 506 } 507 508 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 509 { 510 event->read.pid = bswap_32(event->read.pid); 511 event->read.tid = bswap_32(event->read.tid); 512 event->read.value = bswap_64(event->read.value); 513 event->read.time_enabled = bswap_64(event->read.time_enabled); 514 event->read.time_running = bswap_64(event->read.time_running); 515 event->read.id = bswap_64(event->read.id); 516 517 if (sample_id_all) 518 swap_sample_id_all(event, &event->read + 1); 519 } 520 521 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all) 522 { 523 event->aux.aux_offset = bswap_64(event->aux.aux_offset); 524 event->aux.aux_size = bswap_64(event->aux.aux_size); 525 event->aux.flags = bswap_64(event->aux.flags); 526 527 if (sample_id_all) 528 swap_sample_id_all(event, &event->aux + 1); 529 } 530 531 static void perf_event__itrace_start_swap(union perf_event *event, 532 bool sample_id_all) 533 { 534 event->itrace_start.pid = bswap_32(event->itrace_start.pid); 535 event->itrace_start.tid = bswap_32(event->itrace_start.tid); 536 537 if (sample_id_all) 538 swap_sample_id_all(event, &event->itrace_start + 1); 539 } 540 541 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all) 542 { 543 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) { 544 event->context_switch.next_prev_pid = 545 bswap_32(event->context_switch.next_prev_pid); 546 event->context_switch.next_prev_tid = 547 bswap_32(event->context_switch.next_prev_tid); 548 } 549 550 if (sample_id_all) 551 swap_sample_id_all(event, &event->context_switch + 1); 552 } 553 554 static void perf_event__throttle_swap(union perf_event *event, 555 bool sample_id_all) 556 { 557 event->throttle.time = bswap_64(event->throttle.time); 558 event->throttle.id = bswap_64(event->throttle.id); 559 event->throttle.stream_id = bswap_64(event->throttle.stream_id); 560 561 if (sample_id_all) 562 swap_sample_id_all(event, &event->throttle + 1); 563 } 564 565 static u8 revbyte(u8 b) 566 { 567 int rev = (b >> 4) | ((b & 0xf) << 4); 568 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 569 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 570 return (u8) rev; 571 } 572 573 /* 574 * XXX this is hack in attempt to carry flags bitfield 575 * through endian village. ABI says: 576 * 577 * Bit-fields are allocated from right to left (least to most significant) 578 * on little-endian implementations and from left to right (most to least 579 * significant) on big-endian implementations. 580 * 581 * The above seems to be byte specific, so we need to reverse each 582 * byte of the bitfield. 'Internet' also says this might be implementation 583 * specific and we probably need proper fix and carry perf_event_attr 584 * bitfield flags in separate data file FEAT_ section. Thought this seems 585 * to work for now. 586 */ 587 static void swap_bitfield(u8 *p, unsigned len) 588 { 589 unsigned i; 590 591 for (i = 0; i < len; i++) { 592 *p = revbyte(*p); 593 p++; 594 } 595 } 596 597 /* exported for swapping attributes in file header */ 598 void perf_event__attr_swap(struct perf_event_attr *attr) 599 { 600 attr->type = bswap_32(attr->type); 601 attr->size = bswap_32(attr->size); 602 603 #define bswap_safe(f, n) \ 604 (attr->size > (offsetof(struct perf_event_attr, f) + \ 605 sizeof(attr->f) * (n))) 606 #define bswap_field(f, sz) \ 607 do { \ 608 if (bswap_safe(f, 0)) \ 609 attr->f = bswap_##sz(attr->f); \ 610 } while(0) 611 #define bswap_field_16(f) bswap_field(f, 16) 612 #define bswap_field_32(f) bswap_field(f, 32) 613 #define bswap_field_64(f) bswap_field(f, 64) 614 615 bswap_field_64(config); 616 bswap_field_64(sample_period); 617 bswap_field_64(sample_type); 618 bswap_field_64(read_format); 619 bswap_field_32(wakeup_events); 620 bswap_field_32(bp_type); 621 bswap_field_64(bp_addr); 622 bswap_field_64(bp_len); 623 bswap_field_64(branch_sample_type); 624 bswap_field_64(sample_regs_user); 625 bswap_field_32(sample_stack_user); 626 bswap_field_32(aux_watermark); 627 bswap_field_16(sample_max_stack); 628 629 /* 630 * After read_format are bitfields. Check read_format because 631 * we are unable to use offsetof on bitfield. 632 */ 633 if (bswap_safe(read_format, 1)) 634 swap_bitfield((u8 *) (&attr->read_format + 1), 635 sizeof(u64)); 636 #undef bswap_field_64 637 #undef bswap_field_32 638 #undef bswap_field 639 #undef bswap_safe 640 } 641 642 static void perf_event__hdr_attr_swap(union perf_event *event, 643 bool sample_id_all __maybe_unused) 644 { 645 size_t size; 646 647 perf_event__attr_swap(&event->attr.attr); 648 649 size = event->header.size; 650 size -= (void *)&event->attr.id - (void *)event; 651 mem_bswap_64(event->attr.id, size); 652 } 653 654 static void perf_event__event_update_swap(union perf_event *event, 655 bool sample_id_all __maybe_unused) 656 { 657 event->event_update.type = bswap_64(event->event_update.type); 658 event->event_update.id = bswap_64(event->event_update.id); 659 } 660 661 static void perf_event__event_type_swap(union perf_event *event, 662 bool sample_id_all __maybe_unused) 663 { 664 event->event_type.event_type.event_id = 665 bswap_64(event->event_type.event_type.event_id); 666 } 667 668 static void perf_event__tracing_data_swap(union perf_event *event, 669 bool sample_id_all __maybe_unused) 670 { 671 event->tracing_data.size = bswap_32(event->tracing_data.size); 672 } 673 674 static void perf_event__auxtrace_info_swap(union perf_event *event, 675 bool sample_id_all __maybe_unused) 676 { 677 size_t size; 678 679 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type); 680 681 size = event->header.size; 682 size -= (void *)&event->auxtrace_info.priv - (void *)event; 683 mem_bswap_64(event->auxtrace_info.priv, size); 684 } 685 686 static void perf_event__auxtrace_swap(union perf_event *event, 687 bool sample_id_all __maybe_unused) 688 { 689 event->auxtrace.size = bswap_64(event->auxtrace.size); 690 event->auxtrace.offset = bswap_64(event->auxtrace.offset); 691 event->auxtrace.reference = bswap_64(event->auxtrace.reference); 692 event->auxtrace.idx = bswap_32(event->auxtrace.idx); 693 event->auxtrace.tid = bswap_32(event->auxtrace.tid); 694 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu); 695 } 696 697 static void perf_event__auxtrace_error_swap(union perf_event *event, 698 bool sample_id_all __maybe_unused) 699 { 700 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type); 701 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code); 702 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu); 703 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid); 704 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid); 705 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip); 706 } 707 708 static void perf_event__thread_map_swap(union perf_event *event, 709 bool sample_id_all __maybe_unused) 710 { 711 unsigned i; 712 713 event->thread_map.nr = bswap_64(event->thread_map.nr); 714 715 for (i = 0; i < event->thread_map.nr; i++) 716 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid); 717 } 718 719 static void perf_event__cpu_map_swap(union perf_event *event, 720 bool sample_id_all __maybe_unused) 721 { 722 struct cpu_map_data *data = &event->cpu_map.data; 723 struct cpu_map_entries *cpus; 724 struct cpu_map_mask *mask; 725 unsigned i; 726 727 data->type = bswap_64(data->type); 728 729 switch (data->type) { 730 case PERF_CPU_MAP__CPUS: 731 cpus = (struct cpu_map_entries *)data->data; 732 733 cpus->nr = bswap_16(cpus->nr); 734 735 for (i = 0; i < cpus->nr; i++) 736 cpus->cpu[i] = bswap_16(cpus->cpu[i]); 737 break; 738 case PERF_CPU_MAP__MASK: 739 mask = (struct cpu_map_mask *) data->data; 740 741 mask->nr = bswap_16(mask->nr); 742 mask->long_size = bswap_16(mask->long_size); 743 744 switch (mask->long_size) { 745 case 4: mem_bswap_32(&mask->mask, mask->nr); break; 746 case 8: mem_bswap_64(&mask->mask, mask->nr); break; 747 default: 748 pr_err("cpu_map swap: unsupported long size\n"); 749 } 750 default: 751 break; 752 } 753 } 754 755 static void perf_event__stat_config_swap(union perf_event *event, 756 bool sample_id_all __maybe_unused) 757 { 758 u64 size; 759 760 size = event->stat_config.nr * sizeof(event->stat_config.data[0]); 761 size += 1; /* nr item itself */ 762 mem_bswap_64(&event->stat_config.nr, size); 763 } 764 765 static void perf_event__stat_swap(union perf_event *event, 766 bool sample_id_all __maybe_unused) 767 { 768 event->stat.id = bswap_64(event->stat.id); 769 event->stat.thread = bswap_32(event->stat.thread); 770 event->stat.cpu = bswap_32(event->stat.cpu); 771 event->stat.val = bswap_64(event->stat.val); 772 event->stat.ena = bswap_64(event->stat.ena); 773 event->stat.run = bswap_64(event->stat.run); 774 } 775 776 static void perf_event__stat_round_swap(union perf_event *event, 777 bool sample_id_all __maybe_unused) 778 { 779 event->stat_round.type = bswap_64(event->stat_round.type); 780 event->stat_round.time = bswap_64(event->stat_round.time); 781 } 782 783 typedef void (*perf_event__swap_op)(union perf_event *event, 784 bool sample_id_all); 785 786 static perf_event__swap_op perf_event__swap_ops[] = { 787 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 788 [PERF_RECORD_MMAP2] = perf_event__mmap2_swap, 789 [PERF_RECORD_COMM] = perf_event__comm_swap, 790 [PERF_RECORD_FORK] = perf_event__task_swap, 791 [PERF_RECORD_EXIT] = perf_event__task_swap, 792 [PERF_RECORD_LOST] = perf_event__all64_swap, 793 [PERF_RECORD_READ] = perf_event__read_swap, 794 [PERF_RECORD_THROTTLE] = perf_event__throttle_swap, 795 [PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap, 796 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 797 [PERF_RECORD_AUX] = perf_event__aux_swap, 798 [PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap, 799 [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap, 800 [PERF_RECORD_SWITCH] = perf_event__switch_swap, 801 [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, 802 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 803 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 804 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 805 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 806 [PERF_RECORD_ID_INDEX] = perf_event__all64_swap, 807 [PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap, 808 [PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap, 809 [PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap, 810 [PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap, 811 [PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap, 812 [PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap, 813 [PERF_RECORD_STAT] = perf_event__stat_swap, 814 [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap, 815 [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap, 816 [PERF_RECORD_TIME_CONV] = perf_event__all64_swap, 817 [PERF_RECORD_HEADER_MAX] = NULL, 818 }; 819 820 /* 821 * When perf record finishes a pass on every buffers, it records this pseudo 822 * event. 823 * We record the max timestamp t found in the pass n. 824 * Assuming these timestamps are monotonic across cpus, we know that if 825 * a buffer still has events with timestamps below t, they will be all 826 * available and then read in the pass n + 1. 827 * Hence when we start to read the pass n + 2, we can safely flush every 828 * events with timestamps below t. 829 * 830 * ============ PASS n ================= 831 * CPU 0 | CPU 1 832 * | 833 * cnt1 timestamps | cnt2 timestamps 834 * 1 | 2 835 * 2 | 3 836 * - | 4 <--- max recorded 837 * 838 * ============ PASS n + 1 ============== 839 * CPU 0 | CPU 1 840 * | 841 * cnt1 timestamps | cnt2 timestamps 842 * 3 | 5 843 * 4 | 6 844 * 5 | 7 <---- max recorded 845 * 846 * Flush every events below timestamp 4 847 * 848 * ============ PASS n + 2 ============== 849 * CPU 0 | CPU 1 850 * | 851 * cnt1 timestamps | cnt2 timestamps 852 * 6 | 8 853 * 7 | 9 854 * - | 10 855 * 856 * Flush every events below timestamp 7 857 * etc... 858 */ 859 static int process_finished_round(struct perf_tool *tool __maybe_unused, 860 union perf_event *event __maybe_unused, 861 struct ordered_events *oe) 862 { 863 if (dump_trace) 864 fprintf(stdout, "\n"); 865 return ordered_events__flush(oe, OE_FLUSH__ROUND); 866 } 867 868 int perf_session__queue_event(struct perf_session *s, union perf_event *event, 869 u64 timestamp, u64 file_offset) 870 { 871 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset); 872 } 873 874 static void callchain__lbr_callstack_printf(struct perf_sample *sample) 875 { 876 struct ip_callchain *callchain = sample->callchain; 877 struct branch_stack *lbr_stack = sample->branch_stack; 878 u64 kernel_callchain_nr = callchain->nr; 879 unsigned int i; 880 881 for (i = 0; i < kernel_callchain_nr; i++) { 882 if (callchain->ips[i] == PERF_CONTEXT_USER) 883 break; 884 } 885 886 if ((i != kernel_callchain_nr) && lbr_stack->nr) { 887 u64 total_nr; 888 /* 889 * LBR callstack can only get user call chain, 890 * i is kernel call chain number, 891 * 1 is PERF_CONTEXT_USER. 892 * 893 * The user call chain is stored in LBR registers. 894 * LBR are pair registers. The caller is stored 895 * in "from" register, while the callee is stored 896 * in "to" register. 897 * For example, there is a call stack 898 * "A"->"B"->"C"->"D". 899 * The LBR registers will recorde like 900 * "C"->"D", "B"->"C", "A"->"B". 901 * So only the first "to" register and all "from" 902 * registers are needed to construct the whole stack. 903 */ 904 total_nr = i + 1 + lbr_stack->nr + 1; 905 kernel_callchain_nr = i + 1; 906 907 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr); 908 909 for (i = 0; i < kernel_callchain_nr; i++) 910 printf("..... %2d: %016" PRIx64 "\n", 911 i, callchain->ips[i]); 912 913 printf("..... %2d: %016" PRIx64 "\n", 914 (int)(kernel_callchain_nr), lbr_stack->entries[0].to); 915 for (i = 0; i < lbr_stack->nr; i++) 916 printf("..... %2d: %016" PRIx64 "\n", 917 (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from); 918 } 919 } 920 921 static void callchain__printf(struct perf_evsel *evsel, 922 struct perf_sample *sample) 923 { 924 unsigned int i; 925 struct ip_callchain *callchain = sample->callchain; 926 927 if (perf_evsel__has_branch_callstack(evsel)) 928 callchain__lbr_callstack_printf(sample); 929 930 printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr); 931 932 for (i = 0; i < callchain->nr; i++) 933 printf("..... %2d: %016" PRIx64 "\n", 934 i, callchain->ips[i]); 935 } 936 937 static void branch_stack__printf(struct perf_sample *sample) 938 { 939 uint64_t i; 940 941 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); 942 943 for (i = 0; i < sample->branch_stack->nr; i++) { 944 struct branch_entry *e = &sample->branch_stack->entries[i]; 945 946 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n", 947 i, e->from, e->to, 948 (unsigned short)e->flags.cycles, 949 e->flags.mispred ? "M" : " ", 950 e->flags.predicted ? "P" : " ", 951 e->flags.abort ? "A" : " ", 952 e->flags.in_tx ? "T" : " ", 953 (unsigned)e->flags.reserved); 954 } 955 } 956 957 static void regs_dump__printf(u64 mask, u64 *regs) 958 { 959 unsigned rid, i = 0; 960 961 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { 962 u64 val = regs[i++]; 963 964 printf(".... %-5s 0x%" PRIx64 "\n", 965 perf_reg_name(rid), val); 966 } 967 } 968 969 static const char *regs_abi[] = { 970 [PERF_SAMPLE_REGS_ABI_NONE] = "none", 971 [PERF_SAMPLE_REGS_ABI_32] = "32-bit", 972 [PERF_SAMPLE_REGS_ABI_64] = "64-bit", 973 }; 974 975 static inline const char *regs_dump_abi(struct regs_dump *d) 976 { 977 if (d->abi > PERF_SAMPLE_REGS_ABI_64) 978 return "unknown"; 979 980 return regs_abi[d->abi]; 981 } 982 983 static void regs__printf(const char *type, struct regs_dump *regs) 984 { 985 u64 mask = regs->mask; 986 987 printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n", 988 type, 989 mask, 990 regs_dump_abi(regs)); 991 992 regs_dump__printf(mask, regs->regs); 993 } 994 995 static void regs_user__printf(struct perf_sample *sample) 996 { 997 struct regs_dump *user_regs = &sample->user_regs; 998 999 if (user_regs->regs) 1000 regs__printf("user", user_regs); 1001 } 1002 1003 static void regs_intr__printf(struct perf_sample *sample) 1004 { 1005 struct regs_dump *intr_regs = &sample->intr_regs; 1006 1007 if (intr_regs->regs) 1008 regs__printf("intr", intr_regs); 1009 } 1010 1011 static void stack_user__printf(struct stack_dump *dump) 1012 { 1013 printf("... ustack: size %" PRIu64 ", offset 0x%x\n", 1014 dump->size, dump->offset); 1015 } 1016 1017 static void perf_evlist__print_tstamp(struct perf_evlist *evlist, 1018 union perf_event *event, 1019 struct perf_sample *sample) 1020 { 1021 u64 sample_type = __perf_evlist__combined_sample_type(evlist); 1022 1023 if (event->header.type != PERF_RECORD_SAMPLE && 1024 !perf_evlist__sample_id_all(evlist)) { 1025 fputs("-1 -1 ", stdout); 1026 return; 1027 } 1028 1029 if ((sample_type & PERF_SAMPLE_CPU)) 1030 printf("%u ", sample->cpu); 1031 1032 if (sample_type & PERF_SAMPLE_TIME) 1033 printf("%" PRIu64 " ", sample->time); 1034 } 1035 1036 static void sample_read__printf(struct perf_sample *sample, u64 read_format) 1037 { 1038 printf("... sample_read:\n"); 1039 1040 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1041 printf("...... time enabled %016" PRIx64 "\n", 1042 sample->read.time_enabled); 1043 1044 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1045 printf("...... time running %016" PRIx64 "\n", 1046 sample->read.time_running); 1047 1048 if (read_format & PERF_FORMAT_GROUP) { 1049 u64 i; 1050 1051 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr); 1052 1053 for (i = 0; i < sample->read.group.nr; i++) { 1054 struct sample_read_value *value; 1055 1056 value = &sample->read.group.values[i]; 1057 printf("..... id %016" PRIx64 1058 ", value %016" PRIx64 "\n", 1059 value->id, value->value); 1060 } 1061 } else 1062 printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n", 1063 sample->read.one.id, sample->read.one.value); 1064 } 1065 1066 static void dump_event(struct perf_evlist *evlist, union perf_event *event, 1067 u64 file_offset, struct perf_sample *sample) 1068 { 1069 if (!dump_trace) 1070 return; 1071 1072 printf("\n%#" PRIx64 " [%#x]: event: %d\n", 1073 file_offset, event->header.size, event->header.type); 1074 1075 trace_event(event); 1076 if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw) 1077 evlist->trace_event_sample_raw(evlist, event, sample); 1078 1079 if (sample) 1080 perf_evlist__print_tstamp(evlist, event, sample); 1081 1082 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 1083 event->header.size, perf_event__name(event->header.type)); 1084 } 1085 1086 static void dump_sample(struct perf_evsel *evsel, union perf_event *event, 1087 struct perf_sample *sample) 1088 { 1089 u64 sample_type; 1090 1091 if (!dump_trace) 1092 return; 1093 1094 printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", 1095 event->header.misc, sample->pid, sample->tid, sample->ip, 1096 sample->period, sample->addr); 1097 1098 sample_type = evsel->attr.sample_type; 1099 1100 if (evsel__has_callchain(evsel)) 1101 callchain__printf(evsel, sample); 1102 1103 if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !perf_evsel__has_branch_callstack(evsel)) 1104 branch_stack__printf(sample); 1105 1106 if (sample_type & PERF_SAMPLE_REGS_USER) 1107 regs_user__printf(sample); 1108 1109 if (sample_type & PERF_SAMPLE_REGS_INTR) 1110 regs_intr__printf(sample); 1111 1112 if (sample_type & PERF_SAMPLE_STACK_USER) 1113 stack_user__printf(&sample->user_stack); 1114 1115 if (sample_type & PERF_SAMPLE_WEIGHT) 1116 printf("... weight: %" PRIu64 "\n", sample->weight); 1117 1118 if (sample_type & PERF_SAMPLE_DATA_SRC) 1119 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src); 1120 1121 if (sample_type & PERF_SAMPLE_PHYS_ADDR) 1122 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr); 1123 1124 if (sample_type & PERF_SAMPLE_TRANSACTION) 1125 printf("... transaction: %" PRIx64 "\n", sample->transaction); 1126 1127 if (sample_type & PERF_SAMPLE_READ) 1128 sample_read__printf(sample, evsel->attr.read_format); 1129 } 1130 1131 static void dump_read(struct perf_evsel *evsel, union perf_event *event) 1132 { 1133 struct read_event *read_event = &event->read; 1134 u64 read_format; 1135 1136 if (!dump_trace) 1137 return; 1138 1139 printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid, 1140 evsel ? perf_evsel__name(evsel) : "FAIL", 1141 event->read.value); 1142 1143 read_format = evsel->attr.read_format; 1144 1145 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1146 printf("... time enabled : %" PRIu64 "\n", read_event->time_enabled); 1147 1148 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 1149 printf("... time running : %" PRIu64 "\n", read_event->time_running); 1150 1151 if (read_format & PERF_FORMAT_ID) 1152 printf("... id : %" PRIu64 "\n", read_event->id); 1153 } 1154 1155 static struct machine *machines__find_for_cpumode(struct machines *machines, 1156 union perf_event *event, 1157 struct perf_sample *sample) 1158 { 1159 struct machine *machine; 1160 1161 if (perf_guest && 1162 ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || 1163 (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) { 1164 u32 pid; 1165 1166 if (event->header.type == PERF_RECORD_MMAP 1167 || event->header.type == PERF_RECORD_MMAP2) 1168 pid = event->mmap.pid; 1169 else 1170 pid = sample->pid; 1171 1172 machine = machines__find(machines, pid); 1173 if (!machine) 1174 machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID); 1175 return machine; 1176 } 1177 1178 return &machines->host; 1179 } 1180 1181 static int deliver_sample_value(struct perf_evlist *evlist, 1182 struct perf_tool *tool, 1183 union perf_event *event, 1184 struct perf_sample *sample, 1185 struct sample_read_value *v, 1186 struct machine *machine) 1187 { 1188 struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id); 1189 1190 if (sid) { 1191 sample->id = v->id; 1192 sample->period = v->value - sid->period; 1193 sid->period = v->value; 1194 } 1195 1196 if (!sid || sid->evsel == NULL) { 1197 ++evlist->stats.nr_unknown_id; 1198 return 0; 1199 } 1200 1201 return tool->sample(tool, event, sample, sid->evsel, machine); 1202 } 1203 1204 static int deliver_sample_group(struct perf_evlist *evlist, 1205 struct perf_tool *tool, 1206 union perf_event *event, 1207 struct perf_sample *sample, 1208 struct machine *machine) 1209 { 1210 int ret = -EINVAL; 1211 u64 i; 1212 1213 for (i = 0; i < sample->read.group.nr; i++) { 1214 ret = deliver_sample_value(evlist, tool, event, sample, 1215 &sample->read.group.values[i], 1216 machine); 1217 if (ret) 1218 break; 1219 } 1220 1221 return ret; 1222 } 1223 1224 static int 1225 perf_evlist__deliver_sample(struct perf_evlist *evlist, 1226 struct perf_tool *tool, 1227 union perf_event *event, 1228 struct perf_sample *sample, 1229 struct perf_evsel *evsel, 1230 struct machine *machine) 1231 { 1232 /* We know evsel != NULL. */ 1233 u64 sample_type = evsel->attr.sample_type; 1234 u64 read_format = evsel->attr.read_format; 1235 1236 /* Standard sample delivery. */ 1237 if (!(sample_type & PERF_SAMPLE_READ)) 1238 return tool->sample(tool, event, sample, evsel, machine); 1239 1240 /* For PERF_SAMPLE_READ we have either single or group mode. */ 1241 if (read_format & PERF_FORMAT_GROUP) 1242 return deliver_sample_group(evlist, tool, event, sample, 1243 machine); 1244 else 1245 return deliver_sample_value(evlist, tool, event, sample, 1246 &sample->read.one, machine); 1247 } 1248 1249 static int machines__deliver_event(struct machines *machines, 1250 struct perf_evlist *evlist, 1251 union perf_event *event, 1252 struct perf_sample *sample, 1253 struct perf_tool *tool, u64 file_offset) 1254 { 1255 struct perf_evsel *evsel; 1256 struct machine *machine; 1257 1258 dump_event(evlist, event, file_offset, sample); 1259 1260 evsel = perf_evlist__id2evsel(evlist, sample->id); 1261 1262 machine = machines__find_for_cpumode(machines, event, sample); 1263 1264 switch (event->header.type) { 1265 case PERF_RECORD_SAMPLE: 1266 if (evsel == NULL) { 1267 ++evlist->stats.nr_unknown_id; 1268 return 0; 1269 } 1270 dump_sample(evsel, event, sample); 1271 if (machine == NULL) { 1272 ++evlist->stats.nr_unprocessable_samples; 1273 return 0; 1274 } 1275 return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine); 1276 case PERF_RECORD_MMAP: 1277 return tool->mmap(tool, event, sample, machine); 1278 case PERF_RECORD_MMAP2: 1279 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT) 1280 ++evlist->stats.nr_proc_map_timeout; 1281 return tool->mmap2(tool, event, sample, machine); 1282 case PERF_RECORD_COMM: 1283 return tool->comm(tool, event, sample, machine); 1284 case PERF_RECORD_NAMESPACES: 1285 return tool->namespaces(tool, event, sample, machine); 1286 case PERF_RECORD_FORK: 1287 return tool->fork(tool, event, sample, machine); 1288 case PERF_RECORD_EXIT: 1289 return tool->exit(tool, event, sample, machine); 1290 case PERF_RECORD_LOST: 1291 if (tool->lost == perf_event__process_lost) 1292 evlist->stats.total_lost += event->lost.lost; 1293 return tool->lost(tool, event, sample, machine); 1294 case PERF_RECORD_LOST_SAMPLES: 1295 if (tool->lost_samples == perf_event__process_lost_samples) 1296 evlist->stats.total_lost_samples += event->lost_samples.lost; 1297 return tool->lost_samples(tool, event, sample, machine); 1298 case PERF_RECORD_READ: 1299 dump_read(evsel, event); 1300 return tool->read(tool, event, sample, evsel, machine); 1301 case PERF_RECORD_THROTTLE: 1302 return tool->throttle(tool, event, sample, machine); 1303 case PERF_RECORD_UNTHROTTLE: 1304 return tool->unthrottle(tool, event, sample, machine); 1305 case PERF_RECORD_AUX: 1306 if (tool->aux == perf_event__process_aux) { 1307 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) 1308 evlist->stats.total_aux_lost += 1; 1309 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL) 1310 evlist->stats.total_aux_partial += 1; 1311 } 1312 return tool->aux(tool, event, sample, machine); 1313 case PERF_RECORD_ITRACE_START: 1314 return tool->itrace_start(tool, event, sample, machine); 1315 case PERF_RECORD_SWITCH: 1316 case PERF_RECORD_SWITCH_CPU_WIDE: 1317 return tool->context_switch(tool, event, sample, machine); 1318 case PERF_RECORD_KSYMBOL: 1319 return tool->ksymbol(tool, event, sample, machine); 1320 case PERF_RECORD_BPF_EVENT: 1321 return tool->bpf_event(tool, event, sample, machine); 1322 default: 1323 ++evlist->stats.nr_unknown_events; 1324 return -1; 1325 } 1326 } 1327 1328 static int perf_session__deliver_event(struct perf_session *session, 1329 union perf_event *event, 1330 struct perf_tool *tool, 1331 u64 file_offset) 1332 { 1333 struct perf_sample sample; 1334 int ret; 1335 1336 ret = perf_evlist__parse_sample(session->evlist, event, &sample); 1337 if (ret) { 1338 pr_err("Can't parse sample, err = %d\n", ret); 1339 return ret; 1340 } 1341 1342 ret = auxtrace__process_event(session, event, &sample, tool); 1343 if (ret < 0) 1344 return ret; 1345 if (ret > 0) 1346 return 0; 1347 1348 return machines__deliver_event(&session->machines, session->evlist, 1349 event, &sample, tool, file_offset); 1350 } 1351 1352 static s64 perf_session__process_user_event(struct perf_session *session, 1353 union perf_event *event, 1354 u64 file_offset) 1355 { 1356 struct ordered_events *oe = &session->ordered_events; 1357 struct perf_tool *tool = session->tool; 1358 struct perf_sample sample = { .time = 0, }; 1359 int fd = perf_data__fd(session->data); 1360 int err; 1361 1362 dump_event(session->evlist, event, file_offset, &sample); 1363 1364 /* These events are processed right away */ 1365 switch (event->header.type) { 1366 case PERF_RECORD_HEADER_ATTR: 1367 err = tool->attr(tool, event, &session->evlist); 1368 if (err == 0) { 1369 perf_session__set_id_hdr_size(session); 1370 perf_session__set_comm_exec(session); 1371 } 1372 return err; 1373 case PERF_RECORD_EVENT_UPDATE: 1374 return tool->event_update(tool, event, &session->evlist); 1375 case PERF_RECORD_HEADER_EVENT_TYPE: 1376 /* 1377 * Depreceated, but we need to handle it for sake 1378 * of old data files create in pipe mode. 1379 */ 1380 return 0; 1381 case PERF_RECORD_HEADER_TRACING_DATA: 1382 /* setup for reading amidst mmap */ 1383 lseek(fd, file_offset, SEEK_SET); 1384 return tool->tracing_data(session, event); 1385 case PERF_RECORD_HEADER_BUILD_ID: 1386 return tool->build_id(session, event); 1387 case PERF_RECORD_FINISHED_ROUND: 1388 return tool->finished_round(tool, event, oe); 1389 case PERF_RECORD_ID_INDEX: 1390 return tool->id_index(session, event); 1391 case PERF_RECORD_AUXTRACE_INFO: 1392 return tool->auxtrace_info(session, event); 1393 case PERF_RECORD_AUXTRACE: 1394 /* setup for reading amidst mmap */ 1395 lseek(fd, file_offset + event->header.size, SEEK_SET); 1396 return tool->auxtrace(session, event); 1397 case PERF_RECORD_AUXTRACE_ERROR: 1398 perf_session__auxtrace_error_inc(session, event); 1399 return tool->auxtrace_error(session, event); 1400 case PERF_RECORD_THREAD_MAP: 1401 return tool->thread_map(session, event); 1402 case PERF_RECORD_CPU_MAP: 1403 return tool->cpu_map(session, event); 1404 case PERF_RECORD_STAT_CONFIG: 1405 return tool->stat_config(session, event); 1406 case PERF_RECORD_STAT: 1407 return tool->stat(session, event); 1408 case PERF_RECORD_STAT_ROUND: 1409 return tool->stat_round(session, event); 1410 case PERF_RECORD_TIME_CONV: 1411 session->time_conv = event->time_conv; 1412 return tool->time_conv(session, event); 1413 case PERF_RECORD_HEADER_FEATURE: 1414 return tool->feature(session, event); 1415 default: 1416 return -EINVAL; 1417 } 1418 } 1419 1420 int perf_session__deliver_synth_event(struct perf_session *session, 1421 union perf_event *event, 1422 struct perf_sample *sample) 1423 { 1424 struct perf_evlist *evlist = session->evlist; 1425 struct perf_tool *tool = session->tool; 1426 1427 events_stats__inc(&evlist->stats, event->header.type); 1428 1429 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1430 return perf_session__process_user_event(session, event, 0); 1431 1432 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0); 1433 } 1434 1435 static void event_swap(union perf_event *event, bool sample_id_all) 1436 { 1437 perf_event__swap_op swap; 1438 1439 swap = perf_event__swap_ops[event->header.type]; 1440 if (swap) 1441 swap(event, sample_id_all); 1442 } 1443 1444 int perf_session__peek_event(struct perf_session *session, off_t file_offset, 1445 void *buf, size_t buf_sz, 1446 union perf_event **event_ptr, 1447 struct perf_sample *sample) 1448 { 1449 union perf_event *event; 1450 size_t hdr_sz, rest; 1451 int fd; 1452 1453 if (session->one_mmap && !session->header.needs_swap) { 1454 event = file_offset - session->one_mmap_offset + 1455 session->one_mmap_addr; 1456 goto out_parse_sample; 1457 } 1458 1459 if (perf_data__is_pipe(session->data)) 1460 return -1; 1461 1462 fd = perf_data__fd(session->data); 1463 hdr_sz = sizeof(struct perf_event_header); 1464 1465 if (buf_sz < hdr_sz) 1466 return -1; 1467 1468 if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 || 1469 readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz) 1470 return -1; 1471 1472 event = (union perf_event *)buf; 1473 1474 if (session->header.needs_swap) 1475 perf_event_header__bswap(&event->header); 1476 1477 if (event->header.size < hdr_sz || event->header.size > buf_sz) 1478 return -1; 1479 1480 rest = event->header.size - hdr_sz; 1481 1482 if (readn(fd, buf, rest) != (ssize_t)rest) 1483 return -1; 1484 1485 if (session->header.needs_swap) 1486 event_swap(event, perf_evlist__sample_id_all(session->evlist)); 1487 1488 out_parse_sample: 1489 1490 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START && 1491 perf_evlist__parse_sample(session->evlist, event, sample)) 1492 return -1; 1493 1494 *event_ptr = event; 1495 1496 return 0; 1497 } 1498 1499 static s64 perf_session__process_event(struct perf_session *session, 1500 union perf_event *event, u64 file_offset) 1501 { 1502 struct perf_evlist *evlist = session->evlist; 1503 struct perf_tool *tool = session->tool; 1504 int ret; 1505 1506 if (session->header.needs_swap) 1507 event_swap(event, perf_evlist__sample_id_all(evlist)); 1508 1509 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1510 return -EINVAL; 1511 1512 events_stats__inc(&evlist->stats, event->header.type); 1513 1514 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 1515 return perf_session__process_user_event(session, event, file_offset); 1516 1517 if (tool->ordered_events) { 1518 u64 timestamp = -1ULL; 1519 1520 ret = perf_evlist__parse_sample_timestamp(evlist, event, ×tamp); 1521 if (ret && ret != -1) 1522 return ret; 1523 1524 ret = perf_session__queue_event(session, event, timestamp, file_offset); 1525 if (ret != -ETIME) 1526 return ret; 1527 } 1528 1529 return perf_session__deliver_event(session, event, tool, file_offset); 1530 } 1531 1532 void perf_event_header__bswap(struct perf_event_header *hdr) 1533 { 1534 hdr->type = bswap_32(hdr->type); 1535 hdr->misc = bswap_16(hdr->misc); 1536 hdr->size = bswap_16(hdr->size); 1537 } 1538 1539 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 1540 { 1541 return machine__findnew_thread(&session->machines.host, -1, pid); 1542 } 1543 1544 /* 1545 * Threads are identified by pid and tid, and the idle task has pid == tid == 0. 1546 * So here a single thread is created for that, but actually there is a separate 1547 * idle task per cpu, so there should be one 'struct thread' per cpu, but there 1548 * is only 1. That causes problems for some tools, requiring workarounds. For 1549 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu(). 1550 */ 1551 int perf_session__register_idle_thread(struct perf_session *session) 1552 { 1553 struct thread *thread; 1554 int err = 0; 1555 1556 thread = machine__findnew_thread(&session->machines.host, 0, 0); 1557 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) { 1558 pr_err("problem inserting idle task.\n"); 1559 err = -1; 1560 } 1561 1562 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) { 1563 pr_err("problem inserting idle task.\n"); 1564 err = -1; 1565 } 1566 1567 /* machine__findnew_thread() got the thread, so put it */ 1568 thread__put(thread); 1569 return err; 1570 } 1571 1572 static void 1573 perf_session__warn_order(const struct perf_session *session) 1574 { 1575 const struct ordered_events *oe = &session->ordered_events; 1576 struct perf_evsel *evsel; 1577 bool should_warn = true; 1578 1579 evlist__for_each_entry(session->evlist, evsel) { 1580 if (evsel->attr.write_backward) 1581 should_warn = false; 1582 } 1583 1584 if (!should_warn) 1585 return; 1586 if (oe->nr_unordered_events != 0) 1587 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events); 1588 } 1589 1590 static void perf_session__warn_about_errors(const struct perf_session *session) 1591 { 1592 const struct events_stats *stats = &session->evlist->stats; 1593 1594 if (session->tool->lost == perf_event__process_lost && 1595 stats->nr_events[PERF_RECORD_LOST] != 0) { 1596 ui__warning("Processed %d events and lost %d chunks!\n\n" 1597 "Check IO/CPU overload!\n\n", 1598 stats->nr_events[0], 1599 stats->nr_events[PERF_RECORD_LOST]); 1600 } 1601 1602 if (session->tool->lost_samples == perf_event__process_lost_samples) { 1603 double drop_rate; 1604 1605 drop_rate = (double)stats->total_lost_samples / 1606 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples); 1607 if (drop_rate > 0.05) { 1608 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n", 1609 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples, 1610 drop_rate * 100.0); 1611 } 1612 } 1613 1614 if (session->tool->aux == perf_event__process_aux && 1615 stats->total_aux_lost != 0) { 1616 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n", 1617 stats->total_aux_lost, 1618 stats->nr_events[PERF_RECORD_AUX]); 1619 } 1620 1621 if (session->tool->aux == perf_event__process_aux && 1622 stats->total_aux_partial != 0) { 1623 bool vmm_exclusive = false; 1624 1625 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive", 1626 &vmm_exclusive); 1627 1628 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n" 1629 "Are you running a KVM guest in the background?%s\n\n", 1630 stats->total_aux_partial, 1631 stats->nr_events[PERF_RECORD_AUX], 1632 vmm_exclusive ? 1633 "\nReloading kvm_intel module with vmm_exclusive=0\n" 1634 "will reduce the gaps to only guest's timeslices." : 1635 ""); 1636 } 1637 1638 if (stats->nr_unknown_events != 0) { 1639 ui__warning("Found %u unknown events!\n\n" 1640 "Is this an older tool processing a perf.data " 1641 "file generated by a more recent tool?\n\n" 1642 "If that is not the case, consider " 1643 "reporting to linux-kernel@vger.kernel.org.\n\n", 1644 stats->nr_unknown_events); 1645 } 1646 1647 if (stats->nr_unknown_id != 0) { 1648 ui__warning("%u samples with id not present in the header\n", 1649 stats->nr_unknown_id); 1650 } 1651 1652 if (stats->nr_invalid_chains != 0) { 1653 ui__warning("Found invalid callchains!\n\n" 1654 "%u out of %u events were discarded for this reason.\n\n" 1655 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1656 stats->nr_invalid_chains, 1657 stats->nr_events[PERF_RECORD_SAMPLE]); 1658 } 1659 1660 if (stats->nr_unprocessable_samples != 0) { 1661 ui__warning("%u unprocessable samples recorded.\n" 1662 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1663 stats->nr_unprocessable_samples); 1664 } 1665 1666 perf_session__warn_order(session); 1667 1668 events_stats__auxtrace_error_warn(stats); 1669 1670 if (stats->nr_proc_map_timeout != 0) { 1671 ui__warning("%d map information files for pre-existing threads were\n" 1672 "not processed, if there are samples for addresses they\n" 1673 "will not be resolved, you may find out which are these\n" 1674 "threads by running with -v and redirecting the output\n" 1675 "to a file.\n" 1676 "The time limit to process proc map is too short?\n" 1677 "Increase it by --proc-map-timeout\n", 1678 stats->nr_proc_map_timeout); 1679 } 1680 } 1681 1682 static int perf_session__flush_thread_stack(struct thread *thread, 1683 void *p __maybe_unused) 1684 { 1685 return thread_stack__flush(thread); 1686 } 1687 1688 static int perf_session__flush_thread_stacks(struct perf_session *session) 1689 { 1690 return machines__for_each_thread(&session->machines, 1691 perf_session__flush_thread_stack, 1692 NULL); 1693 } 1694 1695 volatile int session_done; 1696 1697 static int __perf_session__process_pipe_events(struct perf_session *session) 1698 { 1699 struct ordered_events *oe = &session->ordered_events; 1700 struct perf_tool *tool = session->tool; 1701 int fd = perf_data__fd(session->data); 1702 union perf_event *event; 1703 uint32_t size, cur_size = 0; 1704 void *buf = NULL; 1705 s64 skip = 0; 1706 u64 head; 1707 ssize_t err; 1708 void *p; 1709 1710 perf_tool__fill_defaults(tool); 1711 1712 head = 0; 1713 cur_size = sizeof(union perf_event); 1714 1715 buf = malloc(cur_size); 1716 if (!buf) 1717 return -errno; 1718 ordered_events__set_copy_on_queue(oe, true); 1719 more: 1720 event = buf; 1721 err = readn(fd, event, sizeof(struct perf_event_header)); 1722 if (err <= 0) { 1723 if (err == 0) 1724 goto done; 1725 1726 pr_err("failed to read event header\n"); 1727 goto out_err; 1728 } 1729 1730 if (session->header.needs_swap) 1731 perf_event_header__bswap(&event->header); 1732 1733 size = event->header.size; 1734 if (size < sizeof(struct perf_event_header)) { 1735 pr_err("bad event header size\n"); 1736 goto out_err; 1737 } 1738 1739 if (size > cur_size) { 1740 void *new = realloc(buf, size); 1741 if (!new) { 1742 pr_err("failed to allocate memory to read event\n"); 1743 goto out_err; 1744 } 1745 buf = new; 1746 cur_size = size; 1747 event = buf; 1748 } 1749 p = event; 1750 p += sizeof(struct perf_event_header); 1751 1752 if (size - sizeof(struct perf_event_header)) { 1753 err = readn(fd, p, size - sizeof(struct perf_event_header)); 1754 if (err <= 0) { 1755 if (err == 0) { 1756 pr_err("unexpected end of event stream\n"); 1757 goto done; 1758 } 1759 1760 pr_err("failed to read event data\n"); 1761 goto out_err; 1762 } 1763 } 1764 1765 if ((skip = perf_session__process_event(session, event, head)) < 0) { 1766 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1767 head, event->header.size, event->header.type); 1768 err = -EINVAL; 1769 goto out_err; 1770 } 1771 1772 head += size; 1773 1774 if (skip > 0) 1775 head += skip; 1776 1777 if (!session_done()) 1778 goto more; 1779 done: 1780 /* do the final flush for ordered samples */ 1781 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 1782 if (err) 1783 goto out_err; 1784 err = auxtrace__flush_events(session, tool); 1785 if (err) 1786 goto out_err; 1787 err = perf_session__flush_thread_stacks(session); 1788 out_err: 1789 free(buf); 1790 if (!tool->no_warn) 1791 perf_session__warn_about_errors(session); 1792 ordered_events__free(&session->ordered_events); 1793 auxtrace__free_events(session); 1794 return err; 1795 } 1796 1797 static union perf_event * 1798 fetch_mmaped_event(struct perf_session *session, 1799 u64 head, size_t mmap_size, char *buf) 1800 { 1801 union perf_event *event; 1802 1803 /* 1804 * Ensure we have enough space remaining to read 1805 * the size of the event in the headers. 1806 */ 1807 if (head + sizeof(event->header) > mmap_size) 1808 return NULL; 1809 1810 event = (union perf_event *)(buf + head); 1811 1812 if (session->header.needs_swap) 1813 perf_event_header__bswap(&event->header); 1814 1815 if (head + event->header.size > mmap_size) { 1816 /* We're not fetching the event so swap back again */ 1817 if (session->header.needs_swap) 1818 perf_event_header__bswap(&event->header); 1819 return NULL; 1820 } 1821 1822 return event; 1823 } 1824 1825 /* 1826 * On 64bit we can mmap the data file in one go. No need for tiny mmap 1827 * slices. On 32bit we use 32MB. 1828 */ 1829 #if BITS_PER_LONG == 64 1830 #define MMAP_SIZE ULLONG_MAX 1831 #define NUM_MMAPS 1 1832 #else 1833 #define MMAP_SIZE (32 * 1024 * 1024ULL) 1834 #define NUM_MMAPS 128 1835 #endif 1836 1837 struct reader { 1838 int fd; 1839 u64 data_size; 1840 u64 data_offset; 1841 }; 1842 1843 static int 1844 reader__process_events(struct reader *rd, struct perf_session *session, 1845 struct ui_progress *prog) 1846 { 1847 u64 data_size = rd->data_size; 1848 u64 head, page_offset, file_offset, file_pos, size; 1849 int err = 0, mmap_prot, mmap_flags, map_idx = 0; 1850 size_t mmap_size; 1851 char *buf, *mmaps[NUM_MMAPS]; 1852 union perf_event *event; 1853 s64 skip; 1854 1855 page_offset = page_size * (rd->data_offset / page_size); 1856 file_offset = page_offset; 1857 head = rd->data_offset - page_offset; 1858 1859 ui_progress__init_size(prog, data_size, "Processing events..."); 1860 1861 data_size += rd->data_offset; 1862 1863 mmap_size = MMAP_SIZE; 1864 if (mmap_size > data_size) { 1865 mmap_size = data_size; 1866 session->one_mmap = true; 1867 } 1868 1869 memset(mmaps, 0, sizeof(mmaps)); 1870 1871 mmap_prot = PROT_READ; 1872 mmap_flags = MAP_SHARED; 1873 1874 if (session->header.needs_swap) { 1875 mmap_prot |= PROT_WRITE; 1876 mmap_flags = MAP_PRIVATE; 1877 } 1878 remap: 1879 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd, 1880 file_offset); 1881 if (buf == MAP_FAILED) { 1882 pr_err("failed to mmap file\n"); 1883 err = -errno; 1884 goto out; 1885 } 1886 mmaps[map_idx] = buf; 1887 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 1888 file_pos = file_offset + head; 1889 if (session->one_mmap) { 1890 session->one_mmap_addr = buf; 1891 session->one_mmap_offset = file_offset; 1892 } 1893 1894 more: 1895 event = fetch_mmaped_event(session, head, mmap_size, buf); 1896 if (!event) { 1897 if (mmaps[map_idx]) { 1898 munmap(mmaps[map_idx], mmap_size); 1899 mmaps[map_idx] = NULL; 1900 } 1901 1902 page_offset = page_size * (head / page_size); 1903 file_offset += page_offset; 1904 head -= page_offset; 1905 goto remap; 1906 } 1907 1908 size = event->header.size; 1909 1910 if (size < sizeof(struct perf_event_header) || 1911 (skip = perf_session__process_event(session, event, file_pos)) < 0) { 1912 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1913 file_offset + head, event->header.size, 1914 event->header.type); 1915 err = -EINVAL; 1916 goto out; 1917 } 1918 1919 if (skip) 1920 size += skip; 1921 1922 head += size; 1923 file_pos += size; 1924 1925 ui_progress__update(prog, size); 1926 1927 if (session_done()) 1928 goto out; 1929 1930 if (file_pos < data_size) 1931 goto more; 1932 1933 out: 1934 return err; 1935 } 1936 1937 static int __perf_session__process_events(struct perf_session *session) 1938 { 1939 struct reader rd = { 1940 .fd = perf_data__fd(session->data), 1941 .data_size = session->header.data_size, 1942 .data_offset = session->header.data_offset, 1943 }; 1944 struct ordered_events *oe = &session->ordered_events; 1945 struct perf_tool *tool = session->tool; 1946 struct ui_progress prog; 1947 int err; 1948 1949 perf_tool__fill_defaults(tool); 1950 1951 if (rd.data_size == 0) 1952 return -1; 1953 1954 ui_progress__init_size(&prog, rd.data_size, "Processing events..."); 1955 1956 err = reader__process_events(&rd, session, &prog); 1957 if (err) 1958 goto out_err; 1959 /* do the final flush for ordered samples */ 1960 err = ordered_events__flush(oe, OE_FLUSH__FINAL); 1961 if (err) 1962 goto out_err; 1963 err = auxtrace__flush_events(session, tool); 1964 if (err) 1965 goto out_err; 1966 err = perf_session__flush_thread_stacks(session); 1967 out_err: 1968 ui_progress__finish(); 1969 if (!tool->no_warn) 1970 perf_session__warn_about_errors(session); 1971 /* 1972 * We may switching perf.data output, make ordered_events 1973 * reusable. 1974 */ 1975 ordered_events__reinit(&session->ordered_events); 1976 auxtrace__free_events(session); 1977 session->one_mmap = false; 1978 return err; 1979 } 1980 1981 int perf_session__process_events(struct perf_session *session) 1982 { 1983 if (perf_session__register_idle_thread(session) < 0) 1984 return -ENOMEM; 1985 1986 if (perf_data__is_pipe(session->data)) 1987 return __perf_session__process_pipe_events(session); 1988 1989 return __perf_session__process_events(session); 1990 } 1991 1992 bool perf_session__has_traces(struct perf_session *session, const char *msg) 1993 { 1994 struct perf_evsel *evsel; 1995 1996 evlist__for_each_entry(session->evlist, evsel) { 1997 if (evsel->attr.type == PERF_TYPE_TRACEPOINT) 1998 return true; 1999 } 2000 2001 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 2002 return false; 2003 } 2004 2005 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr) 2006 { 2007 char *bracket; 2008 struct ref_reloc_sym *ref; 2009 struct kmap *kmap; 2010 2011 ref = zalloc(sizeof(struct ref_reloc_sym)); 2012 if (ref == NULL) 2013 return -ENOMEM; 2014 2015 ref->name = strdup(symbol_name); 2016 if (ref->name == NULL) { 2017 free(ref); 2018 return -ENOMEM; 2019 } 2020 2021 bracket = strchr(ref->name, ']'); 2022 if (bracket) 2023 *bracket = '\0'; 2024 2025 ref->addr = addr; 2026 2027 kmap = map__kmap(map); 2028 if (kmap) 2029 kmap->ref_reloc_sym = ref; 2030 2031 return 0; 2032 } 2033 2034 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp) 2035 { 2036 return machines__fprintf_dsos(&session->machines, fp); 2037 } 2038 2039 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp, 2040 bool (skip)(struct dso *dso, int parm), int parm) 2041 { 2042 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm); 2043 } 2044 2045 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 2046 { 2047 size_t ret; 2048 const char *msg = ""; 2049 2050 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) 2051 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)"; 2052 2053 ret = fprintf(fp, "\nAggregated stats:%s\n", msg); 2054 2055 ret += events_stats__fprintf(&session->evlist->stats, fp); 2056 return ret; 2057 } 2058 2059 size_t perf_session__fprintf(struct perf_session *session, FILE *fp) 2060 { 2061 /* 2062 * FIXME: Here we have to actually print all the machines in this 2063 * session, not just the host... 2064 */ 2065 return machine__fprintf(&session->machines.host, fp); 2066 } 2067 2068 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 2069 unsigned int type) 2070 { 2071 struct perf_evsel *pos; 2072 2073 evlist__for_each_entry(session->evlist, pos) { 2074 if (pos->attr.type == type) 2075 return pos; 2076 } 2077 return NULL; 2078 } 2079 2080 int perf_session__cpu_bitmap(struct perf_session *session, 2081 const char *cpu_list, unsigned long *cpu_bitmap) 2082 { 2083 int i, err = -1; 2084 struct cpu_map *map; 2085 2086 for (i = 0; i < PERF_TYPE_MAX; ++i) { 2087 struct perf_evsel *evsel; 2088 2089 evsel = perf_session__find_first_evtype(session, i); 2090 if (!evsel) 2091 continue; 2092 2093 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) { 2094 pr_err("File does not contain CPU events. " 2095 "Remove -C option to proceed.\n"); 2096 return -1; 2097 } 2098 } 2099 2100 map = cpu_map__new(cpu_list); 2101 if (map == NULL) { 2102 pr_err("Invalid cpu_list\n"); 2103 return -1; 2104 } 2105 2106 for (i = 0; i < map->nr; i++) { 2107 int cpu = map->map[i]; 2108 2109 if (cpu >= MAX_NR_CPUS) { 2110 pr_err("Requested CPU %d too large. " 2111 "Consider raising MAX_NR_CPUS\n", cpu); 2112 goto out_delete_map; 2113 } 2114 2115 set_bit(cpu, cpu_bitmap); 2116 } 2117 2118 err = 0; 2119 2120 out_delete_map: 2121 cpu_map__put(map); 2122 return err; 2123 } 2124 2125 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 2126 bool full) 2127 { 2128 if (session == NULL || fp == NULL) 2129 return; 2130 2131 fprintf(fp, "# ========\n"); 2132 perf_header__fprintf_info(session, fp, full); 2133 fprintf(fp, "# ========\n#\n"); 2134 } 2135 2136 2137 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 2138 const struct perf_evsel_str_handler *assocs, 2139 size_t nr_assocs) 2140 { 2141 struct perf_evsel *evsel; 2142 size_t i; 2143 int err; 2144 2145 for (i = 0; i < nr_assocs; i++) { 2146 /* 2147 * Adding a handler for an event not in the session, 2148 * just ignore it. 2149 */ 2150 evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name); 2151 if (evsel == NULL) 2152 continue; 2153 2154 err = -EEXIST; 2155 if (evsel->handler != NULL) 2156 goto out; 2157 evsel->handler = assocs[i].handler; 2158 } 2159 2160 err = 0; 2161 out: 2162 return err; 2163 } 2164 2165 int perf_event__process_id_index(struct perf_session *session, 2166 union perf_event *event) 2167 { 2168 struct perf_evlist *evlist = session->evlist; 2169 struct id_index_event *ie = &event->id_index; 2170 size_t i, nr, max_nr; 2171 2172 max_nr = (ie->header.size - sizeof(struct id_index_event)) / 2173 sizeof(struct id_index_entry); 2174 nr = ie->nr; 2175 if (nr > max_nr) 2176 return -EINVAL; 2177 2178 if (dump_trace) 2179 fprintf(stdout, " nr: %zu\n", nr); 2180 2181 for (i = 0; i < nr; i++) { 2182 struct id_index_entry *e = &ie->entries[i]; 2183 struct perf_sample_id *sid; 2184 2185 if (dump_trace) { 2186 fprintf(stdout, " ... id: %"PRIu64, e->id); 2187 fprintf(stdout, " idx: %"PRIu64, e->idx); 2188 fprintf(stdout, " cpu: %"PRId64, e->cpu); 2189 fprintf(stdout, " tid: %"PRId64"\n", e->tid); 2190 } 2191 2192 sid = perf_evlist__id2sid(evlist, e->id); 2193 if (!sid) 2194 return -ENOENT; 2195 sid->idx = e->idx; 2196 sid->cpu = e->cpu; 2197 sid->tid = e->tid; 2198 } 2199 return 0; 2200 } 2201 2202 int perf_event__synthesize_id_index(struct perf_tool *tool, 2203 perf_event__handler_t process, 2204 struct perf_evlist *evlist, 2205 struct machine *machine) 2206 { 2207 union perf_event *ev; 2208 struct perf_evsel *evsel; 2209 size_t nr = 0, i = 0, sz, max_nr, n; 2210 int err; 2211 2212 pr_debug2("Synthesizing id index\n"); 2213 2214 max_nr = (UINT16_MAX - sizeof(struct id_index_event)) / 2215 sizeof(struct id_index_entry); 2216 2217 evlist__for_each_entry(evlist, evsel) 2218 nr += evsel->ids; 2219 2220 n = nr > max_nr ? max_nr : nr; 2221 sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry); 2222 ev = zalloc(sz); 2223 if (!ev) 2224 return -ENOMEM; 2225 2226 ev->id_index.header.type = PERF_RECORD_ID_INDEX; 2227 ev->id_index.header.size = sz; 2228 ev->id_index.nr = n; 2229 2230 evlist__for_each_entry(evlist, evsel) { 2231 u32 j; 2232 2233 for (j = 0; j < evsel->ids; j++) { 2234 struct id_index_entry *e; 2235 struct perf_sample_id *sid; 2236 2237 if (i >= n) { 2238 err = process(tool, ev, NULL, machine); 2239 if (err) 2240 goto out_err; 2241 nr -= n; 2242 i = 0; 2243 } 2244 2245 e = &ev->id_index.entries[i++]; 2246 2247 e->id = evsel->id[j]; 2248 2249 sid = perf_evlist__id2sid(evlist, e->id); 2250 if (!sid) { 2251 free(ev); 2252 return -ENOENT; 2253 } 2254 2255 e->idx = sid->idx; 2256 e->cpu = sid->cpu; 2257 e->tid = sid->tid; 2258 } 2259 } 2260 2261 sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry); 2262 ev->id_index.header.size = sz; 2263 ev->id_index.nr = nr; 2264 2265 err = process(tool, ev, NULL, machine); 2266 out_err: 2267 free(ev); 2268 2269 return err; 2270 } 2271