// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

#define INTEL_PT_CFG_PASS_THRU	BIT_ULL(0)
#define INTEL_PT_CFG_PWR_EVT_EN	BIT_ULL(4)
#define INTEL_PT_CFG_BRANCH_EN	BIT_ULL(13)
#define INTEL_PT_CFG_EVT_EN	BIT_ULL(31)
#define INTEL_PT_CFG_TNT_DIS	BIT_ULL(55)

struct range {
	u64 start;
	u64 end;
};

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	bool use_thread_stack;
	bool callstack;
	bool cap_event_trace;
	unsigned int br_stack_sz;
	unsigned int br_stack_sz_plus;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;
	u64 first_timestamp;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;
	u64 psb_id;

	bool single_pebs;
	bool sample_pebs;
	struct evsel *pebs_evsel;

	u64 evt_sample_type;
	u64 evt_id;

	u64 iflag_chg_sample_type;
	u64 iflag_chg_id;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;
	int max_loops;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;

	struct range *time_ranges;
	unsigned int range_cnt;

	struct ip_callchain *chain;
	struct branch_stack *br_stack;

	u64 dflt_tsc_offset;
	struct rb_root vmcs_info;
};

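/*
 * Per-queue state used to synchronize decoding with context switch events
 * when sync_switch is in use - refer intel_pt_queue->switch_state.
 */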
157 enum switch_state { 158 INTEL_PT_SS_NOT_TRACING, 159 INTEL_PT_SS_UNKNOWN, 160 INTEL_PT_SS_TRACING, 161 INTEL_PT_SS_EXPECTING_SWITCH_EVENT, 162 INTEL_PT_SS_EXPECTING_SWITCH_IP, 163 }; 164 165 /* applicable_counters is 64-bits */ 166 #define INTEL_PT_MAX_PEBS 64 167 168 struct intel_pt_pebs_event { 169 struct evsel *evsel; 170 u64 id; 171 }; 172 173 struct intel_pt_queue { 174 struct intel_pt *pt; 175 unsigned int queue_nr; 176 struct auxtrace_buffer *buffer; 177 struct auxtrace_buffer *old_buffer; 178 void *decoder; 179 const struct intel_pt_state *state; 180 struct ip_callchain *chain; 181 struct branch_stack *last_branch; 182 union perf_event *event_buf; 183 bool on_heap; 184 bool stop; 185 bool step_through_buffers; 186 bool use_buffer_pid_tid; 187 bool sync_switch; 188 bool sample_ipc; 189 pid_t pid, tid; 190 int cpu; 191 int switch_state; 192 pid_t next_tid; 193 struct thread *thread; 194 struct machine *guest_machine; 195 struct thread *unknown_guest_thread; 196 pid_t guest_machine_pid; 197 bool exclude_kernel; 198 bool have_sample; 199 u64 time; 200 u64 timestamp; 201 u64 sel_timestamp; 202 bool sel_start; 203 unsigned int sel_idx; 204 u32 flags; 205 u16 insn_len; 206 u64 last_insn_cnt; 207 u64 ipc_insn_cnt; 208 u64 ipc_cyc_cnt; 209 u64 last_in_insn_cnt; 210 u64 last_in_cyc_cnt; 211 u64 last_br_insn_cnt; 212 u64 last_br_cyc_cnt; 213 unsigned int cbr_seen; 214 char insn[INTEL_PT_INSN_BUF_SZ]; 215 struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS]; 216 }; 217 218 static void intel_pt_dump(struct intel_pt *pt __maybe_unused, 219 unsigned char *buf, size_t len) 220 { 221 struct intel_pt_pkt packet; 222 size_t pos = 0; 223 int ret, pkt_len, i; 224 char desc[INTEL_PT_PKT_DESC_MAX]; 225 const char *color = PERF_COLOR_BLUE; 226 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX; 227 228 color_fprintf(stdout, color, 229 ". ... 
Intel Processor Trace data: size %zu bytes\n", 230 len); 231 232 while (len) { 233 ret = intel_pt_get_packet(buf, len, &packet, &ctx); 234 if (ret > 0) 235 pkt_len = ret; 236 else 237 pkt_len = 1; 238 printf("."); 239 color_fprintf(stdout, color, " %08x: ", pos); 240 for (i = 0; i < pkt_len; i++) 241 color_fprintf(stdout, color, " %02x", buf[i]); 242 for (; i < 16; i++) 243 color_fprintf(stdout, color, " "); 244 if (ret > 0) { 245 ret = intel_pt_pkt_desc(&packet, desc, 246 INTEL_PT_PKT_DESC_MAX); 247 if (ret > 0) 248 color_fprintf(stdout, color, " %s\n", desc); 249 } else { 250 color_fprintf(stdout, color, " Bad packet!\n"); 251 } 252 pos += pkt_len; 253 buf += pkt_len; 254 len -= pkt_len; 255 } 256 } 257 258 static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf, 259 size_t len) 260 { 261 printf(".\n"); 262 intel_pt_dump(pt, buf, len); 263 } 264 265 static void intel_pt_log_event(union perf_event *event) 266 { 267 FILE *f = intel_pt_log_fp(); 268 269 if (!intel_pt_enable_logging || !f) 270 return; 271 272 perf_event__fprintf(event, NULL, f); 273 } 274 275 static void intel_pt_dump_sample(struct perf_session *session, 276 struct perf_sample *sample) 277 { 278 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 279 auxtrace); 280 281 printf("\n"); 282 intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size); 283 } 284 285 static bool intel_pt_log_events(struct intel_pt *pt, u64 tm) 286 { 287 struct perf_time_interval *range = pt->synth_opts.ptime_range; 288 int n = pt->synth_opts.range_num; 289 290 if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS) 291 return true; 292 293 if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS) 294 return false; 295 296 /* perf_time__ranges_skip_sample does not work if time is zero */ 297 if (!tm) 298 tm = 1; 299 300 return !n || !perf_time__ranges_skip_sample(range, n, tm); 301 } 302 303 static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root, 304 u64 vmcs, 305 u64 dflt_tsc_offset) 306 { 307 struct rb_node **p = &rb_root->rb_node; 308 struct rb_node *parent = NULL; 309 struct intel_pt_vmcs_info *v; 310 311 while (*p) { 312 parent = *p; 313 v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node); 314 315 if (v->vmcs == vmcs) 316 return v; 317 318 if (vmcs < v->vmcs) 319 p = &(*p)->rb_left; 320 else 321 p = &(*p)->rb_right; 322 } 323 324 v = zalloc(sizeof(*v)); 325 if (v) { 326 v->vmcs = vmcs; 327 v->tsc_offset = dflt_tsc_offset; 328 v->reliable = dflt_tsc_offset; 329 330 rb_link_node(&v->rb_node, parent, p); 331 rb_insert_color(&v->rb_node, rb_root); 332 } 333 334 return v; 335 } 336 337 static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs) 338 { 339 struct intel_pt_queue *ptq = data; 340 struct intel_pt *pt = ptq->pt; 341 342 if (!vmcs && !pt->dflt_tsc_offset) 343 return NULL; 344 345 return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset); 346 } 347 348 static void intel_pt_free_vmcs_info(struct intel_pt *pt) 349 { 350 struct intel_pt_vmcs_info *v; 351 struct rb_node *n; 352 353 n = rb_first(&pt->vmcs_info); 354 while (n) { 355 v = rb_entry(n, struct intel_pt_vmcs_info, rb_node); 356 n = rb_next(n); 357 rb_erase(&v->rb_node, &pt->vmcs_info); 358 free(v); 359 } 360 } 361 362 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a, 363 struct auxtrace_buffer *b) 364 { 365 bool consecutive = false; 366 void *start; 367 368 start = intel_pt_find_overlap(a->data, a->size, b->data, 
b->size, 369 pt->have_tsc, &consecutive, 370 pt->synth_opts.vm_time_correlation); 371 if (!start) 372 return -EINVAL; 373 /* 374 * In the case of vm_time_correlation, the overlap might contain TSC 375 * packets that will not be fixed, and that will then no longer work for 376 * overlap detection. Avoid that by zeroing out the overlap. 377 */ 378 if (pt->synth_opts.vm_time_correlation) 379 memset(b->data, 0, start - b->data); 380 b->use_size = b->data + b->size - start; 381 b->use_data = start; 382 if (b->use_size && consecutive) 383 b->consecutive = true; 384 return 0; 385 } 386 387 static int intel_pt_get_buffer(struct intel_pt_queue *ptq, 388 struct auxtrace_buffer *buffer, 389 struct auxtrace_buffer *old_buffer, 390 struct intel_pt_buffer *b) 391 { 392 bool might_overlap; 393 394 if (!buffer->data) { 395 int fd = perf_data__fd(ptq->pt->session->data); 396 397 buffer->data = auxtrace_buffer__get_data(buffer, fd); 398 if (!buffer->data) 399 return -ENOMEM; 400 } 401 402 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode; 403 if (might_overlap && !buffer->consecutive && old_buffer && 404 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer)) 405 return -ENOMEM; 406 407 if (buffer->use_data) { 408 b->len = buffer->use_size; 409 b->buf = buffer->use_data; 410 } else { 411 b->len = buffer->size; 412 b->buf = buffer->data; 413 } 414 b->ref_timestamp = buffer->reference; 415 416 if (!old_buffer || (might_overlap && !buffer->consecutive)) { 417 b->consecutive = false; 418 b->trace_nr = buffer->buffer_nr + 1; 419 } else { 420 b->consecutive = true; 421 } 422 423 return 0; 424 } 425 426 /* Do not drop buffers with references - refer intel_pt_get_trace() */ 427 static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq, 428 struct auxtrace_buffer *buffer) 429 { 430 if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer) 431 return; 432 433 auxtrace_buffer__drop_data(buffer); 434 } 435 436 /* Must be serialized with respect to intel_pt_get_trace() */ 437 static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb, 438 void *cb_data) 439 { 440 struct intel_pt_queue *ptq = data; 441 struct auxtrace_buffer *buffer = ptq->buffer; 442 struct auxtrace_buffer *old_buffer = ptq->old_buffer; 443 struct auxtrace_queue *queue; 444 int err = 0; 445 446 queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; 447 448 while (1) { 449 struct intel_pt_buffer b = { .len = 0 }; 450 451 buffer = auxtrace_buffer__next(queue, buffer); 452 if (!buffer) 453 break; 454 455 err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b); 456 if (err) 457 break; 458 459 if (b.len) { 460 intel_pt_lookahead_drop_buffer(ptq, old_buffer); 461 old_buffer = buffer; 462 } else { 463 intel_pt_lookahead_drop_buffer(ptq, buffer); 464 continue; 465 } 466 467 err = cb(&b, cb_data); 468 if (err) 469 break; 470 } 471 472 if (buffer != old_buffer) 473 intel_pt_lookahead_drop_buffer(ptq, buffer); 474 intel_pt_lookahead_drop_buffer(ptq, old_buffer); 475 476 return err; 477 } 478 479 /* 480 * This function assumes data is processed sequentially only. 
481 * Must be serialized with respect to intel_pt_lookahead() 482 */ 483 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) 484 { 485 struct intel_pt_queue *ptq = data; 486 struct auxtrace_buffer *buffer = ptq->buffer; 487 struct auxtrace_buffer *old_buffer = ptq->old_buffer; 488 struct auxtrace_queue *queue; 489 int err; 490 491 if (ptq->stop) { 492 b->len = 0; 493 return 0; 494 } 495 496 queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; 497 498 buffer = auxtrace_buffer__next(queue, buffer); 499 if (!buffer) { 500 if (old_buffer) 501 auxtrace_buffer__drop_data(old_buffer); 502 b->len = 0; 503 return 0; 504 } 505 506 ptq->buffer = buffer; 507 508 err = intel_pt_get_buffer(ptq, buffer, old_buffer, b); 509 if (err) 510 return err; 511 512 if (ptq->step_through_buffers) 513 ptq->stop = true; 514 515 if (b->len) { 516 if (old_buffer) 517 auxtrace_buffer__drop_data(old_buffer); 518 ptq->old_buffer = buffer; 519 } else { 520 auxtrace_buffer__drop_data(buffer); 521 return intel_pt_get_trace(b, data); 522 } 523 524 return 0; 525 } 526 527 struct intel_pt_cache_entry { 528 struct auxtrace_cache_entry entry; 529 u64 insn_cnt; 530 u64 byte_cnt; 531 enum intel_pt_insn_op op; 532 enum intel_pt_insn_branch branch; 533 bool emulated_ptwrite; 534 int length; 535 int32_t rel; 536 char insn[INTEL_PT_INSN_BUF_SZ]; 537 }; 538 539 static int intel_pt_config_div(const char *var, const char *value, void *data) 540 { 541 int *d = data; 542 long val; 543 544 if (!strcmp(var, "intel-pt.cache-divisor")) { 545 val = strtol(value, NULL, 0); 546 if (val > 0 && val <= INT_MAX) 547 *d = val; 548 } 549 550 return 0; 551 } 552 553 static int intel_pt_cache_divisor(void) 554 { 555 static int d; 556 557 if (d) 558 return d; 559 560 perf_config(intel_pt_config_div, &d); 561 562 if (!d) 563 d = 64; 564 565 return d; 566 } 567 568 static unsigned int intel_pt_cache_size(struct dso *dso, 569 struct machine *machine) 570 { 571 off_t size; 572 573 size = dso__data_size(dso, machine); 574 size /= intel_pt_cache_divisor(); 575 if (size < 1000) 576 return 10; 577 if (size > (1 << 21)) 578 return 21; 579 return 32 - __builtin_clz(size); 580 } 581 582 static struct auxtrace_cache *intel_pt_cache(struct dso *dso, 583 struct machine *machine) 584 { 585 struct auxtrace_cache *c; 586 unsigned int bits; 587 588 if (dso->auxtrace_cache) 589 return dso->auxtrace_cache; 590 591 bits = intel_pt_cache_size(dso, machine); 592 593 /* Ignoring cache creation failure */ 594 c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200); 595 596 dso->auxtrace_cache = c; 597 598 return c; 599 } 600 601 static int intel_pt_cache_add(struct dso *dso, struct machine *machine, 602 u64 offset, u64 insn_cnt, u64 byte_cnt, 603 struct intel_pt_insn *intel_pt_insn) 604 { 605 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 606 struct intel_pt_cache_entry *e; 607 int err; 608 609 if (!c) 610 return -ENOMEM; 611 612 e = auxtrace_cache__alloc_entry(c); 613 if (!e) 614 return -ENOMEM; 615 616 e->insn_cnt = insn_cnt; 617 e->byte_cnt = byte_cnt; 618 e->op = intel_pt_insn->op; 619 e->branch = intel_pt_insn->branch; 620 e->emulated_ptwrite = intel_pt_insn->emulated_ptwrite; 621 e->length = intel_pt_insn->length; 622 e->rel = intel_pt_insn->rel; 623 memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ); 624 625 err = auxtrace_cache__add(c, offset, &e->entry); 626 if (err) 627 auxtrace_cache__free_entry(c, e); 628 629 return err; 630 } 631 632 static struct intel_pt_cache_entry * 633 intel_pt_cache_lookup(struct dso *dso, struct 
machine *machine, u64 offset) 634 { 635 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 636 637 if (!c) 638 return NULL; 639 640 return auxtrace_cache__lookup(dso->auxtrace_cache, offset); 641 } 642 643 static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine, 644 u64 offset) 645 { 646 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 647 648 if (!c) 649 return; 650 651 auxtrace_cache__remove(dso->auxtrace_cache, offset); 652 } 653 654 static inline bool intel_pt_guest_kernel_ip(uint64_t ip) 655 { 656 /* Assumes 64-bit kernel */ 657 return ip & (1ULL << 63); 658 } 659 660 static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr) 661 { 662 if (nr) { 663 return intel_pt_guest_kernel_ip(ip) ? 664 PERF_RECORD_MISC_GUEST_KERNEL : 665 PERF_RECORD_MISC_GUEST_USER; 666 } 667 668 return ip >= ptq->pt->kernel_start ? 669 PERF_RECORD_MISC_KERNEL : 670 PERF_RECORD_MISC_USER; 671 } 672 673 static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip) 674 { 675 /* No support for non-zero CS base */ 676 if (from_ip) 677 return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr); 678 return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr); 679 } 680 681 static int intel_pt_get_guest(struct intel_pt_queue *ptq) 682 { 683 struct machines *machines = &ptq->pt->session->machines; 684 struct machine *machine; 685 pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid; 686 687 if (ptq->guest_machine && pid == ptq->guest_machine_pid) 688 return 0; 689 690 ptq->guest_machine = NULL; 691 thread__zput(ptq->unknown_guest_thread); 692 693 machine = machines__find_guest(machines, pid); 694 if (!machine) 695 return -1; 696 697 ptq->unknown_guest_thread = machine__idle_thread(machine); 698 if (!ptq->unknown_guest_thread) 699 return -1; 700 701 ptq->guest_machine = machine; 702 ptq->guest_machine_pid = pid; 703 704 return 0; 705 } 706 707 static inline bool intel_pt_jmp_16(struct intel_pt_insn *intel_pt_insn) 708 { 709 return intel_pt_insn->rel == 16 && intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL; 710 } 711 712 #define PTWRITE_MAGIC "\x0f\x0bperf,ptwrite " 713 #define PTWRITE_MAGIC_LEN 16 714 715 static bool intel_pt_emulated_ptwrite(struct dso *dso, struct machine *machine, u64 offset) 716 { 717 unsigned char buf[PTWRITE_MAGIC_LEN]; 718 ssize_t len; 719 720 len = dso__data_read_offset(dso, machine, offset, buf, PTWRITE_MAGIC_LEN); 721 if (len == PTWRITE_MAGIC_LEN && !memcmp(buf, PTWRITE_MAGIC, PTWRITE_MAGIC_LEN)) { 722 intel_pt_log("Emulated ptwrite signature found\n"); 723 return true; 724 } 725 intel_pt_log("Emulated ptwrite signature not found\n"); 726 return false; 727 } 728 729 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, 730 uint64_t *insn_cnt_ptr, uint64_t *ip, 731 uint64_t to_ip, uint64_t max_insn_cnt, 732 void *data) 733 { 734 struct intel_pt_queue *ptq = data; 735 struct machine *machine = ptq->pt->machine; 736 struct thread *thread; 737 struct addr_location al; 738 unsigned char buf[INTEL_PT_INSN_BUF_SZ]; 739 ssize_t len; 740 int x86_64; 741 u8 cpumode; 742 u64 offset, start_offset, start_ip; 743 u64 insn_cnt = 0; 744 bool one_map = true; 745 bool nr; 746 747 intel_pt_insn->length = 0; 748 749 if (to_ip && *ip == to_ip) 750 goto out_no_cache; 751 752 nr = ptq->state->to_nr; 753 cpumode = intel_pt_nr_cpumode(ptq, *ip, nr); 754 755 if (nr) { 756 if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL || 757 intel_pt_get_guest(ptq)) 758 return -EINVAL; 759 machine = 
ptq->guest_machine; 760 thread = ptq->unknown_guest_thread; 761 } else { 762 thread = ptq->thread; 763 if (!thread) { 764 if (cpumode != PERF_RECORD_MISC_KERNEL) 765 return -EINVAL; 766 thread = ptq->pt->unknown_thread; 767 } 768 } 769 770 while (1) { 771 if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso) 772 return -EINVAL; 773 774 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && 775 dso__data_status_seen(al.map->dso, 776 DSO_DATA_STATUS_SEEN_ITRACE)) 777 return -ENOENT; 778 779 offset = al.map->map_ip(al.map, *ip); 780 781 if (!to_ip && one_map) { 782 struct intel_pt_cache_entry *e; 783 784 e = intel_pt_cache_lookup(al.map->dso, machine, offset); 785 if (e && 786 (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) { 787 *insn_cnt_ptr = e->insn_cnt; 788 *ip += e->byte_cnt; 789 intel_pt_insn->op = e->op; 790 intel_pt_insn->branch = e->branch; 791 intel_pt_insn->emulated_ptwrite = e->emulated_ptwrite; 792 intel_pt_insn->length = e->length; 793 intel_pt_insn->rel = e->rel; 794 memcpy(intel_pt_insn->buf, e->insn, 795 INTEL_PT_INSN_BUF_SZ); 796 intel_pt_log_insn_no_data(intel_pt_insn, *ip); 797 return 0; 798 } 799 } 800 801 start_offset = offset; 802 start_ip = *ip; 803 804 /* Load maps to ensure dso->is_64_bit has been updated */ 805 map__load(al.map); 806 807 x86_64 = al.map->dso->is_64_bit; 808 809 while (1) { 810 len = dso__data_read_offset(al.map->dso, machine, 811 offset, buf, 812 INTEL_PT_INSN_BUF_SZ); 813 if (len <= 0) 814 return -EINVAL; 815 816 if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn)) 817 return -EINVAL; 818 819 intel_pt_log_insn(intel_pt_insn, *ip); 820 821 insn_cnt += 1; 822 823 if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) { 824 bool eptw; 825 u64 offs; 826 827 if (!intel_pt_jmp_16(intel_pt_insn)) 828 goto out; 829 /* Check for emulated ptwrite */ 830 offs = offset + intel_pt_insn->length; 831 eptw = intel_pt_emulated_ptwrite(al.map->dso, machine, offs); 832 intel_pt_insn->emulated_ptwrite = eptw; 833 goto out; 834 } 835 836 if (max_insn_cnt && insn_cnt >= max_insn_cnt) 837 goto out_no_cache; 838 839 *ip += intel_pt_insn->length; 840 841 if (to_ip && *ip == to_ip) { 842 intel_pt_insn->length = 0; 843 goto out_no_cache; 844 } 845 846 if (*ip >= al.map->end) 847 break; 848 849 offset += intel_pt_insn->length; 850 } 851 one_map = false; 852 } 853 out: 854 *insn_cnt_ptr = insn_cnt; 855 856 if (!one_map) 857 goto out_no_cache; 858 859 /* 860 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate 861 * entries. 
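	/*
	 * Branch tracing is considered disabled only if an Intel PT event has
	 * the pass-through bit set and the BranchEn bit clear in its config.
	 */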
862 */ 863 if (to_ip) { 864 struct intel_pt_cache_entry *e; 865 866 e = intel_pt_cache_lookup(al.map->dso, machine, start_offset); 867 if (e) 868 return 0; 869 } 870 871 /* Ignore cache errors */ 872 intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt, 873 *ip - start_ip, intel_pt_insn); 874 875 return 0; 876 877 out_no_cache: 878 *insn_cnt_ptr = insn_cnt; 879 return 0; 880 } 881 882 static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip, 883 uint64_t offset, const char *filename) 884 { 885 struct addr_filter *filt; 886 bool have_filter = false; 887 bool hit_tracestop = false; 888 bool hit_filter = false; 889 890 list_for_each_entry(filt, &pt->filts.head, list) { 891 if (filt->start) 892 have_filter = true; 893 894 if ((filename && !filt->filename) || 895 (!filename && filt->filename) || 896 (filename && strcmp(filename, filt->filename))) 897 continue; 898 899 if (!(offset >= filt->addr && offset < filt->addr + filt->size)) 900 continue; 901 902 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n", 903 ip, offset, filename ? filename : "[kernel]", 904 filt->start ? "filter" : "stop", 905 filt->addr, filt->size); 906 907 if (filt->start) 908 hit_filter = true; 909 else 910 hit_tracestop = true; 911 } 912 913 if (!hit_tracestop && !hit_filter) 914 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n", 915 ip, offset, filename ? filename : "[kernel]"); 916 917 return hit_tracestop || (have_filter && !hit_filter); 918 } 919 920 static int __intel_pt_pgd_ip(uint64_t ip, void *data) 921 { 922 struct intel_pt_queue *ptq = data; 923 struct thread *thread; 924 struct addr_location al; 925 u8 cpumode; 926 u64 offset; 927 928 if (ptq->state->to_nr) { 929 if (intel_pt_guest_kernel_ip(ip)) 930 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL); 931 /* No support for decoding guest user space */ 932 return -EINVAL; 933 } else if (ip >= ptq->pt->kernel_start) { 934 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL); 935 } 936 937 cpumode = PERF_RECORD_MISC_USER; 938 939 thread = ptq->thread; 940 if (!thread) 941 return -EINVAL; 942 943 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso) 944 return -EINVAL; 945 946 offset = al.map->map_ip(al.map, ip); 947 948 return intel_pt_match_pgd_ip(ptq->pt, ip, offset, 949 al.map->dso->long_name); 950 } 951 952 static bool intel_pt_pgd_ip(uint64_t ip, void *data) 953 { 954 return __intel_pt_pgd_ip(ip, data) > 0; 955 } 956 957 static bool intel_pt_get_config(struct intel_pt *pt, 958 struct perf_event_attr *attr, u64 *config) 959 { 960 if (attr->type == pt->pmu_type) { 961 if (config) 962 *config = attr->config; 963 return true; 964 } 965 966 return false; 967 } 968 969 static bool intel_pt_exclude_kernel(struct intel_pt *pt) 970 { 971 struct evsel *evsel; 972 973 evlist__for_each_entry(pt->session->evlist, evsel) { 974 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) && 975 !evsel->core.attr.exclude_kernel) 976 return false; 977 } 978 return true; 979 } 980 981 static bool intel_pt_return_compression(struct intel_pt *pt) 982 { 983 struct evsel *evsel; 984 u64 config; 985 986 if (!pt->noretcomp_bit) 987 return true; 988 989 evlist__for_each_entry(pt->session->evlist, evsel) { 990 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 991 (config & pt->noretcomp_bit)) 992 return false; 993 } 994 return true; 995 } 996 997 static bool intel_pt_branch_enable(struct intel_pt *pt) 998 { 999 struct evsel *evsel; 1000 u64 config; 1001 
1002 evlist__for_each_entry(pt->session->evlist, evsel) { 1003 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 1004 (config & INTEL_PT_CFG_PASS_THRU) && 1005 !(config & INTEL_PT_CFG_BRANCH_EN)) 1006 return false; 1007 } 1008 return true; 1009 } 1010 1011 static bool intel_pt_disabled_tnt(struct intel_pt *pt) 1012 { 1013 struct evsel *evsel; 1014 u64 config; 1015 1016 evlist__for_each_entry(pt->session->evlist, evsel) { 1017 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 1018 config & INTEL_PT_CFG_TNT_DIS) 1019 return true; 1020 } 1021 return false; 1022 } 1023 1024 static unsigned int intel_pt_mtc_period(struct intel_pt *pt) 1025 { 1026 struct evsel *evsel; 1027 unsigned int shift; 1028 u64 config; 1029 1030 if (!pt->mtc_freq_bits) 1031 return 0; 1032 1033 for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++) 1034 config >>= 1; 1035 1036 evlist__for_each_entry(pt->session->evlist, evsel) { 1037 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) 1038 return (config & pt->mtc_freq_bits) >> shift; 1039 } 1040 return 0; 1041 } 1042 1043 static bool intel_pt_timeless_decoding(struct intel_pt *pt) 1044 { 1045 struct evsel *evsel; 1046 bool timeless_decoding = true; 1047 u64 config; 1048 1049 if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding) 1050 return true; 1051 1052 evlist__for_each_entry(pt->session->evlist, evsel) { 1053 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 1054 return true; 1055 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) { 1056 if (config & pt->tsc_bit) 1057 timeless_decoding = false; 1058 else 1059 return true; 1060 } 1061 } 1062 return timeless_decoding; 1063 } 1064 1065 static bool intel_pt_tracing_kernel(struct intel_pt *pt) 1066 { 1067 struct evsel *evsel; 1068 1069 evlist__for_each_entry(pt->session->evlist, evsel) { 1070 if (intel_pt_get_config(pt, &evsel->core.attr, NULL) && 1071 !evsel->core.attr.exclude_kernel) 1072 return true; 1073 } 1074 return false; 1075 } 1076 1077 static bool intel_pt_have_tsc(struct intel_pt *pt) 1078 { 1079 struct evsel *evsel; 1080 bool have_tsc = false; 1081 u64 config; 1082 1083 if (!pt->tsc_bit) 1084 return false; 1085 1086 evlist__for_each_entry(pt->session->evlist, evsel) { 1087 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) { 1088 if (config & pt->tsc_bit) 1089 have_tsc = true; 1090 else 1091 return false; 1092 } 1093 } 1094 return have_tsc; 1095 } 1096 1097 static bool intel_pt_have_mtc(struct intel_pt *pt) 1098 { 1099 struct evsel *evsel; 1100 u64 config; 1101 1102 evlist__for_each_entry(pt->session->evlist, evsel) { 1103 if (intel_pt_get_config(pt, &evsel->core.attr, &config) && 1104 (config & pt->mtc_bit)) 1105 return true; 1106 } 1107 return false; 1108 } 1109 1110 static bool intel_pt_sampling_mode(struct intel_pt *pt) 1111 { 1112 struct evsel *evsel; 1113 1114 evlist__for_each_entry(pt->session->evlist, evsel) { 1115 if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) && 1116 evsel->core.attr.aux_sample_size) 1117 return true; 1118 } 1119 return false; 1120 } 1121 1122 static u64 intel_pt_ctl(struct intel_pt *pt) 1123 { 1124 struct evsel *evsel; 1125 u64 config; 1126 1127 evlist__for_each_entry(pt->session->evlist, evsel) { 1128 if (intel_pt_get_config(pt, &evsel->core.attr, &config)) 1129 return config; 1130 } 1131 return 0; 1132 } 1133 1134 static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns) 1135 { 1136 u64 quot, rem; 1137 1138 quot = ns / pt->tc.time_mult; 1139 rem = ns % pt->tc.time_mult; 1140 return (quot 
<< pt->tc.time_shift) + (rem << pt->tc.time_shift) / 1141 pt->tc.time_mult; 1142 } 1143 1144 static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt) 1145 { 1146 size_t sz = sizeof(struct ip_callchain); 1147 1148 /* Add 1 to callchain_sz for callchain context */ 1149 sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64); 1150 return zalloc(sz); 1151 } 1152 1153 static int intel_pt_callchain_init(struct intel_pt *pt) 1154 { 1155 struct evsel *evsel; 1156 1157 evlist__for_each_entry(pt->session->evlist, evsel) { 1158 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN)) 1159 evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN; 1160 } 1161 1162 pt->chain = intel_pt_alloc_chain(pt); 1163 if (!pt->chain) 1164 return -ENOMEM; 1165 1166 return 0; 1167 } 1168 1169 static void intel_pt_add_callchain(struct intel_pt *pt, 1170 struct perf_sample *sample) 1171 { 1172 struct thread *thread = machine__findnew_thread(pt->machine, 1173 sample->pid, 1174 sample->tid); 1175 1176 thread_stack__sample_late(thread, sample->cpu, pt->chain, 1177 pt->synth_opts.callchain_sz + 1, sample->ip, 1178 pt->kernel_start); 1179 1180 sample->callchain = pt->chain; 1181 } 1182 1183 static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt) 1184 { 1185 size_t sz = sizeof(struct branch_stack); 1186 1187 sz += entry_cnt * sizeof(struct branch_entry); 1188 return zalloc(sz); 1189 } 1190 1191 static int intel_pt_br_stack_init(struct intel_pt *pt) 1192 { 1193 struct evsel *evsel; 1194 1195 evlist__for_each_entry(pt->session->evlist, evsel) { 1196 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)) 1197 evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK; 1198 } 1199 1200 pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz); 1201 if (!pt->br_stack) 1202 return -ENOMEM; 1203 1204 return 0; 1205 } 1206 1207 static void intel_pt_add_br_stack(struct intel_pt *pt, 1208 struct perf_sample *sample) 1209 { 1210 struct thread *thread = machine__findnew_thread(pt->machine, 1211 sample->pid, 1212 sample->tid); 1213 1214 thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack, 1215 pt->br_stack_sz, sample->ip, 1216 pt->kernel_start); 1217 1218 sample->branch_stack = pt->br_stack; 1219 } 1220 1221 /* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */ 1222 #define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U) 1223 1224 static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt, 1225 unsigned int queue_nr) 1226 { 1227 struct intel_pt_params params = { .get_trace = 0, }; 1228 struct perf_env *env = pt->machine->env; 1229 struct intel_pt_queue *ptq; 1230 1231 ptq = zalloc(sizeof(struct intel_pt_queue)); 1232 if (!ptq) 1233 return NULL; 1234 1235 if (pt->synth_opts.callchain) { 1236 ptq->chain = intel_pt_alloc_chain(pt); 1237 if (!ptq->chain) 1238 goto out_free; 1239 } 1240 1241 if (pt->synth_opts.last_branch || pt->synth_opts.other_events) { 1242 unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz); 1243 1244 ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt); 1245 if (!ptq->last_branch) 1246 goto out_free; 1247 } 1248 1249 ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); 1250 if (!ptq->event_buf) 1251 goto out_free; 1252 1253 ptq->pt = pt; 1254 ptq->queue_nr = queue_nr; 1255 ptq->exclude_kernel = intel_pt_exclude_kernel(pt); 1256 ptq->pid = -1; 1257 ptq->tid = -1; 1258 ptq->cpu = -1; 1259 ptq->next_tid = -1; 1260 1261 params.get_trace = intel_pt_get_trace; 1262 params.walk_insn = intel_pt_walk_next_insn; 1263 params.lookahead = intel_pt_lookahead; 1264 params.findnew_vmcs_info 
= intel_pt_findnew_vmcs_info; 1265 params.data = ptq; 1266 params.return_compression = intel_pt_return_compression(pt); 1267 params.branch_enable = intel_pt_branch_enable(pt); 1268 params.ctl = intel_pt_ctl(pt); 1269 params.max_non_turbo_ratio = pt->max_non_turbo_ratio; 1270 params.mtc_period = intel_pt_mtc_period(pt); 1271 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n; 1272 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d; 1273 params.quick = pt->synth_opts.quick; 1274 params.vm_time_correlation = pt->synth_opts.vm_time_correlation; 1275 params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run; 1276 params.first_timestamp = pt->first_timestamp; 1277 params.max_loops = pt->max_loops; 1278 1279 /* Cannot walk code without TNT, so force 'quick' mode */ 1280 if (params.branch_enable && intel_pt_disabled_tnt(pt) && !params.quick) 1281 params.quick = 1; 1282 1283 if (pt->filts.cnt > 0) 1284 params.pgd_ip = intel_pt_pgd_ip; 1285 1286 if (pt->synth_opts.instructions) { 1287 if (pt->synth_opts.period) { 1288 switch (pt->synth_opts.period_type) { 1289 case PERF_ITRACE_PERIOD_INSTRUCTIONS: 1290 params.period_type = 1291 INTEL_PT_PERIOD_INSTRUCTIONS; 1292 params.period = pt->synth_opts.period; 1293 break; 1294 case PERF_ITRACE_PERIOD_TICKS: 1295 params.period_type = INTEL_PT_PERIOD_TICKS; 1296 params.period = pt->synth_opts.period; 1297 break; 1298 case PERF_ITRACE_PERIOD_NANOSECS: 1299 params.period_type = INTEL_PT_PERIOD_TICKS; 1300 params.period = intel_pt_ns_to_ticks(pt, 1301 pt->synth_opts.period); 1302 break; 1303 default: 1304 break; 1305 } 1306 } 1307 1308 if (!params.period) { 1309 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS; 1310 params.period = 1; 1311 } 1312 } 1313 1314 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18)) 1315 params.flags |= INTEL_PT_FUP_WITH_NLIP; 1316 1317 ptq->decoder = intel_pt_decoder_new(¶ms); 1318 if (!ptq->decoder) 1319 goto out_free; 1320 1321 return ptq; 1322 1323 out_free: 1324 zfree(&ptq->event_buf); 1325 zfree(&ptq->last_branch); 1326 zfree(&ptq->chain); 1327 free(ptq); 1328 return NULL; 1329 } 1330 1331 static void intel_pt_free_queue(void *priv) 1332 { 1333 struct intel_pt_queue *ptq = priv; 1334 1335 if (!ptq) 1336 return; 1337 thread__zput(ptq->thread); 1338 thread__zput(ptq->unknown_guest_thread); 1339 intel_pt_decoder_free(ptq->decoder); 1340 zfree(&ptq->event_buf); 1341 zfree(&ptq->last_branch); 1342 zfree(&ptq->chain); 1343 free(ptq); 1344 } 1345 1346 static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp) 1347 { 1348 unsigned int i; 1349 1350 pt->first_timestamp = timestamp; 1351 1352 for (i = 0; i < pt->queues.nr_queues; i++) { 1353 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 1354 struct intel_pt_queue *ptq = queue->priv; 1355 1356 if (ptq && ptq->decoder) 1357 intel_pt_set_first_timestamp(ptq->decoder, timestamp); 1358 } 1359 } 1360 1361 static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt, 1362 struct auxtrace_queue *queue) 1363 { 1364 struct intel_pt_queue *ptq = queue->priv; 1365 1366 if (queue->tid == -1 || pt->have_sched_switch) { 1367 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu); 1368 if (ptq->tid == -1) 1369 ptq->pid = -1; 1370 thread__zput(ptq->thread); 1371 } 1372 1373 if (!ptq->thread && ptq->tid != -1) 1374 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid); 1375 1376 if (ptq->thread) { 1377 ptq->pid = ptq->thread->pid_; 1378 if (queue->cpu == -1) 1379 ptq->cpu = ptq->thread->cpu; 1380 } 1381 } 1382 1383 static void intel_pt_sample_flags(struct 
intel_pt_queue *ptq) 1384 { 1385 struct intel_pt *pt = ptq->pt; 1386 1387 ptq->insn_len = 0; 1388 if (ptq->state->flags & INTEL_PT_ABORT_TX) { 1389 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT; 1390 } else if (ptq->state->flags & INTEL_PT_ASYNC) { 1391 if (!ptq->state->to_ip) 1392 ptq->flags = PERF_IP_FLAG_BRANCH | 1393 PERF_IP_FLAG_TRACE_END; 1394 else if (ptq->state->from_nr && !ptq->state->to_nr) 1395 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | 1396 PERF_IP_FLAG_VMEXIT; 1397 else 1398 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | 1399 PERF_IP_FLAG_ASYNC | 1400 PERF_IP_FLAG_INTERRUPT; 1401 } else { 1402 if (ptq->state->from_ip) 1403 ptq->flags = intel_pt_insn_type(ptq->state->insn_op); 1404 else 1405 ptq->flags = PERF_IP_FLAG_BRANCH | 1406 PERF_IP_FLAG_TRACE_BEGIN; 1407 if (ptq->state->flags & INTEL_PT_IN_TX) 1408 ptq->flags |= PERF_IP_FLAG_IN_TX; 1409 ptq->insn_len = ptq->state->insn_len; 1410 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ); 1411 } 1412 1413 if (ptq->state->type & INTEL_PT_TRACE_BEGIN) 1414 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN; 1415 if (ptq->state->type & INTEL_PT_TRACE_END) 1416 ptq->flags |= PERF_IP_FLAG_TRACE_END; 1417 1418 if (pt->cap_event_trace) { 1419 if (ptq->state->type & INTEL_PT_IFLAG_CHG) { 1420 if (!ptq->state->from_iflag) 1421 ptq->flags |= PERF_IP_FLAG_INTR_DISABLE; 1422 if (ptq->state->from_iflag != ptq->state->to_iflag) 1423 ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE; 1424 } else if (!ptq->state->to_iflag) { 1425 ptq->flags |= PERF_IP_FLAG_INTR_DISABLE; 1426 } 1427 } 1428 } 1429 1430 static void intel_pt_setup_time_range(struct intel_pt *pt, 1431 struct intel_pt_queue *ptq) 1432 { 1433 if (!pt->range_cnt) 1434 return; 1435 1436 ptq->sel_timestamp = pt->time_ranges[0].start; 1437 ptq->sel_idx = 0; 1438 1439 if (ptq->sel_timestamp) { 1440 ptq->sel_start = true; 1441 } else { 1442 ptq->sel_timestamp = pt->time_ranges[0].end; 1443 ptq->sel_start = false; 1444 } 1445 } 1446 1447 static int intel_pt_setup_queue(struct intel_pt *pt, 1448 struct auxtrace_queue *queue, 1449 unsigned int queue_nr) 1450 { 1451 struct intel_pt_queue *ptq = queue->priv; 1452 1453 if (list_empty(&queue->head)) 1454 return 0; 1455 1456 if (!ptq) { 1457 ptq = intel_pt_alloc_queue(pt, queue_nr); 1458 if (!ptq) 1459 return -ENOMEM; 1460 queue->priv = ptq; 1461 1462 if (queue->cpu != -1) 1463 ptq->cpu = queue->cpu; 1464 ptq->tid = queue->tid; 1465 1466 ptq->cbr_seen = UINT_MAX; 1467 1468 if (pt->sampling_mode && !pt->snapshot_mode && 1469 pt->timeless_decoding) 1470 ptq->step_through_buffers = true; 1471 1472 ptq->sync_switch = pt->sync_switch; 1473 1474 intel_pt_setup_time_range(pt, ptq); 1475 } 1476 1477 if (!ptq->on_heap && 1478 (!ptq->sync_switch || 1479 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) { 1480 const struct intel_pt_state *state; 1481 int ret; 1482 1483 if (pt->timeless_decoding) 1484 return 0; 1485 1486 intel_pt_log("queue %u getting timestamp\n", queue_nr); 1487 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", 1488 queue_nr, ptq->cpu, ptq->pid, ptq->tid); 1489 1490 if (ptq->sel_start && ptq->sel_timestamp) { 1491 ret = intel_pt_fast_forward(ptq->decoder, 1492 ptq->sel_timestamp); 1493 if (ret) 1494 return ret; 1495 } 1496 1497 while (1) { 1498 state = intel_pt_decode(ptq->decoder); 1499 if (state->err) { 1500 if (state->err == INTEL_PT_ERR_NODATA) { 1501 intel_pt_log("queue %u has no timestamp\n", 1502 queue_nr); 1503 return 0; 1504 } 1505 continue; 1506 } 1507 if (state->timestamp) 1508 break; 1509 } 1510 1511 
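		/*
		 * A state with a timestamp has been found, so the queue can
		 * be ordered on the heap by that first timestamp.
		 */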
ptq->timestamp = state->timestamp; 1512 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n", 1513 queue_nr, ptq->timestamp); 1514 ptq->state = state; 1515 ptq->have_sample = true; 1516 if (ptq->sel_start && ptq->sel_timestamp && 1517 ptq->timestamp < ptq->sel_timestamp) 1518 ptq->have_sample = false; 1519 intel_pt_sample_flags(ptq); 1520 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp); 1521 if (ret) 1522 return ret; 1523 ptq->on_heap = true; 1524 } 1525 1526 return 0; 1527 } 1528 1529 static int intel_pt_setup_queues(struct intel_pt *pt) 1530 { 1531 unsigned int i; 1532 int ret; 1533 1534 for (i = 0; i < pt->queues.nr_queues; i++) { 1535 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i); 1536 if (ret) 1537 return ret; 1538 } 1539 return 0; 1540 } 1541 1542 static inline bool intel_pt_skip_event(struct intel_pt *pt) 1543 { 1544 return pt->synth_opts.initial_skip && 1545 pt->num_events++ < pt->synth_opts.initial_skip; 1546 } 1547 1548 /* 1549 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen. 1550 * Also ensure CBR is first non-skipped event by allowing for 4 more samples 1551 * from this decoder state. 1552 */ 1553 static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt) 1554 { 1555 return pt->synth_opts.initial_skip && 1556 pt->num_events + 4 < pt->synth_opts.initial_skip; 1557 } 1558 1559 static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq, 1560 union perf_event *event, 1561 struct perf_sample *sample) 1562 { 1563 event->sample.header.type = PERF_RECORD_SAMPLE; 1564 event->sample.header.size = sizeof(struct perf_event_header); 1565 1566 sample->pid = ptq->pid; 1567 sample->tid = ptq->tid; 1568 sample->cpu = ptq->cpu; 1569 sample->insn_len = ptq->insn_len; 1570 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ); 1571 } 1572 1573 static void intel_pt_prep_b_sample(struct intel_pt *pt, 1574 struct intel_pt_queue *ptq, 1575 union perf_event *event, 1576 struct perf_sample *sample) 1577 { 1578 intel_pt_prep_a_sample(ptq, event, sample); 1579 1580 if (!pt->timeless_decoding) 1581 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc); 1582 1583 sample->ip = ptq->state->from_ip; 1584 sample->addr = ptq->state->to_ip; 1585 sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr); 1586 sample->period = 1; 1587 sample->flags = ptq->flags; 1588 1589 event->sample.header.misc = sample->cpumode; 1590 } 1591 1592 static int intel_pt_inject_event(union perf_event *event, 1593 struct perf_sample *sample, u64 type) 1594 { 1595 event->header.size = perf_event__sample_event_size(sample, type, 0); 1596 return perf_event__synthesize_sample(event, type, 0, sample); 1597 } 1598 1599 static inline int intel_pt_opt_inject(struct intel_pt *pt, 1600 union perf_event *event, 1601 struct perf_sample *sample, u64 type) 1602 { 1603 if (!pt->synth_opts.inject) 1604 return 0; 1605 1606 return intel_pt_inject_event(event, sample, type); 1607 } 1608 1609 static int intel_pt_deliver_synth_event(struct intel_pt *pt, 1610 union perf_event *event, 1611 struct perf_sample *sample, u64 type) 1612 { 1613 int ret; 1614 1615 ret = intel_pt_opt_inject(pt, event, sample, type); 1616 if (ret) 1617 return ret; 1618 1619 ret = perf_session__deliver_synth_event(pt->session, event, sample); 1620 if (ret) 1621 pr_err("Intel PT: failed to deliver event, error %d\n", ret); 1622 1623 return ret; 1624 } 1625 1626 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq) 1627 { 1628 struct intel_pt *pt = ptq->pt; 1629 union perf_event *event = 
ptq->event_buf; 1630 struct perf_sample sample = { .ip = 0, }; 1631 struct dummy_branch_stack { 1632 u64 nr; 1633 u64 hw_idx; 1634 struct branch_entry entries; 1635 } dummy_bs; 1636 1637 if (pt->branches_filter && !(pt->branches_filter & ptq->flags)) 1638 return 0; 1639 1640 if (intel_pt_skip_event(pt)) 1641 return 0; 1642 1643 intel_pt_prep_b_sample(pt, ptq, event, &sample); 1644 1645 sample.id = ptq->pt->branches_id; 1646 sample.stream_id = ptq->pt->branches_id; 1647 1648 /* 1649 * perf report cannot handle events without a branch stack when using 1650 * SORT_MODE__BRANCH so make a dummy one. 1651 */ 1652 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) { 1653 dummy_bs = (struct dummy_branch_stack){ 1654 .nr = 1, 1655 .hw_idx = -1ULL, 1656 .entries = { 1657 .from = sample.ip, 1658 .to = sample.addr, 1659 }, 1660 }; 1661 sample.branch_stack = (struct branch_stack *)&dummy_bs; 1662 } 1663 1664 if (ptq->sample_ipc) 1665 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt; 1666 if (sample.cyc_cnt) { 1667 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt; 1668 ptq->last_br_insn_cnt = ptq->ipc_insn_cnt; 1669 ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt; 1670 } 1671 1672 return intel_pt_deliver_synth_event(pt, event, &sample, 1673 pt->branches_sample_type); 1674 } 1675 1676 static void intel_pt_prep_sample(struct intel_pt *pt, 1677 struct intel_pt_queue *ptq, 1678 union perf_event *event, 1679 struct perf_sample *sample) 1680 { 1681 intel_pt_prep_b_sample(pt, ptq, event, sample); 1682 1683 if (pt->synth_opts.callchain) { 1684 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain, 1685 pt->synth_opts.callchain_sz + 1, 1686 sample->ip, pt->kernel_start); 1687 sample->callchain = ptq->chain; 1688 } 1689 1690 if (pt->synth_opts.last_branch) { 1691 thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch, 1692 pt->br_stack_sz); 1693 sample->branch_stack = ptq->last_branch; 1694 } 1695 } 1696 1697 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq) 1698 { 1699 struct intel_pt *pt = ptq->pt; 1700 union perf_event *event = ptq->event_buf; 1701 struct perf_sample sample = { .ip = 0, }; 1702 1703 if (intel_pt_skip_event(pt)) 1704 return 0; 1705 1706 intel_pt_prep_sample(pt, ptq, event, &sample); 1707 1708 sample.id = ptq->pt->instructions_id; 1709 sample.stream_id = ptq->pt->instructions_id; 1710 if (pt->synth_opts.quick) 1711 sample.period = 1; 1712 else 1713 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt; 1714 1715 if (ptq->sample_ipc) 1716 sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt; 1717 if (sample.cyc_cnt) { 1718 sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt; 1719 ptq->last_in_insn_cnt = ptq->ipc_insn_cnt; 1720 ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt; 1721 } 1722 1723 ptq->last_insn_cnt = ptq->state->tot_insn_cnt; 1724 1725 return intel_pt_deliver_synth_event(pt, event, &sample, 1726 pt->instructions_sample_type); 1727 } 1728 1729 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq) 1730 { 1731 struct intel_pt *pt = ptq->pt; 1732 union perf_event *event = ptq->event_buf; 1733 struct perf_sample sample = { .ip = 0, }; 1734 1735 if (intel_pt_skip_event(pt)) 1736 return 0; 1737 1738 intel_pt_prep_sample(pt, ptq, event, &sample); 1739 1740 sample.id = ptq->pt->transactions_id; 1741 sample.stream_id = ptq->pt->transactions_id; 1742 1743 return intel_pt_deliver_synth_event(pt, event, &sample, 1744 pt->transactions_sample_type); 1745 } 1746 1747 static void intel_pt_prep_p_sample(struct 
intel_pt *pt, 1748 struct intel_pt_queue *ptq, 1749 union perf_event *event, 1750 struct perf_sample *sample) 1751 { 1752 intel_pt_prep_sample(pt, ptq, event, sample); 1753 1754 /* 1755 * Zero IP is used to mean "trace start" but that is not the case for 1756 * power or PTWRITE events with no IP, so clear the flags. 1757 */ 1758 if (!sample->ip) 1759 sample->flags = 0; 1760 } 1761 1762 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq) 1763 { 1764 struct intel_pt *pt = ptq->pt; 1765 union perf_event *event = ptq->event_buf; 1766 struct perf_sample sample = { .ip = 0, }; 1767 struct perf_synth_intel_ptwrite raw; 1768 1769 if (intel_pt_skip_event(pt)) 1770 return 0; 1771 1772 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1773 1774 sample.id = ptq->pt->ptwrites_id; 1775 sample.stream_id = ptq->pt->ptwrites_id; 1776 1777 raw.flags = 0; 1778 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 1779 raw.payload = cpu_to_le64(ptq->state->ptw_payload); 1780 1781 sample.raw_size = perf_synth__raw_size(raw); 1782 sample.raw_data = perf_synth__raw_data(&raw); 1783 1784 return intel_pt_deliver_synth_event(pt, event, &sample, 1785 pt->ptwrites_sample_type); 1786 } 1787 1788 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq) 1789 { 1790 struct intel_pt *pt = ptq->pt; 1791 union perf_event *event = ptq->event_buf; 1792 struct perf_sample sample = { .ip = 0, }; 1793 struct perf_synth_intel_cbr raw; 1794 u32 flags; 1795 1796 if (intel_pt_skip_cbr_event(pt)) 1797 return 0; 1798 1799 ptq->cbr_seen = ptq->state->cbr; 1800 1801 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1802 1803 sample.id = ptq->pt->cbr_id; 1804 sample.stream_id = ptq->pt->cbr_id; 1805 1806 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16); 1807 raw.flags = cpu_to_le32(flags); 1808 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz); 1809 raw.reserved3 = 0; 1810 1811 sample.raw_size = perf_synth__raw_size(raw); 1812 sample.raw_data = perf_synth__raw_data(&raw); 1813 1814 return intel_pt_deliver_synth_event(pt, event, &sample, 1815 pt->pwr_events_sample_type); 1816 } 1817 1818 static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq) 1819 { 1820 struct intel_pt *pt = ptq->pt; 1821 union perf_event *event = ptq->event_buf; 1822 struct perf_sample sample = { .ip = 0, }; 1823 struct perf_synth_intel_psb raw; 1824 1825 if (intel_pt_skip_event(pt)) 1826 return 0; 1827 1828 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1829 1830 sample.id = ptq->pt->psb_id; 1831 sample.stream_id = ptq->pt->psb_id; 1832 sample.flags = 0; 1833 1834 raw.reserved = 0; 1835 raw.offset = ptq->state->psb_offset; 1836 1837 sample.raw_size = perf_synth__raw_size(raw); 1838 sample.raw_data = perf_synth__raw_data(&raw); 1839 1840 return intel_pt_deliver_synth_event(pt, event, &sample, 1841 pt->pwr_events_sample_type); 1842 } 1843 1844 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq) 1845 { 1846 struct intel_pt *pt = ptq->pt; 1847 union perf_event *event = ptq->event_buf; 1848 struct perf_sample sample = { .ip = 0, }; 1849 struct perf_synth_intel_mwait raw; 1850 1851 if (intel_pt_skip_event(pt)) 1852 return 0; 1853 1854 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1855 1856 sample.id = ptq->pt->mwait_id; 1857 sample.stream_id = ptq->pt->mwait_id; 1858 1859 raw.reserved = 0; 1860 raw.payload = cpu_to_le64(ptq->state->mwait_payload); 1861 1862 sample.raw_size = perf_synth__raw_size(raw); 1863 sample.raw_data = perf_synth__raw_data(&raw); 1864 1865 return intel_pt_deliver_synth_event(pt, event, 
&sample, 1866 pt->pwr_events_sample_type); 1867 } 1868 1869 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq) 1870 { 1871 struct intel_pt *pt = ptq->pt; 1872 union perf_event *event = ptq->event_buf; 1873 struct perf_sample sample = { .ip = 0, }; 1874 struct perf_synth_intel_pwre raw; 1875 1876 if (intel_pt_skip_event(pt)) 1877 return 0; 1878 1879 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1880 1881 sample.id = ptq->pt->pwre_id; 1882 sample.stream_id = ptq->pt->pwre_id; 1883 1884 raw.reserved = 0; 1885 raw.payload = cpu_to_le64(ptq->state->pwre_payload); 1886 1887 sample.raw_size = perf_synth__raw_size(raw); 1888 sample.raw_data = perf_synth__raw_data(&raw); 1889 1890 return intel_pt_deliver_synth_event(pt, event, &sample, 1891 pt->pwr_events_sample_type); 1892 } 1893 1894 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq) 1895 { 1896 struct intel_pt *pt = ptq->pt; 1897 union perf_event *event = ptq->event_buf; 1898 struct perf_sample sample = { .ip = 0, }; 1899 struct perf_synth_intel_exstop raw; 1900 1901 if (intel_pt_skip_event(pt)) 1902 return 0; 1903 1904 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1905 1906 sample.id = ptq->pt->exstop_id; 1907 sample.stream_id = ptq->pt->exstop_id; 1908 1909 raw.flags = 0; 1910 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 1911 1912 sample.raw_size = perf_synth__raw_size(raw); 1913 sample.raw_data = perf_synth__raw_data(&raw); 1914 1915 return intel_pt_deliver_synth_event(pt, event, &sample, 1916 pt->pwr_events_sample_type); 1917 } 1918 1919 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq) 1920 { 1921 struct intel_pt *pt = ptq->pt; 1922 union perf_event *event = ptq->event_buf; 1923 struct perf_sample sample = { .ip = 0, }; 1924 struct perf_synth_intel_pwrx raw; 1925 1926 if (intel_pt_skip_event(pt)) 1927 return 0; 1928 1929 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1930 1931 sample.id = ptq->pt->pwrx_id; 1932 sample.stream_id = ptq->pt->pwrx_id; 1933 1934 raw.reserved = 0; 1935 raw.payload = cpu_to_le64(ptq->state->pwrx_payload); 1936 1937 sample.raw_size = perf_synth__raw_size(raw); 1938 sample.raw_data = perf_synth__raw_data(&raw); 1939 1940 return intel_pt_deliver_synth_event(pt, event, &sample, 1941 pt->pwr_events_sample_type); 1942 } 1943 1944 /* 1945 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer 1946 * intel_pt_add_gp_regs(). 1947 */ 1948 static const int pebs_gp_regs[] = { 1949 [PERF_REG_X86_FLAGS] = 1, 1950 [PERF_REG_X86_IP] = 2, 1951 [PERF_REG_X86_AX] = 3, 1952 [PERF_REG_X86_CX] = 4, 1953 [PERF_REG_X86_DX] = 5, 1954 [PERF_REG_X86_BX] = 6, 1955 [PERF_REG_X86_SP] = 7, 1956 [PERF_REG_X86_BP] = 8, 1957 [PERF_REG_X86_SI] = 9, 1958 [PERF_REG_X86_DI] = 10, 1959 [PERF_REG_X86_R8] = 11, 1960 [PERF_REG_X86_R9] = 12, 1961 [PERF_REG_X86_R10] = 13, 1962 [PERF_REG_X86_R11] = 14, 1963 [PERF_REG_X86_R12] = 15, 1964 [PERF_REG_X86_R13] = 16, 1965 [PERF_REG_X86_R14] = 17, 1966 [PERF_REG_X86_R15] = 18, 1967 }; 1968 1969 static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos, 1970 const struct intel_pt_blk_items *items, 1971 u64 regs_mask) 1972 { 1973 const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS]; 1974 u32 mask = items->mask[INTEL_PT_GP_REGS_POS]; 1975 u32 bit; 1976 int i; 1977 1978 for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) { 1979 /* Get the PEBS gp_regs array index */ 1980 int n = pebs_gp_regs[i] - 1; 1981 1982 if (n < 0) 1983 continue; 1984 /* 1985 * Add only registers that were requested (i.e. 
'regs_mask') and 1986 * that were provided (i.e. 'mask'), and update the resulting 1987 * mask (i.e. 'intr_regs->mask') accordingly. 1988 */ 1989 if (mask & 1 << n && regs_mask & bit) { 1990 intr_regs->mask |= bit; 1991 *pos++ = gp_regs[n]; 1992 } 1993 } 1994 1995 return pos; 1996 } 1997 1998 #ifndef PERF_REG_X86_XMM0 1999 #define PERF_REG_X86_XMM0 32 2000 #endif 2001 2002 static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos, 2003 const struct intel_pt_blk_items *items, 2004 u64 regs_mask) 2005 { 2006 u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0); 2007 const u64 *xmm = items->xmm; 2008 2009 /* 2010 * If there are any XMM registers, then there should be all of them. 2011 * Nevertheless, follow the logic to add only registers that were 2012 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'), 2013 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly. 2014 */ 2015 intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0; 2016 2017 for (; mask; mask >>= 1, xmm++) { 2018 if (mask & 1) 2019 *pos++ = *xmm; 2020 } 2021 } 2022 2023 #define LBR_INFO_MISPRED (1ULL << 63) 2024 #define LBR_INFO_IN_TX (1ULL << 62) 2025 #define LBR_INFO_ABORT (1ULL << 61) 2026 #define LBR_INFO_CYCLES 0xffff 2027 2028 /* Refer kernel's intel_pmu_store_pebs_lbrs() */ 2029 static u64 intel_pt_lbr_flags(u64 info) 2030 { 2031 union { 2032 struct branch_flags flags; 2033 u64 result; 2034 } u; 2035 2036 u.result = 0; 2037 u.flags.mispred = !!(info & LBR_INFO_MISPRED); 2038 u.flags.predicted = !(info & LBR_INFO_MISPRED); 2039 u.flags.in_tx = !!(info & LBR_INFO_IN_TX); 2040 u.flags.abort = !!(info & LBR_INFO_ABORT); 2041 u.flags.cycles = info & LBR_INFO_CYCLES; 2042 2043 return u.result; 2044 } 2045 2046 static void intel_pt_add_lbrs(struct branch_stack *br_stack, 2047 const struct intel_pt_blk_items *items) 2048 { 2049 u64 *to; 2050 int i; 2051 2052 br_stack->nr = 0; 2053 2054 to = &br_stack->entries[0].from; 2055 2056 for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) { 2057 u32 mask = items->mask[i]; 2058 const u64 *from = items->val[i]; 2059 2060 for (; mask; mask >>= 3, from += 3) { 2061 if ((mask & 7) == 7) { 2062 *to++ = from[0]; 2063 *to++ = from[1]; 2064 *to++ = intel_pt_lbr_flags(from[2]); 2065 br_stack->nr += 1; 2066 } 2067 } 2068 } 2069 } 2070 2071 static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id) 2072 { 2073 const struct intel_pt_blk_items *items = &ptq->state->items; 2074 struct perf_sample sample = { .ip = 0, }; 2075 union perf_event *event = ptq->event_buf; 2076 struct intel_pt *pt = ptq->pt; 2077 u64 sample_type = evsel->core.attr.sample_type; 2078 u8 cpumode; 2079 u64 regs[8 * sizeof(sample.intr_regs.mask)]; 2080 2081 if (intel_pt_skip_event(pt)) 2082 return 0; 2083 2084 intel_pt_prep_a_sample(ptq, event, &sample); 2085 2086 sample.id = id; 2087 sample.stream_id = id; 2088 2089 if (!evsel->core.attr.freq) 2090 sample.period = evsel->core.attr.sample_period; 2091 2092 /* No support for non-zero CS base */ 2093 if (items->has_ip) 2094 sample.ip = items->ip; 2095 else if (items->has_rip) 2096 sample.ip = items->rip; 2097 else 2098 sample.ip = ptq->state->from_ip; 2099 2100 cpumode = intel_pt_cpumode(ptq, sample.ip, 0); 2101 2102 event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP; 2103 2104 sample.cpumode = cpumode; 2105 2106 if (sample_type & PERF_SAMPLE_TIME) { 2107 u64 timestamp = 0; 2108 2109 if (items->has_timestamp) 2110 timestamp = items->timestamp; 2111 else if (!pt->timeless_decoding) 
			timestamp = ptq->timestamp;
		if (timestamp)
			sample.time = tsc_to_perf_time(timestamp, &pt->tc);
	}

	if (sample_type & PERF_SAMPLE_CALLCHAIN &&
	    pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
				     pt->synth_opts.callchain_sz, sample.ip,
				     pt->kernel_start);
		sample.callchain = ptq->chain;
	}

	if (sample_type & PERF_SAMPLE_REGS_INTR &&
	    (items->mask[INTEL_PT_GP_REGS_POS] ||
	     items->mask[INTEL_PT_XMM_POS])) {
		u64 regs_mask = evsel->core.attr.sample_regs_intr;
		u64 *pos;

		sample.intr_regs.abi = items->is_32_bit ?
				       PERF_SAMPLE_REGS_ABI_32 :
				       PERF_SAMPLE_REGS_ABI_64;
		sample.intr_regs.regs = regs;

		pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);

		intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
	}

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
		if (items->mask[INTEL_PT_LBR_0_POS] ||
		    items->mask[INTEL_PT_LBR_1_POS] ||
		    items->mask[INTEL_PT_LBR_2_POS]) {
			intel_pt_add_lbrs(ptq->last_branch, items);
		} else if (pt->synth_opts.last_branch) {
			thread_stack__br_sample(ptq->thread, ptq->cpu,
						ptq->last_branch,
						pt->br_stack_sz);
		} else {
			ptq->last_branch->nr = 0;
		}
		sample.branch_stack = ptq->last_branch;
	}

	if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
		sample.addr = items->mem_access_address;

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
		/*
		 * Refer kernel's setup_pebs_adaptive_sample_data() and
		 * intel_hsw_weight().
		 */
		if (items->has_mem_access_latency) {
			u64 weight = items->mem_access_latency >> 32;

			/*
			 * Starting from SPR, the mem access latency field
			 * contains both cache latency [47:32] and instruction
			 * latency [15:0]. The cache latency is the same as the
			 * mem access latency on previous platforms.
			 *
			 * In practice, no memory access could last longer than
			 * 4G cycles. Use latency >> 32 to distinguish the
			 * different formats of the mem access latency field.
			 */
			if (weight > 0) {
				sample.weight = weight & 0xffff;
				sample.ins_lat = items->mem_access_latency & 0xffff;
			} else
				sample.weight = items->mem_access_latency;
		}
		if (!sample.weight && items->has_tsx_aux_info) {
			/* Cycles last block */
			sample.weight = (u32)items->tsx_aux_info;
		}
	}

	if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
		u64 ax = items->has_rax ?
items->rax : 0; 2191 /* Refer kernel's intel_hsw_transaction() */ 2192 u64 txn = (u8)(items->tsx_aux_info >> 32); 2193 2194 /* For RTM XABORTs also log the abort code from AX */ 2195 if (txn & PERF_TXN_TRANSACTION && ax & 1) 2196 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT; 2197 sample.transaction = txn; 2198 } 2199 2200 return intel_pt_deliver_synth_event(pt, event, &sample, sample_type); 2201 } 2202 2203 static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq) 2204 { 2205 struct intel_pt *pt = ptq->pt; 2206 struct evsel *evsel = pt->pebs_evsel; 2207 u64 id = evsel->core.id[0]; 2208 2209 return intel_pt_do_synth_pebs_sample(ptq, evsel, id); 2210 } 2211 2212 static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq) 2213 { 2214 const struct intel_pt_blk_items *items = &ptq->state->items; 2215 struct intel_pt_pebs_event *pe; 2216 struct intel_pt *pt = ptq->pt; 2217 int err = -EINVAL; 2218 int hw_id; 2219 2220 if (!items->has_applicable_counters || !items->applicable_counters) { 2221 if (!pt->single_pebs) 2222 pr_err("PEBS-via-PT record with no applicable_counters\n"); 2223 return intel_pt_synth_single_pebs_sample(ptq); 2224 } 2225 2226 for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) { 2227 pe = &ptq->pebs[hw_id]; 2228 if (!pe->evsel) { 2229 if (!pt->single_pebs) 2230 pr_err("PEBS-via-PT record with no matching event, hw_id %d\n", 2231 hw_id); 2232 return intel_pt_synth_single_pebs_sample(ptq); 2233 } 2234 err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id); 2235 if (err) 2236 return err; 2237 } 2238 2239 return err; 2240 } 2241 2242 static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq) 2243 { 2244 struct intel_pt *pt = ptq->pt; 2245 union perf_event *event = ptq->event_buf; 2246 struct perf_sample sample = { .ip = 0, }; 2247 struct { 2248 struct perf_synth_intel_evt cfe; 2249 struct perf_synth_intel_evd evd[INTEL_PT_MAX_EVDS]; 2250 } raw; 2251 int i; 2252 2253 if (intel_pt_skip_event(pt)) 2254 return 0; 2255 2256 intel_pt_prep_p_sample(pt, ptq, event, &sample); 2257 2258 sample.id = ptq->pt->evt_id; 2259 sample.stream_id = ptq->pt->evt_id; 2260 2261 raw.cfe.type = ptq->state->cfe_type; 2262 raw.cfe.reserved = 0; 2263 raw.cfe.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 2264 raw.cfe.vector = ptq->state->cfe_vector; 2265 raw.cfe.evd_cnt = ptq->state->evd_cnt; 2266 2267 for (i = 0; i < ptq->state->evd_cnt; i++) { 2268 raw.evd[i].et = 0; 2269 raw.evd[i].evd_type = ptq->state->evd[i].type; 2270 raw.evd[i].payload = ptq->state->evd[i].payload; 2271 } 2272 2273 sample.raw_size = perf_synth__raw_size(raw) + 2274 ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd); 2275 sample.raw_data = perf_synth__raw_data(&raw); 2276 2277 return intel_pt_deliver_synth_event(pt, event, &sample, 2278 pt->evt_sample_type); 2279 } 2280 2281 static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq) 2282 { 2283 struct intel_pt *pt = ptq->pt; 2284 union perf_event *event = ptq->event_buf; 2285 struct perf_sample sample = { .ip = 0, }; 2286 struct perf_synth_intel_iflag_chg raw; 2287 2288 if (intel_pt_skip_event(pt)) 2289 return 0; 2290 2291 intel_pt_prep_p_sample(pt, ptq, event, &sample); 2292 2293 sample.id = ptq->pt->iflag_chg_id; 2294 sample.stream_id = ptq->pt->iflag_chg_id; 2295 2296 raw.flags = 0; 2297 raw.iflag = ptq->state->to_iflag; 2298 2299 if (ptq->state->type & INTEL_PT_BRANCH) { 2300 raw.via_branch = 1; 2301 raw.branch_ip = ptq->state->to_ip; 2302 } else { 2303 sample.addr = 0; 2304 } 2305 
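	/* ptq->flags holds the decoder's PERF_IP_FLAG_* bits for the current point in the trace */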
sample.flags = ptq->flags; 2306 2307 sample.raw_size = perf_synth__raw_size(raw); 2308 sample.raw_data = perf_synth__raw_data(&raw); 2309 2310 return intel_pt_deliver_synth_event(pt, event, &sample, 2311 pt->iflag_chg_sample_type); 2312 } 2313 2314 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu, 2315 pid_t pid, pid_t tid, u64 ip, u64 timestamp) 2316 { 2317 union perf_event event; 2318 char msg[MAX_AUXTRACE_ERROR_MSG]; 2319 int err; 2320 2321 if (pt->synth_opts.error_minus_flags) { 2322 if (code == INTEL_PT_ERR_OVR && 2323 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW) 2324 return 0; 2325 if (code == INTEL_PT_ERR_LOST && 2326 pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST) 2327 return 0; 2328 } 2329 2330 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG); 2331 2332 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, 2333 code, cpu, pid, tid, ip, msg, timestamp); 2334 2335 err = perf_session__deliver_synth_event(pt->session, &event, NULL); 2336 if (err) 2337 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n", 2338 err); 2339 2340 return err; 2341 } 2342 2343 static int intel_ptq_synth_error(struct intel_pt_queue *ptq, 2344 const struct intel_pt_state *state) 2345 { 2346 struct intel_pt *pt = ptq->pt; 2347 u64 tm = ptq->timestamp; 2348 2349 tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc); 2350 2351 return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid, 2352 ptq->tid, state->from_ip, tm); 2353 } 2354 2355 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq) 2356 { 2357 struct auxtrace_queue *queue; 2358 pid_t tid = ptq->next_tid; 2359 int err; 2360 2361 if (tid == -1) 2362 return 0; 2363 2364 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid); 2365 2366 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid); 2367 2368 queue = &pt->queues.queue_array[ptq->queue_nr]; 2369 intel_pt_set_pid_tid_cpu(pt, queue); 2370 2371 ptq->next_tid = -1; 2372 2373 return err; 2374 } 2375 2376 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip) 2377 { 2378 struct intel_pt *pt = ptq->pt; 2379 2380 return ip == pt->switch_ip && 2381 (ptq->flags & PERF_IP_FLAG_BRANCH) && 2382 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC | 2383 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT)); 2384 } 2385 2386 #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \ 2387 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT) 2388 2389 static int intel_pt_sample(struct intel_pt_queue *ptq) 2390 { 2391 const struct intel_pt_state *state = ptq->state; 2392 struct intel_pt *pt = ptq->pt; 2393 int err; 2394 2395 if (!ptq->have_sample) 2396 return 0; 2397 2398 ptq->have_sample = false; 2399 2400 if (pt->synth_opts.approx_ipc) { 2401 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt; 2402 ptq->ipc_cyc_cnt = ptq->state->cycles; 2403 ptq->sample_ipc = true; 2404 } else { 2405 ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt; 2406 ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt; 2407 ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC; 2408 } 2409 2410 /* 2411 * Do PEBS first to allow for the possibility that the PEBS timestamp 2412 * precedes the current timestamp. 
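	 * (A PEBS record can carry its own timestamp among its Block Items, as
	 * used in intel_pt_do_synth_pebs_sample(), so its sample time can be
	 * slightly earlier than the timestamp the decoder has otherwise
	 * reached.)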
2413 */ 2414 if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) { 2415 err = intel_pt_synth_pebs_sample(ptq); 2416 if (err) 2417 return err; 2418 } 2419 2420 if (pt->synth_opts.intr_events) { 2421 if (state->type & INTEL_PT_EVT) { 2422 err = intel_pt_synth_events_sample(ptq); 2423 if (err) 2424 return err; 2425 } 2426 if (state->type & INTEL_PT_IFLAG_CHG) { 2427 err = intel_pt_synth_iflag_chg_sample(ptq); 2428 if (err) 2429 return err; 2430 } 2431 } 2432 2433 if (pt->sample_pwr_events) { 2434 if (state->type & INTEL_PT_PSB_EVT) { 2435 err = intel_pt_synth_psb_sample(ptq); 2436 if (err) 2437 return err; 2438 } 2439 if (ptq->state->cbr != ptq->cbr_seen) { 2440 err = intel_pt_synth_cbr_sample(ptq); 2441 if (err) 2442 return err; 2443 } 2444 if (state->type & INTEL_PT_PWR_EVT) { 2445 if (state->type & INTEL_PT_MWAIT_OP) { 2446 err = intel_pt_synth_mwait_sample(ptq); 2447 if (err) 2448 return err; 2449 } 2450 if (state->type & INTEL_PT_PWR_ENTRY) { 2451 err = intel_pt_synth_pwre_sample(ptq); 2452 if (err) 2453 return err; 2454 } 2455 if (state->type & INTEL_PT_EX_STOP) { 2456 err = intel_pt_synth_exstop_sample(ptq); 2457 if (err) 2458 return err; 2459 } 2460 if (state->type & INTEL_PT_PWR_EXIT) { 2461 err = intel_pt_synth_pwrx_sample(ptq); 2462 if (err) 2463 return err; 2464 } 2465 } 2466 } 2467 2468 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) { 2469 err = intel_pt_synth_instruction_sample(ptq); 2470 if (err) 2471 return err; 2472 } 2473 2474 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) { 2475 err = intel_pt_synth_transaction_sample(ptq); 2476 if (err) 2477 return err; 2478 } 2479 2480 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) { 2481 err = intel_pt_synth_ptwrite_sample(ptq); 2482 if (err) 2483 return err; 2484 } 2485 2486 if (!(state->type & INTEL_PT_BRANCH)) 2487 return 0; 2488 2489 if (pt->use_thread_stack) { 2490 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, 2491 state->from_ip, state->to_ip, ptq->insn_len, 2492 state->trace_nr, pt->callstack, 2493 pt->br_stack_sz_plus, 2494 pt->mispred_all); 2495 } else { 2496 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr); 2497 } 2498 2499 if (pt->sample_branches) { 2500 if (state->from_nr != state->to_nr && 2501 state->from_ip && state->to_ip) { 2502 struct intel_pt_state *st = (struct intel_pt_state *)state; 2503 u64 to_ip = st->to_ip; 2504 u64 from_ip = st->from_ip; 2505 2506 /* 2507 * perf cannot handle having different machines for ip 2508 * and addr, so create 2 branches. 
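			 * The first branch keeps from_ip but has to_ip zeroed
			 * (a branch out of the 'from' machine); the second has
			 * from_ip zeroed and the real to_ip (a branch into the
			 * 'to' machine).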
2509 */ 2510 st->to_ip = 0; 2511 err = intel_pt_synth_branch_sample(ptq); 2512 if (err) 2513 return err; 2514 st->from_ip = 0; 2515 st->to_ip = to_ip; 2516 err = intel_pt_synth_branch_sample(ptq); 2517 st->from_ip = from_ip; 2518 } else { 2519 err = intel_pt_synth_branch_sample(ptq); 2520 } 2521 if (err) 2522 return err; 2523 } 2524 2525 if (!ptq->sync_switch) 2526 return 0; 2527 2528 if (intel_pt_is_switch_ip(ptq, state->to_ip)) { 2529 switch (ptq->switch_state) { 2530 case INTEL_PT_SS_NOT_TRACING: 2531 case INTEL_PT_SS_UNKNOWN: 2532 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 2533 err = intel_pt_next_tid(pt, ptq); 2534 if (err) 2535 return err; 2536 ptq->switch_state = INTEL_PT_SS_TRACING; 2537 break; 2538 default: 2539 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT; 2540 return 1; 2541 } 2542 } else if (!state->to_ip) { 2543 ptq->switch_state = INTEL_PT_SS_NOT_TRACING; 2544 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) { 2545 ptq->switch_state = INTEL_PT_SS_UNKNOWN; 2546 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN && 2547 state->to_ip == pt->ptss_ip && 2548 (ptq->flags & PERF_IP_FLAG_CALL)) { 2549 ptq->switch_state = INTEL_PT_SS_TRACING; 2550 } 2551 2552 return 0; 2553 } 2554 2555 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip) 2556 { 2557 struct machine *machine = pt->machine; 2558 struct map *map; 2559 struct symbol *sym, *start; 2560 u64 ip, switch_ip = 0; 2561 const char *ptss; 2562 2563 if (ptss_ip) 2564 *ptss_ip = 0; 2565 2566 map = machine__kernel_map(machine); 2567 if (!map) 2568 return 0; 2569 2570 if (map__load(map)) 2571 return 0; 2572 2573 start = dso__first_symbol(map->dso); 2574 2575 for (sym = start; sym; sym = dso__next_symbol(sym)) { 2576 if (sym->binding == STB_GLOBAL && 2577 !strcmp(sym->name, "__switch_to")) { 2578 ip = map->unmap_ip(map, sym->start); 2579 if (ip >= map->start && ip < map->end) { 2580 switch_ip = ip; 2581 break; 2582 } 2583 } 2584 } 2585 2586 if (!switch_ip || !ptss_ip) 2587 return 0; 2588 2589 if (pt->have_sched_switch == 1) 2590 ptss = "perf_trace_sched_switch"; 2591 else 2592 ptss = "__perf_event_task_sched_out"; 2593 2594 for (sym = start; sym; sym = dso__next_symbol(sym)) { 2595 if (!strcmp(sym->name, ptss)) { 2596 ip = map->unmap_ip(map, sym->start); 2597 if (ip >= map->start && ip < map->end) { 2598 *ptss_ip = ip; 2599 break; 2600 } 2601 } 2602 } 2603 2604 return switch_ip; 2605 } 2606 2607 static void intel_pt_enable_sync_switch(struct intel_pt *pt) 2608 { 2609 unsigned int i; 2610 2611 pt->sync_switch = true; 2612 2613 for (i = 0; i < pt->queues.nr_queues; i++) { 2614 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 2615 struct intel_pt_queue *ptq = queue->priv; 2616 2617 if (ptq) 2618 ptq->sync_switch = true; 2619 } 2620 } 2621 2622 /* 2623 * To filter against time ranges, it is only necessary to look at the next start 2624 * or end time. 
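 * For example (illustrative values): with two selected ranges [10, 20] and
 * [30, 40], the first comparison is against start time 10; once past it,
 * sel_start is cleared and the next comparison is against end time 20, then
 * start time 30, and so on (see intel_pt_next_time() below).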
2625 */ 2626 static bool intel_pt_next_time(struct intel_pt_queue *ptq) 2627 { 2628 struct intel_pt *pt = ptq->pt; 2629 2630 if (ptq->sel_start) { 2631 /* Next time is an end time */ 2632 ptq->sel_start = false; 2633 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end; 2634 return true; 2635 } else if (ptq->sel_idx + 1 < pt->range_cnt) { 2636 /* Next time is a start time */ 2637 ptq->sel_start = true; 2638 ptq->sel_idx += 1; 2639 ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start; 2640 return true; 2641 } 2642 2643 /* No next time */ 2644 return false; 2645 } 2646 2647 static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp) 2648 { 2649 int err; 2650 2651 while (1) { 2652 if (ptq->sel_start) { 2653 if (ptq->timestamp >= ptq->sel_timestamp) { 2654 /* After start time, so consider next time */ 2655 intel_pt_next_time(ptq); 2656 if (!ptq->sel_timestamp) { 2657 /* No end time */ 2658 return 0; 2659 } 2660 /* Check against end time */ 2661 continue; 2662 } 2663 /* Before start time, so fast forward */ 2664 ptq->have_sample = false; 2665 if (ptq->sel_timestamp > *ff_timestamp) { 2666 if (ptq->sync_switch) { 2667 intel_pt_next_tid(ptq->pt, ptq); 2668 ptq->switch_state = INTEL_PT_SS_UNKNOWN; 2669 } 2670 *ff_timestamp = ptq->sel_timestamp; 2671 err = intel_pt_fast_forward(ptq->decoder, 2672 ptq->sel_timestamp); 2673 if (err) 2674 return err; 2675 } 2676 return 0; 2677 } else if (ptq->timestamp > ptq->sel_timestamp) { 2678 /* After end time, so consider next time */ 2679 if (!intel_pt_next_time(ptq)) { 2680 /* No next time range, so stop decoding */ 2681 ptq->have_sample = false; 2682 ptq->switch_state = INTEL_PT_SS_NOT_TRACING; 2683 return 1; 2684 } 2685 /* Check against next start time */ 2686 continue; 2687 } else { 2688 /* Before end time */ 2689 return 0; 2690 } 2691 } 2692 } 2693 2694 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) 2695 { 2696 const struct intel_pt_state *state = ptq->state; 2697 struct intel_pt *pt = ptq->pt; 2698 u64 ff_timestamp = 0; 2699 int err; 2700 2701 if (!pt->kernel_start) { 2702 pt->kernel_start = machine__kernel_start(pt->machine); 2703 if (pt->per_cpu_mmaps && 2704 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) && 2705 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) && 2706 !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) { 2707 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip); 2708 if (pt->switch_ip) { 2709 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n", 2710 pt->switch_ip, pt->ptss_ip); 2711 intel_pt_enable_sync_switch(pt); 2712 } 2713 } 2714 } 2715 2716 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", 2717 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); 2718 while (1) { 2719 err = intel_pt_sample(ptq); 2720 if (err) 2721 return err; 2722 2723 state = intel_pt_decode(ptq->decoder); 2724 if (state->err) { 2725 if (state->err == INTEL_PT_ERR_NODATA) 2726 return 1; 2727 if (ptq->sync_switch && 2728 state->from_ip >= pt->kernel_start) { 2729 ptq->sync_switch = false; 2730 intel_pt_next_tid(pt, ptq); 2731 } 2732 ptq->timestamp = state->est_timestamp; 2733 if (pt->synth_opts.errors) { 2734 err = intel_ptq_synth_error(ptq, state); 2735 if (err) 2736 return err; 2737 } 2738 continue; 2739 } 2740 2741 ptq->state = state; 2742 ptq->have_sample = true; 2743 intel_pt_sample_flags(ptq); 2744 2745 /* Use estimated TSC upon return to user space */ 2746 if (pt->est_tsc && 2747 (state->from_ip >= pt->kernel_start || !state->from_ip) && 2748 state->to_ip && state->to_ip < 
pt->kernel_start) { 2749 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n", 2750 state->timestamp, state->est_timestamp); 2751 ptq->timestamp = state->est_timestamp; 2752 /* Use estimated TSC in unknown switch state */ 2753 } else if (ptq->sync_switch && 2754 ptq->switch_state == INTEL_PT_SS_UNKNOWN && 2755 intel_pt_is_switch_ip(ptq, state->to_ip) && 2756 ptq->next_tid == -1) { 2757 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n", 2758 state->timestamp, state->est_timestamp); 2759 ptq->timestamp = state->est_timestamp; 2760 } else if (state->timestamp > ptq->timestamp) { 2761 ptq->timestamp = state->timestamp; 2762 } 2763 2764 if (ptq->sel_timestamp) { 2765 err = intel_pt_time_filter(ptq, &ff_timestamp); 2766 if (err) 2767 return err; 2768 } 2769 2770 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) { 2771 *timestamp = ptq->timestamp; 2772 return 0; 2773 } 2774 } 2775 return 0; 2776 } 2777 2778 static inline int intel_pt_update_queues(struct intel_pt *pt) 2779 { 2780 if (pt->queues.new_data) { 2781 pt->queues.new_data = false; 2782 return intel_pt_setup_queues(pt); 2783 } 2784 return 0; 2785 } 2786 2787 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp) 2788 { 2789 unsigned int queue_nr; 2790 u64 ts; 2791 int ret; 2792 2793 while (1) { 2794 struct auxtrace_queue *queue; 2795 struct intel_pt_queue *ptq; 2796 2797 if (!pt->heap.heap_cnt) 2798 return 0; 2799 2800 if (pt->heap.heap_array[0].ordinal >= timestamp) 2801 return 0; 2802 2803 queue_nr = pt->heap.heap_array[0].queue_nr; 2804 queue = &pt->queues.queue_array[queue_nr]; 2805 ptq = queue->priv; 2806 2807 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n", 2808 queue_nr, pt->heap.heap_array[0].ordinal, 2809 timestamp); 2810 2811 auxtrace_heap__pop(&pt->heap); 2812 2813 if (pt->heap.heap_cnt) { 2814 ts = pt->heap.heap_array[0].ordinal + 1; 2815 if (ts > timestamp) 2816 ts = timestamp; 2817 } else { 2818 ts = timestamp; 2819 } 2820 2821 intel_pt_set_pid_tid_cpu(pt, queue); 2822 2823 ret = intel_pt_run_decoder(ptq, &ts); 2824 2825 if (ret < 0) { 2826 auxtrace_heap__add(&pt->heap, queue_nr, ts); 2827 return ret; 2828 } 2829 2830 if (!ret) { 2831 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts); 2832 if (ret < 0) 2833 return ret; 2834 } else { 2835 ptq->on_heap = false; 2836 } 2837 } 2838 2839 return 0; 2840 } 2841 2842 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid, 2843 u64 time_) 2844 { 2845 struct auxtrace_queues *queues = &pt->queues; 2846 unsigned int i; 2847 u64 ts = 0; 2848 2849 for (i = 0; i < queues->nr_queues; i++) { 2850 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 2851 struct intel_pt_queue *ptq = queue->priv; 2852 2853 if (ptq && (tid == -1 || ptq->tid == tid)) { 2854 ptq->time = time_; 2855 intel_pt_set_pid_tid_cpu(pt, queue); 2856 intel_pt_run_decoder(ptq, &ts); 2857 } 2858 } 2859 return 0; 2860 } 2861 2862 static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq, 2863 struct auxtrace_queue *queue, 2864 struct perf_sample *sample) 2865 { 2866 struct machine *m = ptq->pt->machine; 2867 2868 ptq->pid = sample->pid; 2869 ptq->tid = sample->tid; 2870 ptq->cpu = queue->cpu; 2871 2872 intel_pt_log("queue %u cpu %d pid %d tid %d\n", 2873 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); 2874 2875 thread__zput(ptq->thread); 2876 2877 if (ptq->tid == -1) 2878 return; 2879 2880 if (ptq->pid == -1) { 2881 ptq->thread = machine__find_thread(m, -1, ptq->tid); 2882 if (ptq->thread) 2883 ptq->pid = ptq->thread->pid_; 2884 return; 2885 } 2886 
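	/* pid and tid are both known, so find the thread or create a new one */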
2887 ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid); 2888 } 2889 2890 static int intel_pt_process_timeless_sample(struct intel_pt *pt, 2891 struct perf_sample *sample) 2892 { 2893 struct auxtrace_queue *queue; 2894 struct intel_pt_queue *ptq; 2895 u64 ts = 0; 2896 2897 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session); 2898 if (!queue) 2899 return -EINVAL; 2900 2901 ptq = queue->priv; 2902 if (!ptq) 2903 return 0; 2904 2905 ptq->stop = false; 2906 ptq->time = sample->time; 2907 intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample); 2908 intel_pt_run_decoder(ptq, &ts); 2909 return 0; 2910 } 2911 2912 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample) 2913 { 2914 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu, 2915 sample->pid, sample->tid, 0, sample->time); 2916 } 2917 2918 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu) 2919 { 2920 unsigned i, j; 2921 2922 if (cpu < 0 || !pt->queues.nr_queues) 2923 return NULL; 2924 2925 if ((unsigned)cpu >= pt->queues.nr_queues) 2926 i = pt->queues.nr_queues - 1; 2927 else 2928 i = cpu; 2929 2930 if (pt->queues.queue_array[i].cpu == cpu) 2931 return pt->queues.queue_array[i].priv; 2932 2933 for (j = 0; i > 0; j++) { 2934 if (pt->queues.queue_array[--i].cpu == cpu) 2935 return pt->queues.queue_array[i].priv; 2936 } 2937 2938 for (; j < pt->queues.nr_queues; j++) { 2939 if (pt->queues.queue_array[j].cpu == cpu) 2940 return pt->queues.queue_array[j].priv; 2941 } 2942 2943 return NULL; 2944 } 2945 2946 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid, 2947 u64 timestamp) 2948 { 2949 struct intel_pt_queue *ptq; 2950 int err; 2951 2952 if (!pt->sync_switch) 2953 return 1; 2954 2955 ptq = intel_pt_cpu_to_ptq(pt, cpu); 2956 if (!ptq || !ptq->sync_switch) 2957 return 1; 2958 2959 switch (ptq->switch_state) { 2960 case INTEL_PT_SS_NOT_TRACING: 2961 break; 2962 case INTEL_PT_SS_UNKNOWN: 2963 case INTEL_PT_SS_TRACING: 2964 ptq->next_tid = tid; 2965 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP; 2966 return 0; 2967 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: 2968 if (!ptq->on_heap) { 2969 ptq->timestamp = perf_time_to_tsc(timestamp, 2970 &pt->tc); 2971 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr, 2972 ptq->timestamp); 2973 if (err) 2974 return err; 2975 ptq->on_heap = true; 2976 } 2977 ptq->switch_state = INTEL_PT_SS_TRACING; 2978 break; 2979 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 2980 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu); 2981 break; 2982 default: 2983 break; 2984 } 2985 2986 ptq->next_tid = -1; 2987 2988 return 1; 2989 } 2990 2991 static int intel_pt_process_switch(struct intel_pt *pt, 2992 struct perf_sample *sample) 2993 { 2994 pid_t tid; 2995 int cpu, ret; 2996 struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id); 2997 2998 if (evsel != pt->switch_evsel) 2999 return 0; 3000 3001 tid = evsel__intval(evsel, sample, "next_pid"); 3002 cpu = sample->cpu; 3003 3004 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", 3005 cpu, tid, sample->time, perf_time_to_tsc(sample->time, 3006 &pt->tc)); 3007 3008 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); 3009 if (ret <= 0) 3010 return ret; 3011 3012 return machine__set_current_tid(pt->machine, cpu, -1, tid); 3013 } 3014 3015 static int intel_pt_context_switch_in(struct intel_pt *pt, 3016 struct perf_sample *sample) 3017 { 3018 pid_t pid = sample->pid; 3019 pid_t tid = sample->tid; 3020 int cpu = sample->cpu; 3021 3022 if 
(pt->sync_switch) { 3023 struct intel_pt_queue *ptq; 3024 3025 ptq = intel_pt_cpu_to_ptq(pt, cpu); 3026 if (ptq && ptq->sync_switch) { 3027 ptq->next_tid = -1; 3028 switch (ptq->switch_state) { 3029 case INTEL_PT_SS_NOT_TRACING: 3030 case INTEL_PT_SS_UNKNOWN: 3031 case INTEL_PT_SS_TRACING: 3032 break; 3033 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: 3034 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 3035 ptq->switch_state = INTEL_PT_SS_TRACING; 3036 break; 3037 default: 3038 break; 3039 } 3040 } 3041 } 3042 3043 /* 3044 * If the current tid has not been updated yet, ensure it is now that 3045 * a "switch in" event has occurred. 3046 */ 3047 if (machine__get_current_tid(pt->machine, cpu) == tid) 3048 return 0; 3049 3050 return machine__set_current_tid(pt->machine, cpu, pid, tid); 3051 } 3052 3053 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, 3054 struct perf_sample *sample) 3055 { 3056 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 3057 pid_t pid, tid; 3058 int cpu, ret; 3059 3060 cpu = sample->cpu; 3061 3062 if (pt->have_sched_switch == 3) { 3063 if (!out) 3064 return intel_pt_context_switch_in(pt, sample); 3065 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) { 3066 pr_err("Expecting CPU-wide context switch event\n"); 3067 return -EINVAL; 3068 } 3069 pid = event->context_switch.next_prev_pid; 3070 tid = event->context_switch.next_prev_tid; 3071 } else { 3072 if (out) 3073 return 0; 3074 pid = sample->pid; 3075 tid = sample->tid; 3076 } 3077 3078 if (tid == -1) 3079 intel_pt_log("context_switch event has no tid\n"); 3080 3081 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); 3082 if (ret <= 0) 3083 return ret; 3084 3085 return machine__set_current_tid(pt->machine, cpu, pid, tid); 3086 } 3087 3088 static int intel_pt_process_itrace_start(struct intel_pt *pt, 3089 union perf_event *event, 3090 struct perf_sample *sample) 3091 { 3092 if (!pt->per_cpu_mmaps) 3093 return 0; 3094 3095 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", 3096 sample->cpu, event->itrace_start.pid, 3097 event->itrace_start.tid, sample->time, 3098 perf_time_to_tsc(sample->time, &pt->tc)); 3099 3100 return machine__set_current_tid(pt->machine, sample->cpu, 3101 event->itrace_start.pid, 3102 event->itrace_start.tid); 3103 } 3104 3105 static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt, 3106 union perf_event *event, 3107 struct perf_sample *sample) 3108 { 3109 u64 hw_id = event->aux_output_hw_id.hw_id; 3110 struct auxtrace_queue *queue; 3111 struct intel_pt_queue *ptq; 3112 struct evsel *evsel; 3113 3114 queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session); 3115 evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id); 3116 if (!queue || !queue->priv || !evsel || hw_id > INTEL_PT_MAX_PEBS) { 3117 pr_err("Bad AUX output hardware ID\n"); 3118 return -EINVAL; 3119 } 3120 3121 ptq = queue->priv; 3122 3123 ptq->pebs[hw_id].evsel = evsel; 3124 ptq->pebs[hw_id].id = sample->id; 3125 3126 return 0; 3127 } 3128 3129 static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr, 3130 struct addr_location *al) 3131 { 3132 if (!al->map || addr < al->map->start || addr >= al->map->end) { 3133 if (!thread__find_map(thread, cpumode, addr, al)) 3134 return -1; 3135 } 3136 3137 return 0; 3138 } 3139 3140 /* Invalidate all instruction cache entries that overlap the text poke */ 3141 static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event) 3142 { 3143 u8 cpumode = event->header.misc & 
PERF_RECORD_MISC_CPUMODE_MASK; 3144 u64 addr = event->text_poke.addr + event->text_poke.new_len - 1; 3145 /* Assume text poke begins in a basic block no more than 4096 bytes */ 3146 int cnt = 4096 + event->text_poke.new_len; 3147 struct thread *thread = pt->unknown_thread; 3148 struct addr_location al = { .map = NULL }; 3149 struct machine *machine = pt->machine; 3150 struct intel_pt_cache_entry *e; 3151 u64 offset; 3152 3153 if (!event->text_poke.new_len) 3154 return 0; 3155 3156 for (; cnt; cnt--, addr--) { 3157 if (intel_pt_find_map(thread, cpumode, addr, &al)) { 3158 if (addr < event->text_poke.addr) 3159 return 0; 3160 continue; 3161 } 3162 3163 if (!al.map->dso || !al.map->dso->auxtrace_cache) 3164 continue; 3165 3166 offset = al.map->map_ip(al.map, addr); 3167 3168 e = intel_pt_cache_lookup(al.map->dso, machine, offset); 3169 if (!e) 3170 continue; 3171 3172 if (addr + e->byte_cnt + e->length <= event->text_poke.addr) { 3173 /* 3174 * No overlap. Working backwards there cannot be another 3175 * basic block that overlaps the text poke if there is a 3176 * branch instruction before the text poke address. 3177 */ 3178 if (e->branch != INTEL_PT_BR_NO_BRANCH) 3179 return 0; 3180 } else { 3181 intel_pt_cache_invalidate(al.map->dso, machine, offset); 3182 intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n", 3183 al.map->dso->long_name, addr); 3184 } 3185 } 3186 3187 return 0; 3188 } 3189 3190 static int intel_pt_process_event(struct perf_session *session, 3191 union perf_event *event, 3192 struct perf_sample *sample, 3193 struct perf_tool *tool) 3194 { 3195 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3196 auxtrace); 3197 u64 timestamp; 3198 int err = 0; 3199 3200 if (dump_trace) 3201 return 0; 3202 3203 if (!tool->ordered_events) { 3204 pr_err("Intel Processor Trace requires ordered events\n"); 3205 return -EINVAL; 3206 } 3207 3208 if (sample->time && sample->time != (u64)-1) 3209 timestamp = perf_time_to_tsc(sample->time, &pt->tc); 3210 else 3211 timestamp = 0; 3212 3213 if (timestamp || pt->timeless_decoding) { 3214 err = intel_pt_update_queues(pt); 3215 if (err) 3216 return err; 3217 } 3218 3219 if (pt->timeless_decoding) { 3220 if (pt->sampling_mode) { 3221 if (sample->aux_sample.size) 3222 err = intel_pt_process_timeless_sample(pt, 3223 sample); 3224 } else if (event->header.type == PERF_RECORD_EXIT) { 3225 err = intel_pt_process_timeless_queues(pt, 3226 event->fork.tid, 3227 sample->time); 3228 } 3229 } else if (timestamp) { 3230 if (!pt->first_timestamp) 3231 intel_pt_first_timestamp(pt, timestamp); 3232 err = intel_pt_process_queues(pt, timestamp); 3233 } 3234 if (err) 3235 return err; 3236 3237 if (event->header.type == PERF_RECORD_SAMPLE) { 3238 if (pt->synth_opts.add_callchain && !sample->callchain) 3239 intel_pt_add_callchain(pt, sample); 3240 if (pt->synth_opts.add_last_branch && !sample->branch_stack) 3241 intel_pt_add_br_stack(pt, sample); 3242 } 3243 3244 if (event->header.type == PERF_RECORD_AUX && 3245 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && 3246 pt->synth_opts.errors) { 3247 err = intel_pt_lost(pt, sample); 3248 if (err) 3249 return err; 3250 } 3251 3252 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE) 3253 err = intel_pt_process_switch(pt, sample); 3254 else if (event->header.type == PERF_RECORD_ITRACE_START) 3255 err = intel_pt_process_itrace_start(pt, event, sample); 3256 else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID) 3257 err = intel_pt_process_aux_output_hw_id(pt, event, sample); 3258 
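	/* Context switch events feed the sync_switch state machine, see intel_pt_context_switch() */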
else if (event->header.type == PERF_RECORD_SWITCH || 3259 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) 3260 err = intel_pt_context_switch(pt, event, sample); 3261 3262 if (!err && event->header.type == PERF_RECORD_TEXT_POKE) 3263 err = intel_pt_text_poke(pt, event); 3264 3265 if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) { 3266 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ", 3267 event->header.type, sample->cpu, sample->time, timestamp); 3268 intel_pt_log_event(event); 3269 } 3270 3271 return err; 3272 } 3273 3274 static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool) 3275 { 3276 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3277 auxtrace); 3278 int ret; 3279 3280 if (dump_trace) 3281 return 0; 3282 3283 if (!tool->ordered_events) 3284 return -EINVAL; 3285 3286 ret = intel_pt_update_queues(pt); 3287 if (ret < 0) 3288 return ret; 3289 3290 if (pt->timeless_decoding) 3291 return intel_pt_process_timeless_queues(pt, -1, 3292 MAX_TIMESTAMP - 1); 3293 3294 return intel_pt_process_queues(pt, MAX_TIMESTAMP); 3295 } 3296 3297 static void intel_pt_free_events(struct perf_session *session) 3298 { 3299 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3300 auxtrace); 3301 struct auxtrace_queues *queues = &pt->queues; 3302 unsigned int i; 3303 3304 for (i = 0; i < queues->nr_queues; i++) { 3305 intel_pt_free_queue(queues->queue_array[i].priv); 3306 queues->queue_array[i].priv = NULL; 3307 } 3308 intel_pt_log_disable(); 3309 auxtrace_queues__free(queues); 3310 } 3311 3312 static void intel_pt_free(struct perf_session *session) 3313 { 3314 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3315 auxtrace); 3316 3317 auxtrace_heap__free(&pt->heap); 3318 intel_pt_free_events(session); 3319 session->auxtrace = NULL; 3320 intel_pt_free_vmcs_info(pt); 3321 thread__put(pt->unknown_thread); 3322 addr_filters__exit(&pt->filts); 3323 zfree(&pt->chain); 3324 zfree(&pt->filter); 3325 zfree(&pt->time_ranges); 3326 free(pt); 3327 } 3328 3329 static bool intel_pt_evsel_is_auxtrace(struct perf_session *session, 3330 struct evsel *evsel) 3331 { 3332 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3333 auxtrace); 3334 3335 return evsel->core.attr.type == pt->pmu_type; 3336 } 3337 3338 static int intel_pt_process_auxtrace_event(struct perf_session *session, 3339 union perf_event *event, 3340 struct perf_tool *tool __maybe_unused) 3341 { 3342 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 3343 auxtrace); 3344 3345 if (!pt->data_queued) { 3346 struct auxtrace_buffer *buffer; 3347 off_t data_offset; 3348 int fd = perf_data__fd(session->data); 3349 int err; 3350 3351 if (perf_data__is_pipe(session->data)) { 3352 data_offset = 0; 3353 } else { 3354 data_offset = lseek(fd, 0, SEEK_CUR); 3355 if (data_offset == -1) 3356 return -errno; 3357 } 3358 3359 err = auxtrace_queues__add_event(&pt->queues, session, event, 3360 data_offset, &buffer); 3361 if (err) 3362 return err; 3363 3364 /* Dump here now we have copied a piped trace out of the pipe */ 3365 if (dump_trace) { 3366 if (auxtrace_buffer__get_data(buffer, fd)) { 3367 intel_pt_dump_event(pt, buffer->data, 3368 buffer->size); 3369 auxtrace_buffer__put_data(buffer); 3370 } 3371 } 3372 } 3373 3374 return 0; 3375 } 3376 3377 static int intel_pt_queue_data(struct perf_session *session, 3378 struct perf_sample *sample, 3379 union perf_event *event, u64 data_offset) 3380 { 3381 struct intel_pt *pt = 
container_of(session->auxtrace, struct intel_pt, 3382 auxtrace); 3383 u64 timestamp; 3384 3385 if (event) { 3386 return auxtrace_queues__add_event(&pt->queues, session, event, 3387 data_offset, NULL); 3388 } 3389 3390 if (sample->time && sample->time != (u64)-1) 3391 timestamp = perf_time_to_tsc(sample->time, &pt->tc); 3392 else 3393 timestamp = 0; 3394 3395 return auxtrace_queues__add_sample(&pt->queues, session, sample, 3396 data_offset, timestamp); 3397 } 3398 3399 struct intel_pt_synth { 3400 struct perf_tool dummy_tool; 3401 struct perf_session *session; 3402 }; 3403 3404 static int intel_pt_event_synth(struct perf_tool *tool, 3405 union perf_event *event, 3406 struct perf_sample *sample __maybe_unused, 3407 struct machine *machine __maybe_unused) 3408 { 3409 struct intel_pt_synth *intel_pt_synth = 3410 container_of(tool, struct intel_pt_synth, dummy_tool); 3411 3412 return perf_session__deliver_synth_event(intel_pt_synth->session, event, 3413 NULL); 3414 } 3415 3416 static int intel_pt_synth_event(struct perf_session *session, const char *name, 3417 struct perf_event_attr *attr, u64 id) 3418 { 3419 struct intel_pt_synth intel_pt_synth; 3420 int err; 3421 3422 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 3423 name, id, (u64)attr->sample_type); 3424 3425 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth)); 3426 intel_pt_synth.session = session; 3427 3428 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1, 3429 &id, intel_pt_event_synth); 3430 if (err) 3431 pr_err("%s: failed to synthesize '%s' event type\n", 3432 __func__, name); 3433 3434 return err; 3435 } 3436 3437 static void intel_pt_set_event_name(struct evlist *evlist, u64 id, 3438 const char *name) 3439 { 3440 struct evsel *evsel; 3441 3442 evlist__for_each_entry(evlist, evsel) { 3443 if (evsel->core.id && evsel->core.id[0] == id) { 3444 if (evsel->name) 3445 zfree(&evsel->name); 3446 evsel->name = strdup(name); 3447 break; 3448 } 3449 } 3450 } 3451 3452 static struct evsel *intel_pt_evsel(struct intel_pt *pt, 3453 struct evlist *evlist) 3454 { 3455 struct evsel *evsel; 3456 3457 evlist__for_each_entry(evlist, evsel) { 3458 if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids) 3459 return evsel; 3460 } 3461 3462 return NULL; 3463 } 3464 3465 static int intel_pt_synth_events(struct intel_pt *pt, 3466 struct perf_session *session) 3467 { 3468 struct evlist *evlist = session->evlist; 3469 struct evsel *evsel = intel_pt_evsel(pt, evlist); 3470 struct perf_event_attr attr; 3471 u64 id; 3472 int err; 3473 3474 if (!evsel) { 3475 pr_debug("There are no selected events with Intel Processor Trace data\n"); 3476 return 0; 3477 } 3478 3479 memset(&attr, 0, sizeof(struct perf_event_attr)); 3480 attr.size = sizeof(struct perf_event_attr); 3481 attr.type = PERF_TYPE_HARDWARE; 3482 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK; 3483 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | 3484 PERF_SAMPLE_PERIOD; 3485 if (pt->timeless_decoding) 3486 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; 3487 else 3488 attr.sample_type |= PERF_SAMPLE_TIME; 3489 if (!pt->per_cpu_mmaps) 3490 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU; 3491 attr.exclude_user = evsel->core.attr.exclude_user; 3492 attr.exclude_kernel = evsel->core.attr.exclude_kernel; 3493 attr.exclude_hv = evsel->core.attr.exclude_hv; 3494 attr.exclude_host = evsel->core.attr.exclude_host; 3495 attr.exclude_guest = evsel->core.attr.exclude_guest; 3496 attr.sample_id_all = evsel->core.attr.sample_id_all; 
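	/*
	 * The synthesized events reuse the PT event's exclude_* and
	 * sample_id_all/read_format settings, and their ids are allocated
	 * starting from the PT event's first id plus a large offset,
	 * presumably to keep clear of ids that are already in use.
	 */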
3497 attr.read_format = evsel->core.attr.read_format; 3498 3499 id = evsel->core.id[0] + 1000000000; 3500 if (!id) 3501 id = 1; 3502 3503 if (pt->synth_opts.branches) { 3504 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS; 3505 attr.sample_period = 1; 3506 attr.sample_type |= PERF_SAMPLE_ADDR; 3507 err = intel_pt_synth_event(session, "branches", &attr, id); 3508 if (err) 3509 return err; 3510 pt->sample_branches = true; 3511 pt->branches_sample_type = attr.sample_type; 3512 pt->branches_id = id; 3513 id += 1; 3514 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR; 3515 } 3516 3517 if (pt->synth_opts.callchain) 3518 attr.sample_type |= PERF_SAMPLE_CALLCHAIN; 3519 if (pt->synth_opts.last_branch) { 3520 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; 3521 /* 3522 * We don't use the hardware index, but the sample generation 3523 * code uses the new format branch_stack with this field, 3524 * so the event attributes must indicate that it's present. 3525 */ 3526 attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX; 3527 } 3528 3529 if (pt->synth_opts.instructions) { 3530 attr.config = PERF_COUNT_HW_INSTRUCTIONS; 3531 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS) 3532 attr.sample_period = 3533 intel_pt_ns_to_ticks(pt, pt->synth_opts.period); 3534 else 3535 attr.sample_period = pt->synth_opts.period; 3536 err = intel_pt_synth_event(session, "instructions", &attr, id); 3537 if (err) 3538 return err; 3539 pt->sample_instructions = true; 3540 pt->instructions_sample_type = attr.sample_type; 3541 pt->instructions_id = id; 3542 id += 1; 3543 } 3544 3545 attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD; 3546 attr.sample_period = 1; 3547 3548 if (pt->synth_opts.transactions) { 3549 attr.config = PERF_COUNT_HW_INSTRUCTIONS; 3550 err = intel_pt_synth_event(session, "transactions", &attr, id); 3551 if (err) 3552 return err; 3553 pt->sample_transactions = true; 3554 pt->transactions_sample_type = attr.sample_type; 3555 pt->transactions_id = id; 3556 intel_pt_set_event_name(evlist, id, "transactions"); 3557 id += 1; 3558 } 3559 3560 attr.type = PERF_TYPE_SYNTH; 3561 attr.sample_type |= PERF_SAMPLE_RAW; 3562 3563 if (pt->synth_opts.ptwrites) { 3564 attr.config = PERF_SYNTH_INTEL_PTWRITE; 3565 err = intel_pt_synth_event(session, "ptwrite", &attr, id); 3566 if (err) 3567 return err; 3568 pt->sample_ptwrites = true; 3569 pt->ptwrites_sample_type = attr.sample_type; 3570 pt->ptwrites_id = id; 3571 intel_pt_set_event_name(evlist, id, "ptwrite"); 3572 id += 1; 3573 } 3574 3575 if (pt->synth_opts.pwr_events) { 3576 pt->sample_pwr_events = true; 3577 pt->pwr_events_sample_type = attr.sample_type; 3578 3579 attr.config = PERF_SYNTH_INTEL_CBR; 3580 err = intel_pt_synth_event(session, "cbr", &attr, id); 3581 if (err) 3582 return err; 3583 pt->cbr_id = id; 3584 intel_pt_set_event_name(evlist, id, "cbr"); 3585 id += 1; 3586 3587 attr.config = PERF_SYNTH_INTEL_PSB; 3588 err = intel_pt_synth_event(session, "psb", &attr, id); 3589 if (err) 3590 return err; 3591 pt->psb_id = id; 3592 intel_pt_set_event_name(evlist, id, "psb"); 3593 id += 1; 3594 } 3595 3596 if (pt->synth_opts.pwr_events && (evsel->core.attr.config & INTEL_PT_CFG_PWR_EVT_EN)) { 3597 attr.config = PERF_SYNTH_INTEL_MWAIT; 3598 err = intel_pt_synth_event(session, "mwait", &attr, id); 3599 if (err) 3600 return err; 3601 pt->mwait_id = id; 3602 intel_pt_set_event_name(evlist, id, "mwait"); 3603 id += 1; 3604 3605 attr.config = PERF_SYNTH_INTEL_PWRE; 3606 err = intel_pt_synth_event(session, "pwre", &attr, id); 3607 if (err) 3608 return err; 3609 pt->pwre_id = 
id; 3610 intel_pt_set_event_name(evlist, id, "pwre"); 3611 id += 1; 3612 3613 attr.config = PERF_SYNTH_INTEL_EXSTOP; 3614 err = intel_pt_synth_event(session, "exstop", &attr, id); 3615 if (err) 3616 return err; 3617 pt->exstop_id = id; 3618 intel_pt_set_event_name(evlist, id, "exstop"); 3619 id += 1; 3620 3621 attr.config = PERF_SYNTH_INTEL_PWRX; 3622 err = intel_pt_synth_event(session, "pwrx", &attr, id); 3623 if (err) 3624 return err; 3625 pt->pwrx_id = id; 3626 intel_pt_set_event_name(evlist, id, "pwrx"); 3627 id += 1; 3628 } 3629 3630 if (pt->synth_opts.intr_events && (evsel->core.attr.config & INTEL_PT_CFG_EVT_EN)) { 3631 attr.config = PERF_SYNTH_INTEL_EVT; 3632 err = intel_pt_synth_event(session, "evt", &attr, id); 3633 if (err) 3634 return err; 3635 pt->evt_sample_type = attr.sample_type; 3636 pt->evt_id = id; 3637 intel_pt_set_event_name(evlist, id, "evt"); 3638 id += 1; 3639 } 3640 3641 if (pt->synth_opts.intr_events && pt->cap_event_trace) { 3642 attr.config = PERF_SYNTH_INTEL_IFLAG_CHG; 3643 err = intel_pt_synth_event(session, "iflag", &attr, id); 3644 if (err) 3645 return err; 3646 pt->iflag_chg_sample_type = attr.sample_type; 3647 pt->iflag_chg_id = id; 3648 intel_pt_set_event_name(evlist, id, "iflag"); 3649 id += 1; 3650 } 3651 3652 return 0; 3653 } 3654 3655 static void intel_pt_setup_pebs_events(struct intel_pt *pt) 3656 { 3657 struct evsel *evsel; 3658 3659 if (!pt->synth_opts.other_events) 3660 return; 3661 3662 evlist__for_each_entry(pt->session->evlist, evsel) { 3663 if (evsel->core.attr.aux_output && evsel->core.id) { 3664 if (pt->single_pebs) { 3665 pt->single_pebs = false; 3666 return; 3667 } 3668 pt->single_pebs = true; 3669 pt->sample_pebs = true; 3670 pt->pebs_evsel = evsel; 3671 } 3672 } 3673 } 3674 3675 static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist) 3676 { 3677 struct evsel *evsel; 3678 3679 evlist__for_each_entry_reverse(evlist, evsel) { 3680 const char *name = evsel__name(evsel); 3681 3682 if (!strcmp(name, "sched:sched_switch")) 3683 return evsel; 3684 } 3685 3686 return NULL; 3687 } 3688 3689 static bool intel_pt_find_switch(struct evlist *evlist) 3690 { 3691 struct evsel *evsel; 3692 3693 evlist__for_each_entry(evlist, evsel) { 3694 if (evsel->core.attr.context_switch) 3695 return true; 3696 } 3697 3698 return false; 3699 } 3700 3701 static int intel_pt_perf_config(const char *var, const char *value, void *data) 3702 { 3703 struct intel_pt *pt = data; 3704 3705 if (!strcmp(var, "intel-pt.mispred-all")) 3706 pt->mispred_all = perf_config_bool(var, value); 3707 3708 if (!strcmp(var, "intel-pt.max-loops")) 3709 perf_config_int(&pt->max_loops, var, value); 3710 3711 return 0; 3712 } 3713 3714 /* Find least TSC which converts to ns or later */ 3715 static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt) 3716 { 3717 u64 tsc, tm; 3718 3719 tsc = perf_time_to_tsc(ns, &pt->tc); 3720 3721 while (1) { 3722 tm = tsc_to_perf_time(tsc, &pt->tc); 3723 if (tm < ns) 3724 break; 3725 tsc -= 1; 3726 } 3727 3728 while (tm < ns) 3729 tm = tsc_to_perf_time(++tsc, &pt->tc); 3730 3731 return tsc; 3732 } 3733 3734 /* Find greatest TSC which converts to ns or earlier */ 3735 static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt) 3736 { 3737 u64 tsc, tm; 3738 3739 tsc = perf_time_to_tsc(ns, &pt->tc); 3740 3741 while (1) { 3742 tm = tsc_to_perf_time(tsc, &pt->tc); 3743 if (tm > ns) 3744 break; 3745 tsc += 1; 3746 } 3747 3748 while (tm > ns) 3749 tm = tsc_to_perf_time(--tsc, &pt->tc); 3750 3751 return tsc; 3752 } 3753 3754 static int 
intel_pt_setup_time_ranges(struct intel_pt *pt, 3755 struct itrace_synth_opts *opts) 3756 { 3757 struct perf_time_interval *p = opts->ptime_range; 3758 int n = opts->range_num; 3759 int i; 3760 3761 if (!n || !p || pt->timeless_decoding) 3762 return 0; 3763 3764 pt->time_ranges = calloc(n, sizeof(struct range)); 3765 if (!pt->time_ranges) 3766 return -ENOMEM; 3767 3768 pt->range_cnt = n; 3769 3770 intel_pt_log("%s: %u range(s)\n", __func__, n); 3771 3772 for (i = 0; i < n; i++) { 3773 struct range *r = &pt->time_ranges[i]; 3774 u64 ts = p[i].start; 3775 u64 te = p[i].end; 3776 3777 /* 3778 * Take care to ensure the TSC range matches the perf-time range 3779 * when converted back to perf-time. 3780 */ 3781 r->start = ts ? intel_pt_tsc_start(ts, pt) : 0; 3782 r->end = te ? intel_pt_tsc_end(te, pt) : 0; 3783 3784 intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n", 3785 i, ts, te); 3786 intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n", 3787 i, r->start, r->end); 3788 } 3789 3790 return 0; 3791 } 3792 3793 static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args) 3794 { 3795 struct intel_pt_vmcs_info *vmcs_info; 3796 u64 tsc_offset, vmcs; 3797 char *p = *args; 3798 3799 errno = 0; 3800 3801 p = skip_spaces(p); 3802 if (!*p) 3803 return 1; 3804 3805 tsc_offset = strtoull(p, &p, 0); 3806 if (errno) 3807 return -errno; 3808 p = skip_spaces(p); 3809 if (*p != ':') { 3810 pt->dflt_tsc_offset = tsc_offset; 3811 *args = p; 3812 return 0; 3813 } 3814 p += 1; 3815 while (1) { 3816 vmcs = strtoull(p, &p, 0); 3817 if (errno) 3818 return -errno; 3819 if (!vmcs) 3820 return -EINVAL; 3821 vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset); 3822 if (!vmcs_info) 3823 return -ENOMEM; 3824 p = skip_spaces(p); 3825 if (*p != ',') 3826 break; 3827 p += 1; 3828 } 3829 *args = p; 3830 return 0; 3831 } 3832 3833 static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt) 3834 { 3835 char *args = pt->synth_opts.vm_tm_corr_args; 3836 int ret; 3837 3838 if (!args) 3839 return 0; 3840 3841 do { 3842 ret = intel_pt_parse_vm_tm_corr_arg(pt, &args); 3843 } while (!ret); 3844 3845 if (ret < 0) { 3846 pr_err("Failed to parse VM Time Correlation options\n"); 3847 return ret; 3848 } 3849 3850 return 0; 3851 } 3852 3853 static const char * const intel_pt_info_fmts[] = { 3854 [INTEL_PT_PMU_TYPE] = " PMU Type %"PRId64"\n", 3855 [INTEL_PT_TIME_SHIFT] = " Time Shift %"PRIu64"\n", 3856 [INTEL_PT_TIME_MULT] = " Time Muliplier %"PRIu64"\n", 3857 [INTEL_PT_TIME_ZERO] = " Time Zero %"PRIu64"\n", 3858 [INTEL_PT_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n", 3859 [INTEL_PT_TSC_BIT] = " TSC bit %#"PRIx64"\n", 3860 [INTEL_PT_NORETCOMP_BIT] = " NoRETComp bit %#"PRIx64"\n", 3861 [INTEL_PT_HAVE_SCHED_SWITCH] = " Have sched_switch %"PRId64"\n", 3862 [INTEL_PT_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n", 3863 [INTEL_PT_PER_CPU_MMAPS] = " Per-cpu maps %"PRId64"\n", 3864 [INTEL_PT_MTC_BIT] = " MTC bit %#"PRIx64"\n", 3865 [INTEL_PT_TSC_CTC_N] = " TSC:CTC numerator %"PRIu64"\n", 3866 [INTEL_PT_TSC_CTC_D] = " TSC:CTC denominator %"PRIu64"\n", 3867 [INTEL_PT_CYC_BIT] = " CYC bit %#"PRIx64"\n", 3868 [INTEL_PT_MAX_NONTURBO_RATIO] = " Max non-turbo ratio %"PRIu64"\n", 3869 [INTEL_PT_FILTER_STR_LEN] = " Filter string len. 
%"PRIu64"\n", 3870 }; 3871 3872 static void intel_pt_print_info(__u64 *arr, int start, int finish) 3873 { 3874 int i; 3875 3876 if (!dump_trace) 3877 return; 3878 3879 for (i = start; i <= finish; i++) 3880 fprintf(stdout, intel_pt_info_fmts[i], arr[i]); 3881 } 3882 3883 static void intel_pt_print_info_str(const char *name, const char *str) 3884 { 3885 if (!dump_trace) 3886 return; 3887 3888 fprintf(stdout, " %-20s%s\n", name, str ? str : ""); 3889 } 3890 3891 static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos) 3892 { 3893 return auxtrace_info->header.size >= 3894 sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1)); 3895 } 3896 3897 int intel_pt_process_auxtrace_info(union perf_event *event, 3898 struct perf_session *session) 3899 { 3900 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; 3901 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS; 3902 struct intel_pt *pt; 3903 void *info_end; 3904 __u64 *info; 3905 int err; 3906 3907 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) + 3908 min_sz) 3909 return -EINVAL; 3910 3911 pt = zalloc(sizeof(struct intel_pt)); 3912 if (!pt) 3913 return -ENOMEM; 3914 3915 pt->vmcs_info = RB_ROOT; 3916 3917 addr_filters__init(&pt->filts); 3918 3919 err = perf_config(intel_pt_perf_config, pt); 3920 if (err) 3921 goto err_free; 3922 3923 err = auxtrace_queues__init(&pt->queues); 3924 if (err) 3925 goto err_free; 3926 3927 if (session->itrace_synth_opts->set) { 3928 pt->synth_opts = *session->itrace_synth_opts; 3929 } else { 3930 struct itrace_synth_opts *opts = session->itrace_synth_opts; 3931 3932 itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample); 3933 if (!opts->default_no_sample && !opts->inject) { 3934 pt->synth_opts.branches = false; 3935 pt->synth_opts.callchain = true; 3936 pt->synth_opts.add_callchain = true; 3937 } 3938 pt->synth_opts.thread_stack = opts->thread_stack; 3939 } 3940 3941 if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT)) 3942 intel_pt_log_set_name(INTEL_PT_PMU_NAME); 3943 3944 pt->session = session; 3945 pt->machine = &session->machines.host; /* No kvm support */ 3946 pt->auxtrace_type = auxtrace_info->type; 3947 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE]; 3948 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT]; 3949 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT]; 3950 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO]; 3951 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO]; 3952 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT]; 3953 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT]; 3954 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH]; 3955 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE]; 3956 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS]; 3957 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE, 3958 INTEL_PT_PER_CPU_MMAPS); 3959 3960 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) { 3961 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT]; 3962 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS]; 3963 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N]; 3964 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D]; 3965 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT]; 3966 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT, 3967 INTEL_PT_CYC_BIT); 3968 } 3969 3970 if (intel_pt_has(auxtrace_info, 
INTEL_PT_MAX_NONTURBO_RATIO)) { 3971 pt->max_non_turbo_ratio = 3972 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO]; 3973 intel_pt_print_info(&auxtrace_info->priv[0], 3974 INTEL_PT_MAX_NONTURBO_RATIO, 3975 INTEL_PT_MAX_NONTURBO_RATIO); 3976 } 3977 3978 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1; 3979 info_end = (void *)auxtrace_info + auxtrace_info->header.size; 3980 3981 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) { 3982 size_t len; 3983 3984 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN]; 3985 intel_pt_print_info(&auxtrace_info->priv[0], 3986 INTEL_PT_FILTER_STR_LEN, 3987 INTEL_PT_FILTER_STR_LEN); 3988 if (len) { 3989 const char *filter = (const char *)info; 3990 3991 len = roundup(len + 1, 8); 3992 info += len >> 3; 3993 if ((void *)info > info_end) { 3994 pr_err("%s: bad filter string length\n", __func__); 3995 err = -EINVAL; 3996 goto err_free_queues; 3997 } 3998 pt->filter = memdup(filter, len); 3999 if (!pt->filter) { 4000 err = -ENOMEM; 4001 goto err_free_queues; 4002 } 4003 if (session->header.needs_swap) 4004 mem_bswap_64(pt->filter, len); 4005 if (pt->filter[len - 1]) { 4006 pr_err("%s: filter string not null terminated\n", __func__); 4007 err = -EINVAL; 4008 goto err_free_queues; 4009 } 4010 err = addr_filters__parse_bare_filter(&pt->filts, 4011 filter); 4012 if (err) 4013 goto err_free_queues; 4014 } 4015 intel_pt_print_info_str("Filter string", pt->filter); 4016 } 4017 4018 if ((void *)info < info_end) { 4019 pt->cap_event_trace = *info++; 4020 if (dump_trace) 4021 fprintf(stdout, " Cap Event Trace %d\n", 4022 pt->cap_event_trace); 4023 } 4024 4025 pt->timeless_decoding = intel_pt_timeless_decoding(pt); 4026 if (pt->timeless_decoding && !pt->tc.time_mult) 4027 pt->tc.time_mult = 1; 4028 pt->have_tsc = intel_pt_have_tsc(pt); 4029 pt->sampling_mode = intel_pt_sampling_mode(pt); 4030 pt->est_tsc = !pt->timeless_decoding; 4031 4032 if (pt->synth_opts.vm_time_correlation) { 4033 if (pt->timeless_decoding) { 4034 pr_err("Intel PT has no time information for VM Time Correlation\n"); 4035 err = -EINVAL; 4036 goto err_free_queues; 4037 } 4038 if (session->itrace_synth_opts->ptime_range) { 4039 pr_err("Time ranges cannot be specified with VM Time Correlation\n"); 4040 err = -EINVAL; 4041 goto err_free_queues; 4042 } 4043 /* Currently TSC Offset is calculated using MTC packets */ 4044 if (!intel_pt_have_mtc(pt)) { 4045 pr_err("MTC packets must have been enabled for VM Time Correlation\n"); 4046 err = -EINVAL; 4047 goto err_free_queues; 4048 } 4049 err = intel_pt_parse_vm_tm_corr_args(pt); 4050 if (err) 4051 goto err_free_queues; 4052 } 4053 4054 pt->unknown_thread = thread__new(999999999, 999999999); 4055 if (!pt->unknown_thread) { 4056 err = -ENOMEM; 4057 goto err_free_queues; 4058 } 4059 4060 /* 4061 * Since this thread will not be kept in any rbtree not in a 4062 * list, initialize its list node so that at thread__put() the 4063 * current thread lifetime assumption is kept and we don't segfault 4064 * at list_del_init(). 
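	 * (The pid/tid value of 999999999 used above is just an arbitrary
	 * placeholder for this fallback thread.)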
4065 */ 4066 INIT_LIST_HEAD(&pt->unknown_thread->node); 4067 4068 err = thread__set_comm(pt->unknown_thread, "unknown", 0); 4069 if (err) 4070 goto err_delete_thread; 4071 if (thread__init_maps(pt->unknown_thread, pt->machine)) { 4072 err = -ENOMEM; 4073 goto err_delete_thread; 4074 } 4075 4076 pt->auxtrace.process_event = intel_pt_process_event; 4077 pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event; 4078 pt->auxtrace.queue_data = intel_pt_queue_data; 4079 pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample; 4080 pt->auxtrace.flush_events = intel_pt_flush; 4081 pt->auxtrace.free_events = intel_pt_free_events; 4082 pt->auxtrace.free = intel_pt_free; 4083 pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace; 4084 session->auxtrace = &pt->auxtrace; 4085 4086 if (dump_trace) 4087 return 0; 4088 4089 if (pt->have_sched_switch == 1) { 4090 pt->switch_evsel = intel_pt_find_sched_switch(session->evlist); 4091 if (!pt->switch_evsel) { 4092 pr_err("%s: missing sched_switch event\n", __func__); 4093 err = -EINVAL; 4094 goto err_delete_thread; 4095 } 4096 } else if (pt->have_sched_switch == 2 && 4097 !intel_pt_find_switch(session->evlist)) { 4098 pr_err("%s: missing context_switch attribute flag\n", __func__); 4099 err = -EINVAL; 4100 goto err_delete_thread; 4101 } 4102 4103 if (pt->synth_opts.log) 4104 intel_pt_log_enable(); 4105 4106 /* Maximum non-turbo ratio is TSC freq / 100 MHz */ 4107 if (pt->tc.time_mult) { 4108 u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000); 4109 4110 if (!pt->max_non_turbo_ratio) 4111 pt->max_non_turbo_ratio = 4112 (tsc_freq + 50000000) / 100000000; 4113 intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq); 4114 intel_pt_log("Maximum non-turbo ratio %u\n", 4115 pt->max_non_turbo_ratio); 4116 pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000; 4117 } 4118 4119 err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts); 4120 if (err) 4121 goto err_delete_thread; 4122 4123 if (pt->synth_opts.calls) 4124 pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | 4125 PERF_IP_FLAG_TRACE_END; 4126 if (pt->synth_opts.returns) 4127 pt->branches_filter |= PERF_IP_FLAG_RETURN | 4128 PERF_IP_FLAG_TRACE_BEGIN; 4129 4130 if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) && 4131 !symbol_conf.use_callchain) { 4132 symbol_conf.use_callchain = true; 4133 if (callchain_register_param(&callchain_param) < 0) { 4134 symbol_conf.use_callchain = false; 4135 pt->synth_opts.callchain = false; 4136 pt->synth_opts.add_callchain = false; 4137 } 4138 } 4139 4140 if (pt->synth_opts.add_callchain) { 4141 err = intel_pt_callchain_init(pt); 4142 if (err) 4143 goto err_delete_thread; 4144 } 4145 4146 if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) { 4147 pt->br_stack_sz = pt->synth_opts.last_branch_sz; 4148 pt->br_stack_sz_plus = pt->br_stack_sz; 4149 } 4150 4151 if (pt->synth_opts.add_last_branch) { 4152 err = intel_pt_br_stack_init(pt); 4153 if (err) 4154 goto err_delete_thread; 4155 /* 4156 * Additional branch stack size to cater for tracing from the 4157 * actual sample ip to where the sample time is recorded. 4158 * Measured at about 200 branches, but generously set to 1024. 4159 * If kernel space is not being traced, then add just 1 for the 4160 * branch to kernel space. 
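	 * For example (illustration only): if last_branch_sz is 64, then
	 * br_stack_sz_plus ends up as 64 + 1024 = 1088 when kernel space is
	 * traced, or 64 + 1 = 65 otherwise.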
4161 */ 4162 if (intel_pt_tracing_kernel(pt)) 4163 pt->br_stack_sz_plus += 1024; 4164 else 4165 pt->br_stack_sz_plus += 1; 4166 } 4167 4168 pt->use_thread_stack = pt->synth_opts.callchain || 4169 pt->synth_opts.add_callchain || 4170 pt->synth_opts.thread_stack || 4171 pt->synth_opts.last_branch || 4172 pt->synth_opts.add_last_branch; 4173 4174 pt->callstack = pt->synth_opts.callchain || 4175 pt->synth_opts.add_callchain || 4176 pt->synth_opts.thread_stack; 4177 4178 err = intel_pt_synth_events(pt, session); 4179 if (err) 4180 goto err_delete_thread; 4181 4182 intel_pt_setup_pebs_events(pt); 4183 4184 if (pt->sampling_mode || list_empty(&session->auxtrace_index)) 4185 err = auxtrace_queue_data(session, true, true); 4186 else 4187 err = auxtrace_queues__process_index(&pt->queues, session); 4188 if (err) 4189 goto err_delete_thread; 4190 4191 if (pt->queues.populated) 4192 pt->data_queued = true; 4193 4194 if (pt->timeless_decoding) 4195 pr_debug2("Intel PT decoding without timestamps\n"); 4196 4197 return 0; 4198 4199 err_delete_thread: 4200 zfree(&pt->chain); 4201 thread__zput(pt->unknown_thread); 4202 err_free_queues: 4203 intel_pt_log_disable(); 4204 auxtrace_queues__free(&pt->queues); 4205 session->auxtrace = NULL; 4206 err_free: 4207 addr_filters__exit(&pt->filts); 4208 zfree(&pt->filter); 4209 zfree(&pt->time_ranges); 4210 free(pt); 4211 return err; 4212 } 4213
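
/*
 * Note on usage (for reference only, not part of the decoder): the data this
 * file processes is typically produced and consumed with something like:
 *
 *   perf record -e intel_pt//u -- <workload>
 *   perf script --itrace=<options>
 *
 * where the --itrace options select which of the samples synthesized above
 * (instructions, branches, transactions, ptwrite, power events, errors, etc.)
 * are generated.
 */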