// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "../perf.h"
#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "util.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	char insn[INTEL_PT_INSN_BUF_SZ];
};

static void intel_pt_dump(struct intel_pt *pt
__maybe_unused, 155 unsigned char *buf, size_t len) 156 { 157 struct intel_pt_pkt packet; 158 size_t pos = 0; 159 int ret, pkt_len, i; 160 char desc[INTEL_PT_PKT_DESC_MAX]; 161 const char *color = PERF_COLOR_BLUE; 162 163 color_fprintf(stdout, color, 164 ". ... Intel Processor Trace data: size %zu bytes\n", 165 len); 166 167 while (len) { 168 ret = intel_pt_get_packet(buf, len, &packet); 169 if (ret > 0) 170 pkt_len = ret; 171 else 172 pkt_len = 1; 173 printf("."); 174 color_fprintf(stdout, color, " %08x: ", pos); 175 for (i = 0; i < pkt_len; i++) 176 color_fprintf(stdout, color, " %02x", buf[i]); 177 for (; i < 16; i++) 178 color_fprintf(stdout, color, " "); 179 if (ret > 0) { 180 ret = intel_pt_pkt_desc(&packet, desc, 181 INTEL_PT_PKT_DESC_MAX); 182 if (ret > 0) 183 color_fprintf(stdout, color, " %s\n", desc); 184 } else { 185 color_fprintf(stdout, color, " Bad packet!\n"); 186 } 187 pos += pkt_len; 188 buf += pkt_len; 189 len -= pkt_len; 190 } 191 } 192 193 static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf, 194 size_t len) 195 { 196 printf(".\n"); 197 intel_pt_dump(pt, buf, len); 198 } 199 200 static void intel_pt_log_event(union perf_event *event) 201 { 202 FILE *f = intel_pt_log_fp(); 203 204 if (!intel_pt_enable_logging || !f) 205 return; 206 207 perf_event__fprintf(event, f); 208 } 209 210 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a, 211 struct auxtrace_buffer *b) 212 { 213 bool consecutive = false; 214 void *start; 215 216 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size, 217 pt->have_tsc, &consecutive); 218 if (!start) 219 return -EINVAL; 220 b->use_size = b->data + b->size - start; 221 b->use_data = start; 222 if (b->use_size && consecutive) 223 b->consecutive = true; 224 return 0; 225 } 226 227 /* This function assumes data is processed sequentially only */ 228 static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data) 229 { 230 struct intel_pt_queue *ptq = data; 231 struct auxtrace_buffer *buffer = ptq->buffer; 232 struct auxtrace_buffer *old_buffer = ptq->old_buffer; 233 struct auxtrace_queue *queue; 234 bool might_overlap; 235 236 if (ptq->stop) { 237 b->len = 0; 238 return 0; 239 } 240 241 queue = &ptq->pt->queues.queue_array[ptq->queue_nr]; 242 243 buffer = auxtrace_buffer__next(queue, buffer); 244 if (!buffer) { 245 if (old_buffer) 246 auxtrace_buffer__drop_data(old_buffer); 247 b->len = 0; 248 return 0; 249 } 250 251 ptq->buffer = buffer; 252 253 if (!buffer->data) { 254 int fd = perf_data__fd(ptq->pt->session->data); 255 256 buffer->data = auxtrace_buffer__get_data(buffer, fd); 257 if (!buffer->data) 258 return -ENOMEM; 259 } 260 261 might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode; 262 if (might_overlap && !buffer->consecutive && old_buffer && 263 intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer)) 264 return -ENOMEM; 265 266 if (buffer->use_data) { 267 b->len = buffer->use_size; 268 b->buf = buffer->use_data; 269 } else { 270 b->len = buffer->size; 271 b->buf = buffer->data; 272 } 273 b->ref_timestamp = buffer->reference; 274 275 if (!old_buffer || (might_overlap && !buffer->consecutive)) { 276 b->consecutive = false; 277 b->trace_nr = buffer->buffer_nr + 1; 278 } else { 279 b->consecutive = true; 280 } 281 282 if (ptq->step_through_buffers) 283 ptq->stop = true; 284 285 if (b->len) { 286 if (old_buffer) 287 auxtrace_buffer__drop_data(old_buffer); 288 ptq->old_buffer = buffer; 289 } else { 290 auxtrace_buffer__drop_data(buffer); 291 return intel_pt_get_trace(b, 
data); 292 } 293 294 return 0; 295 } 296 297 struct intel_pt_cache_entry { 298 struct auxtrace_cache_entry entry; 299 u64 insn_cnt; 300 u64 byte_cnt; 301 enum intel_pt_insn_op op; 302 enum intel_pt_insn_branch branch; 303 int length; 304 int32_t rel; 305 char insn[INTEL_PT_INSN_BUF_SZ]; 306 }; 307 308 static int intel_pt_config_div(const char *var, const char *value, void *data) 309 { 310 int *d = data; 311 long val; 312 313 if (!strcmp(var, "intel-pt.cache-divisor")) { 314 val = strtol(value, NULL, 0); 315 if (val > 0 && val <= INT_MAX) 316 *d = val; 317 } 318 319 return 0; 320 } 321 322 static int intel_pt_cache_divisor(void) 323 { 324 static int d; 325 326 if (d) 327 return d; 328 329 perf_config(intel_pt_config_div, &d); 330 331 if (!d) 332 d = 64; 333 334 return d; 335 } 336 337 static unsigned int intel_pt_cache_size(struct dso *dso, 338 struct machine *machine) 339 { 340 off_t size; 341 342 size = dso__data_size(dso, machine); 343 size /= intel_pt_cache_divisor(); 344 if (size < 1000) 345 return 10; 346 if (size > (1 << 21)) 347 return 21; 348 return 32 - __builtin_clz(size); 349 } 350 351 static struct auxtrace_cache *intel_pt_cache(struct dso *dso, 352 struct machine *machine) 353 { 354 struct auxtrace_cache *c; 355 unsigned int bits; 356 357 if (dso->auxtrace_cache) 358 return dso->auxtrace_cache; 359 360 bits = intel_pt_cache_size(dso, machine); 361 362 /* Ignoring cache creation failure */ 363 c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200); 364 365 dso->auxtrace_cache = c; 366 367 return c; 368 } 369 370 static int intel_pt_cache_add(struct dso *dso, struct machine *machine, 371 u64 offset, u64 insn_cnt, u64 byte_cnt, 372 struct intel_pt_insn *intel_pt_insn) 373 { 374 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 375 struct intel_pt_cache_entry *e; 376 int err; 377 378 if (!c) 379 return -ENOMEM; 380 381 e = auxtrace_cache__alloc_entry(c); 382 if (!e) 383 return -ENOMEM; 384 385 e->insn_cnt = insn_cnt; 386 e->byte_cnt = byte_cnt; 387 e->op = intel_pt_insn->op; 388 e->branch = intel_pt_insn->branch; 389 e->length = intel_pt_insn->length; 390 e->rel = intel_pt_insn->rel; 391 memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ); 392 393 err = auxtrace_cache__add(c, offset, &e->entry); 394 if (err) 395 auxtrace_cache__free_entry(c, e); 396 397 return err; 398 } 399 400 static struct intel_pt_cache_entry * 401 intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset) 402 { 403 struct auxtrace_cache *c = intel_pt_cache(dso, machine); 404 405 if (!c) 406 return NULL; 407 408 return auxtrace_cache__lookup(dso->auxtrace_cache, offset); 409 } 410 411 static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip) 412 { 413 return ip >= pt->kernel_start ? 
414 PERF_RECORD_MISC_KERNEL : 415 PERF_RECORD_MISC_USER; 416 } 417 418 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn, 419 uint64_t *insn_cnt_ptr, uint64_t *ip, 420 uint64_t to_ip, uint64_t max_insn_cnt, 421 void *data) 422 { 423 struct intel_pt_queue *ptq = data; 424 struct machine *machine = ptq->pt->machine; 425 struct thread *thread; 426 struct addr_location al; 427 unsigned char buf[INTEL_PT_INSN_BUF_SZ]; 428 ssize_t len; 429 int x86_64; 430 u8 cpumode; 431 u64 offset, start_offset, start_ip; 432 u64 insn_cnt = 0; 433 bool one_map = true; 434 435 intel_pt_insn->length = 0; 436 437 if (to_ip && *ip == to_ip) 438 goto out_no_cache; 439 440 cpumode = intel_pt_cpumode(ptq->pt, *ip); 441 442 thread = ptq->thread; 443 if (!thread) { 444 if (cpumode != PERF_RECORD_MISC_KERNEL) 445 return -EINVAL; 446 thread = ptq->pt->unknown_thread; 447 } 448 449 while (1) { 450 if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso) 451 return -EINVAL; 452 453 if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && 454 dso__data_status_seen(al.map->dso, 455 DSO_DATA_STATUS_SEEN_ITRACE)) 456 return -ENOENT; 457 458 offset = al.map->map_ip(al.map, *ip); 459 460 if (!to_ip && one_map) { 461 struct intel_pt_cache_entry *e; 462 463 e = intel_pt_cache_lookup(al.map->dso, machine, offset); 464 if (e && 465 (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) { 466 *insn_cnt_ptr = e->insn_cnt; 467 *ip += e->byte_cnt; 468 intel_pt_insn->op = e->op; 469 intel_pt_insn->branch = e->branch; 470 intel_pt_insn->length = e->length; 471 intel_pt_insn->rel = e->rel; 472 memcpy(intel_pt_insn->buf, e->insn, 473 INTEL_PT_INSN_BUF_SZ); 474 intel_pt_log_insn_no_data(intel_pt_insn, *ip); 475 return 0; 476 } 477 } 478 479 start_offset = offset; 480 start_ip = *ip; 481 482 /* Load maps to ensure dso->is_64_bit has been updated */ 483 map__load(al.map); 484 485 x86_64 = al.map->dso->is_64_bit; 486 487 while (1) { 488 len = dso__data_read_offset(al.map->dso, machine, 489 offset, buf, 490 INTEL_PT_INSN_BUF_SZ); 491 if (len <= 0) 492 return -EINVAL; 493 494 if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn)) 495 return -EINVAL; 496 497 intel_pt_log_insn(intel_pt_insn, *ip); 498 499 insn_cnt += 1; 500 501 if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) 502 goto out; 503 504 if (max_insn_cnt && insn_cnt >= max_insn_cnt) 505 goto out_no_cache; 506 507 *ip += intel_pt_insn->length; 508 509 if (to_ip && *ip == to_ip) 510 goto out_no_cache; 511 512 if (*ip >= al.map->end) 513 break; 514 515 offset += intel_pt_insn->length; 516 } 517 one_map = false; 518 } 519 out: 520 *insn_cnt_ptr = insn_cnt; 521 522 if (!one_map) 523 goto out_no_cache; 524 525 /* 526 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate 527 * entries. 
528 */ 529 if (to_ip) { 530 struct intel_pt_cache_entry *e; 531 532 e = intel_pt_cache_lookup(al.map->dso, machine, start_offset); 533 if (e) 534 return 0; 535 } 536 537 /* Ignore cache errors */ 538 intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt, 539 *ip - start_ip, intel_pt_insn); 540 541 return 0; 542 543 out_no_cache: 544 *insn_cnt_ptr = insn_cnt; 545 return 0; 546 } 547 548 static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip, 549 uint64_t offset, const char *filename) 550 { 551 struct addr_filter *filt; 552 bool have_filter = false; 553 bool hit_tracestop = false; 554 bool hit_filter = false; 555 556 list_for_each_entry(filt, &pt->filts.head, list) { 557 if (filt->start) 558 have_filter = true; 559 560 if ((filename && !filt->filename) || 561 (!filename && filt->filename) || 562 (filename && strcmp(filename, filt->filename))) 563 continue; 564 565 if (!(offset >= filt->addr && offset < filt->addr + filt->size)) 566 continue; 567 568 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n", 569 ip, offset, filename ? filename : "[kernel]", 570 filt->start ? "filter" : "stop", 571 filt->addr, filt->size); 572 573 if (filt->start) 574 hit_filter = true; 575 else 576 hit_tracestop = true; 577 } 578 579 if (!hit_tracestop && !hit_filter) 580 intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n", 581 ip, offset, filename ? filename : "[kernel]"); 582 583 return hit_tracestop || (have_filter && !hit_filter); 584 } 585 586 static int __intel_pt_pgd_ip(uint64_t ip, void *data) 587 { 588 struct intel_pt_queue *ptq = data; 589 struct thread *thread; 590 struct addr_location al; 591 u8 cpumode; 592 u64 offset; 593 594 if (ip >= ptq->pt->kernel_start) 595 return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL); 596 597 cpumode = PERF_RECORD_MISC_USER; 598 599 thread = ptq->thread; 600 if (!thread) 601 return -EINVAL; 602 603 if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso) 604 return -EINVAL; 605 606 offset = al.map->map_ip(al.map, ip); 607 608 return intel_pt_match_pgd_ip(ptq->pt, ip, offset, 609 al.map->dso->long_name); 610 } 611 612 static bool intel_pt_pgd_ip(uint64_t ip, void *data) 613 { 614 return __intel_pt_pgd_ip(ip, data) > 0; 615 } 616 617 static bool intel_pt_get_config(struct intel_pt *pt, 618 struct perf_event_attr *attr, u64 *config) 619 { 620 if (attr->type == pt->pmu_type) { 621 if (config) 622 *config = attr->config; 623 return true; 624 } 625 626 return false; 627 } 628 629 static bool intel_pt_exclude_kernel(struct intel_pt *pt) 630 { 631 struct perf_evsel *evsel; 632 633 evlist__for_each_entry(pt->session->evlist, evsel) { 634 if (intel_pt_get_config(pt, &evsel->attr, NULL) && 635 !evsel->attr.exclude_kernel) 636 return false; 637 } 638 return true; 639 } 640 641 static bool intel_pt_return_compression(struct intel_pt *pt) 642 { 643 struct perf_evsel *evsel; 644 u64 config; 645 646 if (!pt->noretcomp_bit) 647 return true; 648 649 evlist__for_each_entry(pt->session->evlist, evsel) { 650 if (intel_pt_get_config(pt, &evsel->attr, &config) && 651 (config & pt->noretcomp_bit)) 652 return false; 653 } 654 return true; 655 } 656 657 static bool intel_pt_branch_enable(struct intel_pt *pt) 658 { 659 struct perf_evsel *evsel; 660 u64 config; 661 662 evlist__for_each_entry(pt->session->evlist, evsel) { 663 if (intel_pt_get_config(pt, &evsel->attr, &config) && 664 (config & 1) && !(config & 0x2000)) 665 return false; 666 } 667 return true; 668 } 669 670 
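/*
 * The config helpers below all work the same way: scan the evlist for the
 * event whose attr.type matches the Intel PT PMU (via intel_pt_get_config())
 * and test feature bits of the recorded attr.config against the bit masks
 * saved in the auxtrace info event (tsc_bit, noretcomp_bit, mtc_freq_bits,
 * ...).
 *
 * Illustrative example (the bit positions here are an assumption, purely for
 * illustration; the real mask comes from the perf.data file): if the PMU's
 * mtc_period format were config bits 14-17, mtc_freq_bits would be 0x3c000,
 * the loop in intel_pt_mtc_period() would find shift == 14, and the raw
 * MTCFreq field would be returned as (config & 0x3c000) >> 14.
 */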
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
		pt->tc.time_mult;
}

static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct perf_env *env = pt->machine->env;
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		/* Add 1 to callchain_sz for callchain context */
		sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += pt->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		ptq->last_branch = zalloc(sz);
		if (!ptq->last_branch)
			goto out_free;
		ptq->last_branch_rb = zalloc(sz);
		if (!ptq->last_branch_rb)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
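	/*
	 * The remaining timing parameters carry the record-time setup to the
	 * decoder: the MTC period is recovered from attr.config and the
	 * TSC:CTC ratio was saved in the auxtrace info event; the decoder
	 * needs them to convert MTC/CYC timing packets into TSC time.
	 */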
params.mtc_period = intel_pt_mtc_period(pt); 806 params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n; 807 params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d; 808 809 if (pt->filts.cnt > 0) 810 params.pgd_ip = intel_pt_pgd_ip; 811 812 if (pt->synth_opts.instructions) { 813 if (pt->synth_opts.period) { 814 switch (pt->synth_opts.period_type) { 815 case PERF_ITRACE_PERIOD_INSTRUCTIONS: 816 params.period_type = 817 INTEL_PT_PERIOD_INSTRUCTIONS; 818 params.period = pt->synth_opts.period; 819 break; 820 case PERF_ITRACE_PERIOD_TICKS: 821 params.period_type = INTEL_PT_PERIOD_TICKS; 822 params.period = pt->synth_opts.period; 823 break; 824 case PERF_ITRACE_PERIOD_NANOSECS: 825 params.period_type = INTEL_PT_PERIOD_TICKS; 826 params.period = intel_pt_ns_to_ticks(pt, 827 pt->synth_opts.period); 828 break; 829 default: 830 break; 831 } 832 } 833 834 if (!params.period) { 835 params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS; 836 params.period = 1; 837 } 838 } 839 840 if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18)) 841 params.flags |= INTEL_PT_FUP_WITH_NLIP; 842 843 ptq->decoder = intel_pt_decoder_new(¶ms); 844 if (!ptq->decoder) 845 goto out_free; 846 847 return ptq; 848 849 out_free: 850 zfree(&ptq->event_buf); 851 zfree(&ptq->last_branch); 852 zfree(&ptq->last_branch_rb); 853 zfree(&ptq->chain); 854 free(ptq); 855 return NULL; 856 } 857 858 static void intel_pt_free_queue(void *priv) 859 { 860 struct intel_pt_queue *ptq = priv; 861 862 if (!ptq) 863 return; 864 thread__zput(ptq->thread); 865 intel_pt_decoder_free(ptq->decoder); 866 zfree(&ptq->event_buf); 867 zfree(&ptq->last_branch); 868 zfree(&ptq->last_branch_rb); 869 zfree(&ptq->chain); 870 free(ptq); 871 } 872 873 static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt, 874 struct auxtrace_queue *queue) 875 { 876 struct intel_pt_queue *ptq = queue->priv; 877 878 if (queue->tid == -1 || pt->have_sched_switch) { 879 ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu); 880 thread__zput(ptq->thread); 881 } 882 883 if (!ptq->thread && ptq->tid != -1) 884 ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid); 885 886 if (ptq->thread) { 887 ptq->pid = ptq->thread->pid_; 888 if (queue->cpu == -1) 889 ptq->cpu = ptq->thread->cpu; 890 } 891 } 892 893 static void intel_pt_sample_flags(struct intel_pt_queue *ptq) 894 { 895 if (ptq->state->flags & INTEL_PT_ABORT_TX) { 896 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT; 897 } else if (ptq->state->flags & INTEL_PT_ASYNC) { 898 if (ptq->state->to_ip) 899 ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | 900 PERF_IP_FLAG_ASYNC | 901 PERF_IP_FLAG_INTERRUPT; 902 else 903 ptq->flags = PERF_IP_FLAG_BRANCH | 904 PERF_IP_FLAG_TRACE_END; 905 ptq->insn_len = 0; 906 } else { 907 if (ptq->state->from_ip) 908 ptq->flags = intel_pt_insn_type(ptq->state->insn_op); 909 else 910 ptq->flags = PERF_IP_FLAG_BRANCH | 911 PERF_IP_FLAG_TRACE_BEGIN; 912 if (ptq->state->flags & INTEL_PT_IN_TX) 913 ptq->flags |= PERF_IP_FLAG_IN_TX; 914 ptq->insn_len = ptq->state->insn_len; 915 memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ); 916 } 917 918 if (ptq->state->type & INTEL_PT_TRACE_BEGIN) 919 ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN; 920 if (ptq->state->type & INTEL_PT_TRACE_END) 921 ptq->flags |= PERF_IP_FLAG_TRACE_END; 922 } 923 924 static int intel_pt_setup_queue(struct intel_pt *pt, 925 struct auxtrace_queue *queue, 926 unsigned int queue_nr) 927 { 928 struct intel_pt_queue *ptq = queue->priv; 929 930 if (list_empty(&queue->head)) 931 return 0; 932 933 if (!ptq) { 934 ptq = 
intel_pt_alloc_queue(pt, queue_nr); 935 if (!ptq) 936 return -ENOMEM; 937 queue->priv = ptq; 938 939 if (queue->cpu != -1) 940 ptq->cpu = queue->cpu; 941 ptq->tid = queue->tid; 942 943 if (pt->sampling_mode && !pt->snapshot_mode && 944 pt->timeless_decoding) 945 ptq->step_through_buffers = true; 946 947 ptq->sync_switch = pt->sync_switch; 948 } 949 950 if (!ptq->on_heap && 951 (!ptq->sync_switch || 952 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) { 953 const struct intel_pt_state *state; 954 int ret; 955 956 if (pt->timeless_decoding) 957 return 0; 958 959 intel_pt_log("queue %u getting timestamp\n", queue_nr); 960 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", 961 queue_nr, ptq->cpu, ptq->pid, ptq->tid); 962 while (1) { 963 state = intel_pt_decode(ptq->decoder); 964 if (state->err) { 965 if (state->err == INTEL_PT_ERR_NODATA) { 966 intel_pt_log("queue %u has no timestamp\n", 967 queue_nr); 968 return 0; 969 } 970 continue; 971 } 972 if (state->timestamp) 973 break; 974 } 975 976 ptq->timestamp = state->timestamp; 977 intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n", 978 queue_nr, ptq->timestamp); 979 ptq->state = state; 980 ptq->have_sample = true; 981 intel_pt_sample_flags(ptq); 982 ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp); 983 if (ret) 984 return ret; 985 ptq->on_heap = true; 986 } 987 988 return 0; 989 } 990 991 static int intel_pt_setup_queues(struct intel_pt *pt) 992 { 993 unsigned int i; 994 int ret; 995 996 for (i = 0; i < pt->queues.nr_queues; i++) { 997 ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i); 998 if (ret) 999 return ret; 1000 } 1001 return 0; 1002 } 1003 1004 static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq) 1005 { 1006 struct branch_stack *bs_src = ptq->last_branch_rb; 1007 struct branch_stack *bs_dst = ptq->last_branch; 1008 size_t nr = 0; 1009 1010 bs_dst->nr = bs_src->nr; 1011 1012 if (!bs_src->nr) 1013 return; 1014 1015 nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos; 1016 memcpy(&bs_dst->entries[0], 1017 &bs_src->entries[ptq->last_branch_pos], 1018 sizeof(struct branch_entry) * nr); 1019 1020 if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) { 1021 memcpy(&bs_dst->entries[nr], 1022 &bs_src->entries[0], 1023 sizeof(struct branch_entry) * ptq->last_branch_pos); 1024 } 1025 } 1026 1027 static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq) 1028 { 1029 ptq->last_branch_pos = 0; 1030 ptq->last_branch_rb->nr = 0; 1031 } 1032 1033 static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq) 1034 { 1035 const struct intel_pt_state *state = ptq->state; 1036 struct branch_stack *bs = ptq->last_branch_rb; 1037 struct branch_entry *be; 1038 1039 if (!ptq->last_branch_pos) 1040 ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz; 1041 1042 ptq->last_branch_pos -= 1; 1043 1044 be = &bs->entries[ptq->last_branch_pos]; 1045 be->from = state->from_ip; 1046 be->to = state->to_ip; 1047 be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX); 1048 be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX); 1049 /* No support for mispredict */ 1050 be->flags.mispred = ptq->pt->mispred_all; 1051 1052 if (bs->nr < ptq->pt->synth_opts.last_branch_sz) 1053 bs->nr += 1; 1054 } 1055 1056 static inline bool intel_pt_skip_event(struct intel_pt *pt) 1057 { 1058 return pt->synth_opts.initial_skip && 1059 pt->num_events++ < pt->synth_opts.initial_skip; 1060 } 1061 1062 static void intel_pt_prep_b_sample(struct intel_pt *pt, 1063 struct intel_pt_queue *ptq, 1064 
union perf_event *event, 1065 struct perf_sample *sample) 1066 { 1067 if (!pt->timeless_decoding) 1068 sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc); 1069 1070 sample->ip = ptq->state->from_ip; 1071 sample->cpumode = intel_pt_cpumode(pt, sample->ip); 1072 sample->pid = ptq->pid; 1073 sample->tid = ptq->tid; 1074 sample->addr = ptq->state->to_ip; 1075 sample->period = 1; 1076 sample->cpu = ptq->cpu; 1077 sample->flags = ptq->flags; 1078 sample->insn_len = ptq->insn_len; 1079 memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ); 1080 1081 event->sample.header.type = PERF_RECORD_SAMPLE; 1082 event->sample.header.misc = sample->cpumode; 1083 event->sample.header.size = sizeof(struct perf_event_header); 1084 } 1085 1086 static int intel_pt_inject_event(union perf_event *event, 1087 struct perf_sample *sample, u64 type) 1088 { 1089 event->header.size = perf_event__sample_event_size(sample, type, 0); 1090 return perf_event__synthesize_sample(event, type, 0, sample); 1091 } 1092 1093 static inline int intel_pt_opt_inject(struct intel_pt *pt, 1094 union perf_event *event, 1095 struct perf_sample *sample, u64 type) 1096 { 1097 if (!pt->synth_opts.inject) 1098 return 0; 1099 1100 return intel_pt_inject_event(event, sample, type); 1101 } 1102 1103 static int intel_pt_deliver_synth_b_event(struct intel_pt *pt, 1104 union perf_event *event, 1105 struct perf_sample *sample, u64 type) 1106 { 1107 int ret; 1108 1109 ret = intel_pt_opt_inject(pt, event, sample, type); 1110 if (ret) 1111 return ret; 1112 1113 ret = perf_session__deliver_synth_event(pt->session, event, sample); 1114 if (ret) 1115 pr_err("Intel PT: failed to deliver event, error %d\n", ret); 1116 1117 return ret; 1118 } 1119 1120 static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq) 1121 { 1122 struct intel_pt *pt = ptq->pt; 1123 union perf_event *event = ptq->event_buf; 1124 struct perf_sample sample = { .ip = 0, }; 1125 struct dummy_branch_stack { 1126 u64 nr; 1127 struct branch_entry entries; 1128 } dummy_bs; 1129 1130 if (pt->branches_filter && !(pt->branches_filter & ptq->flags)) 1131 return 0; 1132 1133 if (intel_pt_skip_event(pt)) 1134 return 0; 1135 1136 intel_pt_prep_b_sample(pt, ptq, event, &sample); 1137 1138 sample.id = ptq->pt->branches_id; 1139 sample.stream_id = ptq->pt->branches_id; 1140 1141 /* 1142 * perf report cannot handle events without a branch stack when using 1143 * SORT_MODE__BRANCH so make a dummy one. 
1144 */ 1145 if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) { 1146 dummy_bs = (struct dummy_branch_stack){ 1147 .nr = 1, 1148 .entries = { 1149 .from = sample.ip, 1150 .to = sample.addr, 1151 }, 1152 }; 1153 sample.branch_stack = (struct branch_stack *)&dummy_bs; 1154 } 1155 1156 return intel_pt_deliver_synth_b_event(pt, event, &sample, 1157 pt->branches_sample_type); 1158 } 1159 1160 static void intel_pt_prep_sample(struct intel_pt *pt, 1161 struct intel_pt_queue *ptq, 1162 union perf_event *event, 1163 struct perf_sample *sample) 1164 { 1165 intel_pt_prep_b_sample(pt, ptq, event, sample); 1166 1167 if (pt->synth_opts.callchain) { 1168 thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain, 1169 pt->synth_opts.callchain_sz + 1, 1170 sample->ip, pt->kernel_start); 1171 sample->callchain = ptq->chain; 1172 } 1173 1174 if (pt->synth_opts.last_branch) { 1175 intel_pt_copy_last_branch_rb(ptq); 1176 sample->branch_stack = ptq->last_branch; 1177 } 1178 } 1179 1180 static inline int intel_pt_deliver_synth_event(struct intel_pt *pt, 1181 struct intel_pt_queue *ptq, 1182 union perf_event *event, 1183 struct perf_sample *sample, 1184 u64 type) 1185 { 1186 int ret; 1187 1188 ret = intel_pt_deliver_synth_b_event(pt, event, sample, type); 1189 1190 if (pt->synth_opts.last_branch) 1191 intel_pt_reset_last_branch_rb(ptq); 1192 1193 return ret; 1194 } 1195 1196 static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq) 1197 { 1198 struct intel_pt *pt = ptq->pt; 1199 union perf_event *event = ptq->event_buf; 1200 struct perf_sample sample = { .ip = 0, }; 1201 1202 if (intel_pt_skip_event(pt)) 1203 return 0; 1204 1205 intel_pt_prep_sample(pt, ptq, event, &sample); 1206 1207 sample.id = ptq->pt->instructions_id; 1208 sample.stream_id = ptq->pt->instructions_id; 1209 sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt; 1210 1211 ptq->last_insn_cnt = ptq->state->tot_insn_cnt; 1212 1213 return intel_pt_deliver_synth_event(pt, ptq, event, &sample, 1214 pt->instructions_sample_type); 1215 } 1216 1217 static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq) 1218 { 1219 struct intel_pt *pt = ptq->pt; 1220 union perf_event *event = ptq->event_buf; 1221 struct perf_sample sample = { .ip = 0, }; 1222 1223 if (intel_pt_skip_event(pt)) 1224 return 0; 1225 1226 intel_pt_prep_sample(pt, ptq, event, &sample); 1227 1228 sample.id = ptq->pt->transactions_id; 1229 sample.stream_id = ptq->pt->transactions_id; 1230 1231 return intel_pt_deliver_synth_event(pt, ptq, event, &sample, 1232 pt->transactions_sample_type); 1233 } 1234 1235 static void intel_pt_prep_p_sample(struct intel_pt *pt, 1236 struct intel_pt_queue *ptq, 1237 union perf_event *event, 1238 struct perf_sample *sample) 1239 { 1240 intel_pt_prep_sample(pt, ptq, event, sample); 1241 1242 /* 1243 * Zero IP is used to mean "trace start" but that is not the case for 1244 * power or PTWRITE events with no IP, so clear the flags. 
1245 */ 1246 if (!sample->ip) 1247 sample->flags = 0; 1248 } 1249 1250 static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq) 1251 { 1252 struct intel_pt *pt = ptq->pt; 1253 union perf_event *event = ptq->event_buf; 1254 struct perf_sample sample = { .ip = 0, }; 1255 struct perf_synth_intel_ptwrite raw; 1256 1257 if (intel_pt_skip_event(pt)) 1258 return 0; 1259 1260 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1261 1262 sample.id = ptq->pt->ptwrites_id; 1263 sample.stream_id = ptq->pt->ptwrites_id; 1264 1265 raw.flags = 0; 1266 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 1267 raw.payload = cpu_to_le64(ptq->state->ptw_payload); 1268 1269 sample.raw_size = perf_synth__raw_size(raw); 1270 sample.raw_data = perf_synth__raw_data(&raw); 1271 1272 return intel_pt_deliver_synth_event(pt, ptq, event, &sample, 1273 pt->ptwrites_sample_type); 1274 } 1275 1276 static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq) 1277 { 1278 struct intel_pt *pt = ptq->pt; 1279 union perf_event *event = ptq->event_buf; 1280 struct perf_sample sample = { .ip = 0, }; 1281 struct perf_synth_intel_cbr raw; 1282 u32 flags; 1283 1284 if (intel_pt_skip_event(pt)) 1285 return 0; 1286 1287 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1288 1289 sample.id = ptq->pt->cbr_id; 1290 sample.stream_id = ptq->pt->cbr_id; 1291 1292 flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16); 1293 raw.flags = cpu_to_le32(flags); 1294 raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz); 1295 raw.reserved3 = 0; 1296 1297 sample.raw_size = perf_synth__raw_size(raw); 1298 sample.raw_data = perf_synth__raw_data(&raw); 1299 1300 return intel_pt_deliver_synth_event(pt, ptq, event, &sample, 1301 pt->pwr_events_sample_type); 1302 } 1303 1304 static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq) 1305 { 1306 struct intel_pt *pt = ptq->pt; 1307 union perf_event *event = ptq->event_buf; 1308 struct perf_sample sample = { .ip = 0, }; 1309 struct perf_synth_intel_mwait raw; 1310 1311 if (intel_pt_skip_event(pt)) 1312 return 0; 1313 1314 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1315 1316 sample.id = ptq->pt->mwait_id; 1317 sample.stream_id = ptq->pt->mwait_id; 1318 1319 raw.reserved = 0; 1320 raw.payload = cpu_to_le64(ptq->state->mwait_payload); 1321 1322 sample.raw_size = perf_synth__raw_size(raw); 1323 sample.raw_data = perf_synth__raw_data(&raw); 1324 1325 return intel_pt_deliver_synth_event(pt, ptq, event, &sample, 1326 pt->pwr_events_sample_type); 1327 } 1328 1329 static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq) 1330 { 1331 struct intel_pt *pt = ptq->pt; 1332 union perf_event *event = ptq->event_buf; 1333 struct perf_sample sample = { .ip = 0, }; 1334 struct perf_synth_intel_pwre raw; 1335 1336 if (intel_pt_skip_event(pt)) 1337 return 0; 1338 1339 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1340 1341 sample.id = ptq->pt->pwre_id; 1342 sample.stream_id = ptq->pt->pwre_id; 1343 1344 raw.reserved = 0; 1345 raw.payload = cpu_to_le64(ptq->state->pwre_payload); 1346 1347 sample.raw_size = perf_synth__raw_size(raw); 1348 sample.raw_data = perf_synth__raw_data(&raw); 1349 1350 return intel_pt_deliver_synth_event(pt, ptq, event, &sample, 1351 pt->pwr_events_sample_type); 1352 } 1353 1354 static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq) 1355 { 1356 struct intel_pt *pt = ptq->pt; 1357 union perf_event *event = ptq->event_buf; 1358 struct perf_sample sample = { .ip = 0, }; 1359 struct perf_synth_intel_exstop raw; 1360 1361 if (intel_pt_skip_event(pt)) 1362 
return 0; 1363 1364 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1365 1366 sample.id = ptq->pt->exstop_id; 1367 sample.stream_id = ptq->pt->exstop_id; 1368 1369 raw.flags = 0; 1370 raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP); 1371 1372 sample.raw_size = perf_synth__raw_size(raw); 1373 sample.raw_data = perf_synth__raw_data(&raw); 1374 1375 return intel_pt_deliver_synth_event(pt, ptq, event, &sample, 1376 pt->pwr_events_sample_type); 1377 } 1378 1379 static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq) 1380 { 1381 struct intel_pt *pt = ptq->pt; 1382 union perf_event *event = ptq->event_buf; 1383 struct perf_sample sample = { .ip = 0, }; 1384 struct perf_synth_intel_pwrx raw; 1385 1386 if (intel_pt_skip_event(pt)) 1387 return 0; 1388 1389 intel_pt_prep_p_sample(pt, ptq, event, &sample); 1390 1391 sample.id = ptq->pt->pwrx_id; 1392 sample.stream_id = ptq->pt->pwrx_id; 1393 1394 raw.reserved = 0; 1395 raw.payload = cpu_to_le64(ptq->state->pwrx_payload); 1396 1397 sample.raw_size = perf_synth__raw_size(raw); 1398 sample.raw_data = perf_synth__raw_data(&raw); 1399 1400 return intel_pt_deliver_synth_event(pt, ptq, event, &sample, 1401 pt->pwr_events_sample_type); 1402 } 1403 1404 static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu, 1405 pid_t pid, pid_t tid, u64 ip, u64 timestamp) 1406 { 1407 union perf_event event; 1408 char msg[MAX_AUXTRACE_ERROR_MSG]; 1409 int err; 1410 1411 intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG); 1412 1413 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, 1414 code, cpu, pid, tid, ip, msg, timestamp); 1415 1416 err = perf_session__deliver_synth_event(pt->session, &event, NULL); 1417 if (err) 1418 pr_err("Intel Processor Trace: failed to deliver error event, error %d\n", 1419 err); 1420 1421 return err; 1422 } 1423 1424 static int intel_ptq_synth_error(struct intel_pt_queue *ptq, 1425 const struct intel_pt_state *state) 1426 { 1427 struct intel_pt *pt = ptq->pt; 1428 u64 tm = ptq->timestamp; 1429 1430 tm = pt->timeless_decoding ? 
0 : tsc_to_perf_time(tm, &pt->tc); 1431 1432 return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid, 1433 ptq->tid, state->from_ip, tm); 1434 } 1435 1436 static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq) 1437 { 1438 struct auxtrace_queue *queue; 1439 pid_t tid = ptq->next_tid; 1440 int err; 1441 1442 if (tid == -1) 1443 return 0; 1444 1445 intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid); 1446 1447 err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid); 1448 1449 queue = &pt->queues.queue_array[ptq->queue_nr]; 1450 intel_pt_set_pid_tid_cpu(pt, queue); 1451 1452 ptq->next_tid = -1; 1453 1454 return err; 1455 } 1456 1457 static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip) 1458 { 1459 struct intel_pt *pt = ptq->pt; 1460 1461 return ip == pt->switch_ip && 1462 (ptq->flags & PERF_IP_FLAG_BRANCH) && 1463 !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC | 1464 PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT)); 1465 } 1466 1467 #define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \ 1468 INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \ 1469 INTEL_PT_CBR_CHG) 1470 1471 static int intel_pt_sample(struct intel_pt_queue *ptq) 1472 { 1473 const struct intel_pt_state *state = ptq->state; 1474 struct intel_pt *pt = ptq->pt; 1475 int err; 1476 1477 if (!ptq->have_sample) 1478 return 0; 1479 1480 ptq->have_sample = false; 1481 1482 if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) { 1483 if (state->type & INTEL_PT_CBR_CHG) { 1484 err = intel_pt_synth_cbr_sample(ptq); 1485 if (err) 1486 return err; 1487 } 1488 if (state->type & INTEL_PT_MWAIT_OP) { 1489 err = intel_pt_synth_mwait_sample(ptq); 1490 if (err) 1491 return err; 1492 } 1493 if (state->type & INTEL_PT_PWR_ENTRY) { 1494 err = intel_pt_synth_pwre_sample(ptq); 1495 if (err) 1496 return err; 1497 } 1498 if (state->type & INTEL_PT_EX_STOP) { 1499 err = intel_pt_synth_exstop_sample(ptq); 1500 if (err) 1501 return err; 1502 } 1503 if (state->type & INTEL_PT_PWR_EXIT) { 1504 err = intel_pt_synth_pwrx_sample(ptq); 1505 if (err) 1506 return err; 1507 } 1508 } 1509 1510 if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) { 1511 err = intel_pt_synth_instruction_sample(ptq); 1512 if (err) 1513 return err; 1514 } 1515 1516 if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) { 1517 err = intel_pt_synth_transaction_sample(ptq); 1518 if (err) 1519 return err; 1520 } 1521 1522 if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) { 1523 err = intel_pt_synth_ptwrite_sample(ptq); 1524 if (err) 1525 return err; 1526 } 1527 1528 if (!(state->type & INTEL_PT_BRANCH)) 1529 return 0; 1530 1531 if (pt->synth_opts.callchain || pt->synth_opts.thread_stack) 1532 thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, state->from_ip, 1533 state->to_ip, ptq->insn_len, 1534 state->trace_nr); 1535 else 1536 thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr); 1537 1538 if (pt->sample_branches) { 1539 err = intel_pt_synth_branch_sample(ptq); 1540 if (err) 1541 return err; 1542 } 1543 1544 if (pt->synth_opts.last_branch) 1545 intel_pt_update_last_branch_rb(ptq); 1546 1547 if (!ptq->sync_switch) 1548 return 0; 1549 1550 if (intel_pt_is_switch_ip(ptq, state->to_ip)) { 1551 switch (ptq->switch_state) { 1552 case INTEL_PT_SS_NOT_TRACING: 1553 case INTEL_PT_SS_UNKNOWN: 1554 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 1555 err = intel_pt_next_tid(pt, ptq); 1556 if (err) 1557 return err; 1558 ptq->switch_state = INTEL_PT_SS_TRACING; 1559 
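			/*
			 * In the states handled above the decoder applies the
			 * deferred tid change itself via intel_pt_next_tid();
			 * in the default case below it instead waits for the
			 * corresponding sched_switch / context switch event
			 * to confirm the switch (see intel_pt_sync_switch()).
			 */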
break; 1560 default: 1561 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT; 1562 return 1; 1563 } 1564 } else if (!state->to_ip) { 1565 ptq->switch_state = INTEL_PT_SS_NOT_TRACING; 1566 } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) { 1567 ptq->switch_state = INTEL_PT_SS_UNKNOWN; 1568 } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN && 1569 state->to_ip == pt->ptss_ip && 1570 (ptq->flags & PERF_IP_FLAG_CALL)) { 1571 ptq->switch_state = INTEL_PT_SS_TRACING; 1572 } 1573 1574 return 0; 1575 } 1576 1577 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip) 1578 { 1579 struct machine *machine = pt->machine; 1580 struct map *map; 1581 struct symbol *sym, *start; 1582 u64 ip, switch_ip = 0; 1583 const char *ptss; 1584 1585 if (ptss_ip) 1586 *ptss_ip = 0; 1587 1588 map = machine__kernel_map(machine); 1589 if (!map) 1590 return 0; 1591 1592 if (map__load(map)) 1593 return 0; 1594 1595 start = dso__first_symbol(map->dso); 1596 1597 for (sym = start; sym; sym = dso__next_symbol(sym)) { 1598 if (sym->binding == STB_GLOBAL && 1599 !strcmp(sym->name, "__switch_to")) { 1600 ip = map->unmap_ip(map, sym->start); 1601 if (ip >= map->start && ip < map->end) { 1602 switch_ip = ip; 1603 break; 1604 } 1605 } 1606 } 1607 1608 if (!switch_ip || !ptss_ip) 1609 return 0; 1610 1611 if (pt->have_sched_switch == 1) 1612 ptss = "perf_trace_sched_switch"; 1613 else 1614 ptss = "__perf_event_task_sched_out"; 1615 1616 for (sym = start; sym; sym = dso__next_symbol(sym)) { 1617 if (!strcmp(sym->name, ptss)) { 1618 ip = map->unmap_ip(map, sym->start); 1619 if (ip >= map->start && ip < map->end) { 1620 *ptss_ip = ip; 1621 break; 1622 } 1623 } 1624 } 1625 1626 return switch_ip; 1627 } 1628 1629 static void intel_pt_enable_sync_switch(struct intel_pt *pt) 1630 { 1631 unsigned int i; 1632 1633 pt->sync_switch = true; 1634 1635 for (i = 0; i < pt->queues.nr_queues; i++) { 1636 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 1637 struct intel_pt_queue *ptq = queue->priv; 1638 1639 if (ptq) 1640 ptq->sync_switch = true; 1641 } 1642 } 1643 1644 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) 1645 { 1646 const struct intel_pt_state *state = ptq->state; 1647 struct intel_pt *pt = ptq->pt; 1648 int err; 1649 1650 if (!pt->kernel_start) { 1651 pt->kernel_start = machine__kernel_start(pt->machine); 1652 if (pt->per_cpu_mmaps && 1653 (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) && 1654 !pt->timeless_decoding && intel_pt_tracing_kernel(pt) && 1655 !pt->sampling_mode) { 1656 pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip); 1657 if (pt->switch_ip) { 1658 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n", 1659 pt->switch_ip, pt->ptss_ip); 1660 intel_pt_enable_sync_switch(pt); 1661 } 1662 } 1663 } 1664 1665 intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n", 1666 ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid); 1667 while (1) { 1668 err = intel_pt_sample(ptq); 1669 if (err) 1670 return err; 1671 1672 state = intel_pt_decode(ptq->decoder); 1673 if (state->err) { 1674 if (state->err == INTEL_PT_ERR_NODATA) 1675 return 1; 1676 if (ptq->sync_switch && 1677 state->from_ip >= pt->kernel_start) { 1678 ptq->sync_switch = false; 1679 intel_pt_next_tid(pt, ptq); 1680 } 1681 if (pt->synth_opts.errors) { 1682 err = intel_ptq_synth_error(ptq, state); 1683 if (err) 1684 return err; 1685 } 1686 continue; 1687 } 1688 1689 ptq->state = state; 1690 ptq->have_sample = true; 1691 intel_pt_sample_flags(ptq); 1692 1693 /* Use estimated TSC upon return to user space */ 
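		/*
		 * (A kernel-to-user branch is a point where the last real
		 * timing packet may lag well behind the actual time, so the
		 * decoder's estimate is likely the better value for keeping
		 * this queue ordered against the others on the heap.)
		 */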
1694 if (pt->est_tsc && 1695 (state->from_ip >= pt->kernel_start || !state->from_ip) && 1696 state->to_ip && state->to_ip < pt->kernel_start) { 1697 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n", 1698 state->timestamp, state->est_timestamp); 1699 ptq->timestamp = state->est_timestamp; 1700 /* Use estimated TSC in unknown switch state */ 1701 } else if (ptq->sync_switch && 1702 ptq->switch_state == INTEL_PT_SS_UNKNOWN && 1703 intel_pt_is_switch_ip(ptq, state->to_ip) && 1704 ptq->next_tid == -1) { 1705 intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n", 1706 state->timestamp, state->est_timestamp); 1707 ptq->timestamp = state->est_timestamp; 1708 } else if (state->timestamp > ptq->timestamp) { 1709 ptq->timestamp = state->timestamp; 1710 } 1711 1712 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) { 1713 *timestamp = ptq->timestamp; 1714 return 0; 1715 } 1716 } 1717 return 0; 1718 } 1719 1720 static inline int intel_pt_update_queues(struct intel_pt *pt) 1721 { 1722 if (pt->queues.new_data) { 1723 pt->queues.new_data = false; 1724 return intel_pt_setup_queues(pt); 1725 } 1726 return 0; 1727 } 1728 1729 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp) 1730 { 1731 unsigned int queue_nr; 1732 u64 ts; 1733 int ret; 1734 1735 while (1) { 1736 struct auxtrace_queue *queue; 1737 struct intel_pt_queue *ptq; 1738 1739 if (!pt->heap.heap_cnt) 1740 return 0; 1741 1742 if (pt->heap.heap_array[0].ordinal >= timestamp) 1743 return 0; 1744 1745 queue_nr = pt->heap.heap_array[0].queue_nr; 1746 queue = &pt->queues.queue_array[queue_nr]; 1747 ptq = queue->priv; 1748 1749 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n", 1750 queue_nr, pt->heap.heap_array[0].ordinal, 1751 timestamp); 1752 1753 auxtrace_heap__pop(&pt->heap); 1754 1755 if (pt->heap.heap_cnt) { 1756 ts = pt->heap.heap_array[0].ordinal + 1; 1757 if (ts > timestamp) 1758 ts = timestamp; 1759 } else { 1760 ts = timestamp; 1761 } 1762 1763 intel_pt_set_pid_tid_cpu(pt, queue); 1764 1765 ret = intel_pt_run_decoder(ptq, &ts); 1766 1767 if (ret < 0) { 1768 auxtrace_heap__add(&pt->heap, queue_nr, ts); 1769 return ret; 1770 } 1771 1772 if (!ret) { 1773 ret = auxtrace_heap__add(&pt->heap, queue_nr, ts); 1774 if (ret < 0) 1775 return ret; 1776 } else { 1777 ptq->on_heap = false; 1778 } 1779 } 1780 1781 return 0; 1782 } 1783 1784 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid, 1785 u64 time_) 1786 { 1787 struct auxtrace_queues *queues = &pt->queues; 1788 unsigned int i; 1789 u64 ts = 0; 1790 1791 for (i = 0; i < queues->nr_queues; i++) { 1792 struct auxtrace_queue *queue = &pt->queues.queue_array[i]; 1793 struct intel_pt_queue *ptq = queue->priv; 1794 1795 if (ptq && (tid == -1 || ptq->tid == tid)) { 1796 ptq->time = time_; 1797 intel_pt_set_pid_tid_cpu(pt, queue); 1798 intel_pt_run_decoder(ptq, &ts); 1799 } 1800 } 1801 return 0; 1802 } 1803 1804 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample) 1805 { 1806 return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu, 1807 sample->pid, sample->tid, 0, sample->time); 1808 } 1809 1810 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu) 1811 { 1812 unsigned i, j; 1813 1814 if (cpu < 0 || !pt->queues.nr_queues) 1815 return NULL; 1816 1817 if ((unsigned)cpu >= pt->queues.nr_queues) 1818 i = pt->queues.nr_queues - 1; 1819 else 1820 i = cpu; 1821 1822 if (pt->queues.queue_array[i].cpu == cpu) 1823 return pt->queues.queue_array[i].priv; 1824 1825 for (j = 0; i > 0; j++) { 1826 if 
(pt->queues.queue_array[--i].cpu == cpu) 1827 return pt->queues.queue_array[i].priv; 1828 } 1829 1830 for (; j < pt->queues.nr_queues; j++) { 1831 if (pt->queues.queue_array[j].cpu == cpu) 1832 return pt->queues.queue_array[j].priv; 1833 } 1834 1835 return NULL; 1836 } 1837 1838 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid, 1839 u64 timestamp) 1840 { 1841 struct intel_pt_queue *ptq; 1842 int err; 1843 1844 if (!pt->sync_switch) 1845 return 1; 1846 1847 ptq = intel_pt_cpu_to_ptq(pt, cpu); 1848 if (!ptq || !ptq->sync_switch) 1849 return 1; 1850 1851 switch (ptq->switch_state) { 1852 case INTEL_PT_SS_NOT_TRACING: 1853 ptq->next_tid = -1; 1854 break; 1855 case INTEL_PT_SS_UNKNOWN: 1856 case INTEL_PT_SS_TRACING: 1857 ptq->next_tid = tid; 1858 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP; 1859 return 0; 1860 case INTEL_PT_SS_EXPECTING_SWITCH_EVENT: 1861 if (!ptq->on_heap) { 1862 ptq->timestamp = perf_time_to_tsc(timestamp, 1863 &pt->tc); 1864 err = auxtrace_heap__add(&pt->heap, ptq->queue_nr, 1865 ptq->timestamp); 1866 if (err) 1867 return err; 1868 ptq->on_heap = true; 1869 } 1870 ptq->switch_state = INTEL_PT_SS_TRACING; 1871 break; 1872 case INTEL_PT_SS_EXPECTING_SWITCH_IP: 1873 ptq->next_tid = tid; 1874 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu); 1875 break; 1876 default: 1877 break; 1878 } 1879 1880 return 1; 1881 } 1882 1883 static int intel_pt_process_switch(struct intel_pt *pt, 1884 struct perf_sample *sample) 1885 { 1886 struct perf_evsel *evsel; 1887 pid_t tid; 1888 int cpu, ret; 1889 1890 evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id); 1891 if (evsel != pt->switch_evsel) 1892 return 0; 1893 1894 tid = perf_evsel__intval(evsel, sample, "next_pid"); 1895 cpu = sample->cpu; 1896 1897 intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", 1898 cpu, tid, sample->time, perf_time_to_tsc(sample->time, 1899 &pt->tc)); 1900 1901 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); 1902 if (ret <= 0) 1903 return ret; 1904 1905 return machine__set_current_tid(pt->machine, cpu, -1, tid); 1906 } 1907 1908 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event, 1909 struct perf_sample *sample) 1910 { 1911 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 1912 pid_t pid, tid; 1913 int cpu, ret; 1914 1915 cpu = sample->cpu; 1916 1917 if (pt->have_sched_switch == 3) { 1918 if (!out) 1919 return 0; 1920 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) { 1921 pr_err("Expecting CPU-wide context switch event\n"); 1922 return -EINVAL; 1923 } 1924 pid = event->context_switch.next_prev_pid; 1925 tid = event->context_switch.next_prev_tid; 1926 } else { 1927 if (out) 1928 return 0; 1929 pid = sample->pid; 1930 tid = sample->tid; 1931 } 1932 1933 if (tid == -1) { 1934 pr_err("context_switch event has no tid\n"); 1935 return -EINVAL; 1936 } 1937 1938 intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", 1939 cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time, 1940 &pt->tc)); 1941 1942 ret = intel_pt_sync_switch(pt, cpu, tid, sample->time); 1943 if (ret <= 0) 1944 return ret; 1945 1946 return machine__set_current_tid(pt->machine, cpu, pid, tid); 1947 } 1948 1949 static int intel_pt_process_itrace_start(struct intel_pt *pt, 1950 union perf_event *event, 1951 struct perf_sample *sample) 1952 { 1953 if (!pt->per_cpu_mmaps) 1954 return 0; 1955 1956 intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n", 1957 sample->cpu, 
event->itrace_start.pid, 1958 event->itrace_start.tid, sample->time, 1959 perf_time_to_tsc(sample->time, &pt->tc)); 1960 1961 return machine__set_current_tid(pt->machine, sample->cpu, 1962 event->itrace_start.pid, 1963 event->itrace_start.tid); 1964 } 1965 1966 static int intel_pt_process_event(struct perf_session *session, 1967 union perf_event *event, 1968 struct perf_sample *sample, 1969 struct perf_tool *tool) 1970 { 1971 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 1972 auxtrace); 1973 u64 timestamp; 1974 int err = 0; 1975 1976 if (dump_trace) 1977 return 0; 1978 1979 if (!tool->ordered_events) { 1980 pr_err("Intel Processor Trace requires ordered events\n"); 1981 return -EINVAL; 1982 } 1983 1984 if (sample->time && sample->time != (u64)-1) 1985 timestamp = perf_time_to_tsc(sample->time, &pt->tc); 1986 else 1987 timestamp = 0; 1988 1989 if (timestamp || pt->timeless_decoding) { 1990 err = intel_pt_update_queues(pt); 1991 if (err) 1992 return err; 1993 } 1994 1995 if (pt->timeless_decoding) { 1996 if (event->header.type == PERF_RECORD_EXIT) { 1997 err = intel_pt_process_timeless_queues(pt, 1998 event->fork.tid, 1999 sample->time); 2000 } 2001 } else if (timestamp) { 2002 err = intel_pt_process_queues(pt, timestamp); 2003 } 2004 if (err) 2005 return err; 2006 2007 if (event->header.type == PERF_RECORD_AUX && 2008 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && 2009 pt->synth_opts.errors) { 2010 err = intel_pt_lost(pt, sample); 2011 if (err) 2012 return err; 2013 } 2014 2015 if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE) 2016 err = intel_pt_process_switch(pt, sample); 2017 else if (event->header.type == PERF_RECORD_ITRACE_START) 2018 err = intel_pt_process_itrace_start(pt, event, sample); 2019 else if (event->header.type == PERF_RECORD_SWITCH || 2020 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) 2021 err = intel_pt_context_switch(pt, event, sample); 2022 2023 intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ", 2024 event->header.type, sample->cpu, sample->time, timestamp); 2025 intel_pt_log_event(event); 2026 2027 return err; 2028 } 2029 2030 static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool) 2031 { 2032 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 2033 auxtrace); 2034 int ret; 2035 2036 if (dump_trace) 2037 return 0; 2038 2039 if (!tool->ordered_events) 2040 return -EINVAL; 2041 2042 ret = intel_pt_update_queues(pt); 2043 if (ret < 0) 2044 return ret; 2045 2046 if (pt->timeless_decoding) 2047 return intel_pt_process_timeless_queues(pt, -1, 2048 MAX_TIMESTAMP - 1); 2049 2050 return intel_pt_process_queues(pt, MAX_TIMESTAMP); 2051 } 2052 2053 static void intel_pt_free_events(struct perf_session *session) 2054 { 2055 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 2056 auxtrace); 2057 struct auxtrace_queues *queues = &pt->queues; 2058 unsigned int i; 2059 2060 for (i = 0; i < queues->nr_queues; i++) { 2061 intel_pt_free_queue(queues->queue_array[i].priv); 2062 queues->queue_array[i].priv = NULL; 2063 } 2064 intel_pt_log_disable(); 2065 auxtrace_queues__free(queues); 2066 } 2067 2068 static void intel_pt_free(struct perf_session *session) 2069 { 2070 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 2071 auxtrace); 2072 2073 auxtrace_heap__free(&pt->heap); 2074 intel_pt_free_events(session); 2075 session->auxtrace = NULL; 2076 thread__put(pt->unknown_thread); 2077 addr_filters__exit(&pt->filts); 2078 zfree(&pt->filter); 2079 
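	/*
	 * Everything hanging off the auxtrace struct (heap, per-queue
	 * decoders, the unknown thread, address filters and filter string)
	 * has been released above; only the struct itself is left.
	 */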
free(pt); 2080 } 2081 2082 static int intel_pt_process_auxtrace_event(struct perf_session *session, 2083 union perf_event *event, 2084 struct perf_tool *tool __maybe_unused) 2085 { 2086 struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt, 2087 auxtrace); 2088 2089 if (!pt->data_queued) { 2090 struct auxtrace_buffer *buffer; 2091 off_t data_offset; 2092 int fd = perf_data__fd(session->data); 2093 int err; 2094 2095 if (perf_data__is_pipe(session->data)) { 2096 data_offset = 0; 2097 } else { 2098 data_offset = lseek(fd, 0, SEEK_CUR); 2099 if (data_offset == -1) 2100 return -errno; 2101 } 2102 2103 err = auxtrace_queues__add_event(&pt->queues, session, event, 2104 data_offset, &buffer); 2105 if (err) 2106 return err; 2107 2108 /* Dump here now we have copied a piped trace out of the pipe */ 2109 if (dump_trace) { 2110 if (auxtrace_buffer__get_data(buffer, fd)) { 2111 intel_pt_dump_event(pt, buffer->data, 2112 buffer->size); 2113 auxtrace_buffer__put_data(buffer); 2114 } 2115 } 2116 } 2117 2118 return 0; 2119 } 2120 2121 struct intel_pt_synth { 2122 struct perf_tool dummy_tool; 2123 struct perf_session *session; 2124 }; 2125 2126 static int intel_pt_event_synth(struct perf_tool *tool, 2127 union perf_event *event, 2128 struct perf_sample *sample __maybe_unused, 2129 struct machine *machine __maybe_unused) 2130 { 2131 struct intel_pt_synth *intel_pt_synth = 2132 container_of(tool, struct intel_pt_synth, dummy_tool); 2133 2134 return perf_session__deliver_synth_event(intel_pt_synth->session, event, 2135 NULL); 2136 } 2137 2138 static int intel_pt_synth_event(struct perf_session *session, const char *name, 2139 struct perf_event_attr *attr, u64 id) 2140 { 2141 struct intel_pt_synth intel_pt_synth; 2142 int err; 2143 2144 pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 2145 name, id, (u64)attr->sample_type); 2146 2147 memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth)); 2148 intel_pt_synth.session = session; 2149 2150 err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1, 2151 &id, intel_pt_event_synth); 2152 if (err) 2153 pr_err("%s: failed to synthesize '%s' event type\n", 2154 __func__, name); 2155 2156 return err; 2157 } 2158 2159 static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id, 2160 const char *name) 2161 { 2162 struct perf_evsel *evsel; 2163 2164 evlist__for_each_entry(evlist, evsel) { 2165 if (evsel->id && evsel->id[0] == id) { 2166 if (evsel->name) 2167 zfree(&evsel->name); 2168 evsel->name = strdup(name); 2169 break; 2170 } 2171 } 2172 } 2173 2174 static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt, 2175 struct perf_evlist *evlist) 2176 { 2177 struct perf_evsel *evsel; 2178 2179 evlist__for_each_entry(evlist, evsel) { 2180 if (evsel->attr.type == pt->pmu_type && evsel->ids) 2181 return evsel; 2182 } 2183 2184 return NULL; 2185 } 2186 2187 static int intel_pt_synth_events(struct intel_pt *pt, 2188 struct perf_session *session) 2189 { 2190 struct perf_evlist *evlist = session->evlist; 2191 struct perf_evsel *evsel = intel_pt_evsel(pt, evlist); 2192 struct perf_event_attr attr; 2193 u64 id; 2194 int err; 2195 2196 if (!evsel) { 2197 pr_debug("There are no selected events with Intel Processor Trace data\n"); 2198 return 0; 2199 } 2200 2201 memset(&attr, 0, sizeof(struct perf_event_attr)); 2202 attr.size = sizeof(struct perf_event_attr); 2203 attr.type = PERF_TYPE_HARDWARE; 2204 attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK; 2205 attr.sample_type |= PERF_SAMPLE_IP | 
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
	struct perf_event_attr attr;
	u64 id;
	int err;

	if (!evsel) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = intel_pt_synth_event(session, "branches", &attr, id);
		if (err)
			return err;
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (pt->synth_opts.callchain)
		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
	if (pt->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		err = intel_pt_synth_event(session, "instructions", &attr, id);
		if (err)
			return err;
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
	attr.sample_period = 1;

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		err = intel_pt_synth_event(session, "transactions", &attr, id);
		if (err)
			return err;
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		intel_pt_set_event_name(evlist, id, "transactions");
		id += 1;
	}

	attr.type = PERF_TYPE_SYNTH;
	attr.sample_type |= PERF_SAMPLE_RAW;

	if (pt->synth_opts.ptwrites) {
		attr.config = PERF_SYNTH_INTEL_PTWRITE;
		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
		if (err)
			return err;
		pt->sample_ptwrites = true;
		pt->ptwrites_sample_type = attr.sample_type;
		pt->ptwrites_id = id;
		intel_pt_set_event_name(evlist, id, "ptwrite");
		id += 1;
	}

	if (pt->synth_opts.pwr_events) {
		pt->sample_pwr_events = true;
		pt->pwr_events_sample_type = attr.sample_type;

		attr.config = PERF_SYNTH_INTEL_CBR;
		err = intel_pt_synth_event(session, "cbr", &attr, id);
		if (err)
			return err;
		pt->cbr_id = id;
		intel_pt_set_event_name(evlist, id, "cbr");
		id += 1;
	}

	if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
		attr.config = PERF_SYNTH_INTEL_MWAIT;
		err = intel_pt_synth_event(session, "mwait", &attr, id);
		if (err)
			return err;
		pt->mwait_id = id;
		intel_pt_set_event_name(evlist, id, "mwait");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRE;
		err = intel_pt_synth_event(session, "pwre", &attr, id);
		if (err)
			return err;
		pt->pwre_id = id;
		intel_pt_set_event_name(evlist, id, "pwre");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_EXSTOP;
		err = intel_pt_synth_event(session, "exstop", &attr, id);
		if (err)
			return err;
		pt->exstop_id = id;
		intel_pt_set_event_name(evlist, id, "exstop");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRX;
		err = intel_pt_synth_event(session, "pwrx", &attr, id);
		if (err)
			return err;
		pt->pwrx_id = id;
		intel_pt_set_event_name(evlist, id, "pwrx");
		id += 1;
	}

	return 0;
}
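
/*
 * Context switches are tracked either via the sched:sched_switch tracepoint
 * or via events recorded with attr.context_switch set; the helpers below
 * look for each in the evlist.
 */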
static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel) {
		const char *name = perf_evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}

static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.context_switch)
			return true;
	}

	return false;
}

static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	return 0;
}

static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
};

static void intel_pt_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}

static void intel_pt_print_info_str(const char *name, const char *str)
{
	if (!dump_trace)
		return;

	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}
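
/*
 * Older perf.data files may not provide the later members of the priv[]
 * array, so check the auxtrace_info event is big enough before reading a
 * given position.
 */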
str : ""); 2412 } 2413 2414 static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos) 2415 { 2416 return auxtrace_info->header.size >= 2417 sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1)); 2418 } 2419 2420 int intel_pt_process_auxtrace_info(union perf_event *event, 2421 struct perf_session *session) 2422 { 2423 struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info; 2424 size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS; 2425 struct intel_pt *pt; 2426 void *info_end; 2427 u64 *info; 2428 int err; 2429 2430 if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) + 2431 min_sz) 2432 return -EINVAL; 2433 2434 pt = zalloc(sizeof(struct intel_pt)); 2435 if (!pt) 2436 return -ENOMEM; 2437 2438 addr_filters__init(&pt->filts); 2439 2440 err = perf_config(intel_pt_perf_config, pt); 2441 if (err) 2442 goto err_free; 2443 2444 err = auxtrace_queues__init(&pt->queues); 2445 if (err) 2446 goto err_free; 2447 2448 intel_pt_log_set_name(INTEL_PT_PMU_NAME); 2449 2450 pt->session = session; 2451 pt->machine = &session->machines.host; /* No kvm support */ 2452 pt->auxtrace_type = auxtrace_info->type; 2453 pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE]; 2454 pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT]; 2455 pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT]; 2456 pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO]; 2457 pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO]; 2458 pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT]; 2459 pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT]; 2460 pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH]; 2461 pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE]; 2462 pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS]; 2463 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE, 2464 INTEL_PT_PER_CPU_MMAPS); 2465 2466 if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) { 2467 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT]; 2468 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS]; 2469 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N]; 2470 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D]; 2471 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT]; 2472 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT, 2473 INTEL_PT_CYC_BIT); 2474 } 2475 2476 if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) { 2477 pt->max_non_turbo_ratio = 2478 auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO]; 2479 intel_pt_print_info(&auxtrace_info->priv[0], 2480 INTEL_PT_MAX_NONTURBO_RATIO, 2481 INTEL_PT_MAX_NONTURBO_RATIO); 2482 } 2483 2484 info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1; 2485 info_end = (void *)info + auxtrace_info->header.size; 2486 2487 if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) { 2488 size_t len; 2489 2490 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN]; 2491 intel_pt_print_info(&auxtrace_info->priv[0], 2492 INTEL_PT_FILTER_STR_LEN, 2493 INTEL_PT_FILTER_STR_LEN); 2494 if (len) { 2495 const char *filter = (const char *)info; 2496 2497 len = roundup(len + 1, 8); 2498 info += len >> 3; 2499 if ((void *)info > info_end) { 2500 pr_err("%s: bad filter string length\n", __func__); 2501 err = -EINVAL; 2502 goto err_free_queues; 2503 } 2504 pt->filter = memdup(filter, len); 2505 if (!pt->filter) { 2506 err = -ENOMEM; 2507 goto err_free_queues; 2508 } 2509 if (session->header.needs_swap) 2510 mem_bswap_64(pt->filter, 
	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	if (pt->timeless_decoding && !pt->tc.time_mult)
		pt->tc.time_mult = 1;
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = false;
	pt->est_tsc = !pt->timeless_decoding;

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree nor in a list,
	 * initialize its list node so that at thread__put() the current
	 * thread lifetime assumption is kept and we don't segfault at
	 * list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts,
				session->itrace_synth_opts ?
				session->itrace_synth_opts->default_no_sample :
				false);
		if (use_browser != -1) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
		if (session->itrace_synth_opts)
			pt->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;
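
	/*
	 * Synthesizing callchains needs callchain reporting enabled; if
	 * registering the callchain parameters fails, drop back to
	 * synthesizing samples without callchains.
	 */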
	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
	return err;
}