1 /* 2 * intel-bts.c: Intel Processor Trace support 3 * Copyright (c) 2013-2015, Intel Corporation. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 */ 15 16 #include <endian.h> 17 #include <byteswap.h> 18 #include <linux/kernel.h> 19 #include <linux/types.h> 20 #include <linux/bitops.h> 21 #include <linux/log2.h> 22 23 #include "cpumap.h" 24 #include "color.h" 25 #include "evsel.h" 26 #include "evlist.h" 27 #include "machine.h" 28 #include "session.h" 29 #include "util.h" 30 #include "thread.h" 31 #include "thread-stack.h" 32 #include "debug.h" 33 #include "tsc.h" 34 #include "auxtrace.h" 35 #include "intel-pt-decoder/intel-pt-insn-decoder.h" 36 #include "intel-bts.h" 37 38 #define MAX_TIMESTAMP (~0ULL) 39 40 #define INTEL_BTS_ERR_NOINSN 5 41 #define INTEL_BTS_ERR_LOST 9 42 43 #if __BYTE_ORDER == __BIG_ENDIAN 44 #define le64_to_cpu bswap_64 45 #else 46 #define le64_to_cpu 47 #endif 48 49 struct intel_bts { 50 struct auxtrace auxtrace; 51 struct auxtrace_queues queues; 52 struct auxtrace_heap heap; 53 u32 auxtrace_type; 54 struct perf_session *session; 55 struct machine *machine; 56 bool sampling_mode; 57 bool snapshot_mode; 58 bool data_queued; 59 u32 pmu_type; 60 struct perf_tsc_conversion tc; 61 bool cap_user_time_zero; 62 struct itrace_synth_opts synth_opts; 63 bool sample_branches; 64 u32 branches_filter; 65 u64 branches_sample_type; 66 u64 branches_id; 67 size_t branches_event_size; 68 bool synth_needs_swap; 69 unsigned long num_events; 70 }; 71 72 struct intel_bts_queue { 73 struct intel_bts *bts; 74 unsigned int queue_nr; 75 struct auxtrace_buffer *buffer; 76 bool on_heap; 77 bool done; 78 pid_t pid; 79 pid_t tid; 80 int cpu; 81 u64 time; 82 struct intel_pt_insn intel_pt_insn; 83 u32 sample_flags; 84 }; 85 86 struct branch { 87 u64 from; 88 u64 to; 89 u64 misc; 90 }; 91 92 static void intel_bts_dump(struct intel_bts *bts __maybe_unused, 93 unsigned char *buf, size_t len) 94 { 95 struct branch *branch; 96 size_t i, pos = 0, br_sz = sizeof(struct branch), sz; 97 const char *color = PERF_COLOR_BLUE; 98 99 color_fprintf(stdout, color, 100 ". ... Intel BTS data: size %zu bytes\n", 101 len); 102 103 while (len) { 104 if (len >= br_sz) 105 sz = br_sz; 106 else 107 sz = len; 108 printf("."); 109 color_fprintf(stdout, color, " %08x: ", pos); 110 for (i = 0; i < sz; i++) 111 color_fprintf(stdout, color, " %02x", buf[i]); 112 for (; i < br_sz; i++) 113 color_fprintf(stdout, color, " "); 114 if (len >= br_sz) { 115 branch = (struct branch *)buf; 116 color_fprintf(stdout, color, " %"PRIx64" -> %"PRIx64" %s\n", 117 le64_to_cpu(branch->from), 118 le64_to_cpu(branch->to), 119 le64_to_cpu(branch->misc) & 0x10 ? 120 "pred" : "miss"); 121 } else { 122 color_fprintf(stdout, color, " Bad record!\n"); 123 } 124 pos += sz; 125 buf += sz; 126 len -= sz; 127 } 128 } 129 130 static void intel_bts_dump_event(struct intel_bts *bts, unsigned char *buf, 131 size_t len) 132 { 133 printf(".\n"); 134 intel_bts_dump(bts, buf, len); 135 } 136 137 static int intel_bts_lost(struct intel_bts *bts, struct perf_sample *sample) 138 { 139 union perf_event event; 140 int err; 141 142 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, 143 INTEL_BTS_ERR_LOST, sample->cpu, sample->pid, 144 sample->tid, 0, "Lost trace data"); 145 146 err = perf_session__deliver_synth_event(bts->session, &event, NULL); 147 if (err) 148 pr_err("Intel BTS: failed to deliver error event, error %d\n", 149 err); 150 151 return err; 152 } 153 154 static struct intel_bts_queue *intel_bts_alloc_queue(struct intel_bts *bts, 155 unsigned int queue_nr) 156 { 157 struct intel_bts_queue *btsq; 158 159 btsq = zalloc(sizeof(struct intel_bts_queue)); 160 if (!btsq) 161 return NULL; 162 163 btsq->bts = bts; 164 btsq->queue_nr = queue_nr; 165 btsq->pid = -1; 166 btsq->tid = -1; 167 btsq->cpu = -1; 168 169 return btsq; 170 } 171 172 static int intel_bts_setup_queue(struct intel_bts *bts, 173 struct auxtrace_queue *queue, 174 unsigned int queue_nr) 175 { 176 struct intel_bts_queue *btsq = queue->priv; 177 178 if (list_empty(&queue->head)) 179 return 0; 180 181 if (!btsq) { 182 btsq = intel_bts_alloc_queue(bts, queue_nr); 183 if (!btsq) 184 return -ENOMEM; 185 queue->priv = btsq; 186 187 if (queue->cpu != -1) 188 btsq->cpu = queue->cpu; 189 btsq->tid = queue->tid; 190 } 191 192 if (bts->sampling_mode) 193 return 0; 194 195 if (!btsq->on_heap && !btsq->buffer) { 196 int ret; 197 198 btsq->buffer = auxtrace_buffer__next(queue, NULL); 199 if (!btsq->buffer) 200 return 0; 201 202 ret = auxtrace_heap__add(&bts->heap, queue_nr, 203 btsq->buffer->reference); 204 if (ret) 205 return ret; 206 btsq->on_heap = true; 207 } 208 209 return 0; 210 } 211 212 static int intel_bts_setup_queues(struct intel_bts *bts) 213 { 214 unsigned int i; 215 int ret; 216 217 for (i = 0; i < bts->queues.nr_queues; i++) { 218 ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i], 219 i); 220 if (ret) 221 return ret; 222 } 223 return 0; 224 } 225 226 static inline int intel_bts_update_queues(struct intel_bts *bts) 227 { 228 if (bts->queues.new_data) { 229 bts->queues.new_data = false; 230 return intel_bts_setup_queues(bts); 231 } 232 return 0; 233 } 234 235 static unsigned char *intel_bts_find_overlap(unsigned char *buf_a, size_t len_a, 236 unsigned char *buf_b, size_t len_b) 237 { 238 size_t offs, len; 239 240 if (len_a > len_b) 241 offs = len_a - len_b; 242 else 243 offs = 0; 244 245 for (; offs < len_a; offs += sizeof(struct branch)) { 246 len = len_a - offs; 247 if (!memcmp(buf_a + offs, buf_b, len)) 248 return buf_b + len; 249 } 250 251 return buf_b; 252 } 253 254 static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue, 255 struct auxtrace_buffer *b) 256 { 257 struct auxtrace_buffer *a; 258 void *start; 259 260 if (b->list.prev == &queue->head) 261 return 0; 262 a = list_entry(b->list.prev, struct auxtrace_buffer, list); 263 start = intel_bts_find_overlap(a->data, a->size, b->data, b->size); 264 if (!start) 265 return -EINVAL; 266 b->use_size = b->data + b->size - start; 267 b->use_data = start; 268 return 0; 269 } 270 271 static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq, 272 struct branch *branch) 273 { 274 int ret; 275 struct intel_bts *bts = btsq->bts; 276 union perf_event event; 277 struct perf_sample sample = { .ip = 0, }; 278 279 if (bts->synth_opts.initial_skip && 280 bts->num_events++ <= bts->synth_opts.initial_skip) 281 return 0; 282 283 event.sample.header.type = PERF_RECORD_SAMPLE; 284 event.sample.header.misc = PERF_RECORD_MISC_USER; 285 event.sample.header.size = sizeof(struct perf_event_header); 286 287 sample.cpumode = PERF_RECORD_MISC_USER; 288 sample.ip = le64_to_cpu(branch->from); 289 sample.pid = btsq->pid; 290 sample.tid = btsq->tid; 291 sample.addr = le64_to_cpu(branch->to); 292 sample.id = btsq->bts->branches_id; 293 sample.stream_id = btsq->bts->branches_id; 294 sample.period = 1; 295 sample.cpu = btsq->cpu; 296 sample.flags = btsq->sample_flags; 297 sample.insn_len = btsq->intel_pt_insn.length; 298 299 if (bts->synth_opts.inject) { 300 event.sample.header.size = bts->branches_event_size; 301 ret = perf_event__synthesize_sample(&event, 302 bts->branches_sample_type, 303 0, &sample, 304 bts->synth_needs_swap); 305 if (ret) 306 return ret; 307 } 308 309 ret = perf_session__deliver_synth_event(bts->session, &event, &sample); 310 if (ret) 311 pr_err("Intel BTS: failed to deliver branch event, error %d\n", 312 ret); 313 314 return ret; 315 } 316 317 static int intel_bts_get_next_insn(struct intel_bts_queue *btsq, u64 ip) 318 { 319 struct machine *machine = btsq->bts->machine; 320 struct thread *thread; 321 struct addr_location al; 322 unsigned char buf[1024]; 323 size_t bufsz; 324 ssize_t len; 325 int x86_64; 326 uint8_t cpumode; 327 int err = -1; 328 329 bufsz = intel_pt_insn_max_size(); 330 331 if (machine__kernel_ip(machine, ip)) 332 cpumode = PERF_RECORD_MISC_KERNEL; 333 else 334 cpumode = PERF_RECORD_MISC_USER; 335 336 thread = machine__find_thread(machine, -1, btsq->tid); 337 if (!thread) 338 return -1; 339 340 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, ip, &al); 341 if (!al.map || !al.map->dso) 342 goto out_put; 343 344 len = dso__data_read_addr(al.map->dso, al.map, machine, ip, buf, bufsz); 345 if (len <= 0) 346 goto out_put; 347 348 /* Load maps to ensure dso->is_64_bit has been updated */ 349 map__load(al.map, machine->symbol_filter); 350 351 x86_64 = al.map->dso->is_64_bit; 352 353 if (intel_pt_get_insn(buf, len, x86_64, &btsq->intel_pt_insn)) 354 goto out_put; 355 356 err = 0; 357 out_put: 358 thread__put(thread); 359 return err; 360 } 361 362 static int intel_bts_synth_error(struct intel_bts *bts, int cpu, pid_t pid, 363 pid_t tid, u64 ip) 364 { 365 union perf_event event; 366 int err; 367 368 auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, 369 INTEL_BTS_ERR_NOINSN, cpu, pid, tid, ip, 370 "Failed to get instruction"); 371 372 err = perf_session__deliver_synth_event(bts->session, &event, NULL); 373 if (err) 374 pr_err("Intel BTS: failed to deliver error event, error %d\n", 375 err); 376 377 return err; 378 } 379 380 static int intel_bts_get_branch_type(struct intel_bts_queue *btsq, 381 struct branch *branch) 382 { 383 int err; 384 385 if (!branch->from) { 386 if (branch->to) 387 btsq->sample_flags = PERF_IP_FLAG_BRANCH | 388 PERF_IP_FLAG_TRACE_BEGIN; 389 else 390 btsq->sample_flags = 0; 391 btsq->intel_pt_insn.length = 0; 392 } else if (!branch->to) { 393 btsq->sample_flags = PERF_IP_FLAG_BRANCH | 394 PERF_IP_FLAG_TRACE_END; 395 btsq->intel_pt_insn.length = 0; 396 } else { 397 err = intel_bts_get_next_insn(btsq, branch->from); 398 if (err) { 399 btsq->sample_flags = 0; 400 btsq->intel_pt_insn.length = 0; 401 if (!btsq->bts->synth_opts.errors) 402 return 0; 403 err = intel_bts_synth_error(btsq->bts, btsq->cpu, 404 btsq->pid, btsq->tid, 405 branch->from); 406 return err; 407 } 408 btsq->sample_flags = intel_pt_insn_type(btsq->intel_pt_insn.op); 409 /* Check for an async branch into the kernel */ 410 if (!machine__kernel_ip(btsq->bts->machine, branch->from) && 411 machine__kernel_ip(btsq->bts->machine, branch->to) && 412 btsq->sample_flags != (PERF_IP_FLAG_BRANCH | 413 PERF_IP_FLAG_CALL | 414 PERF_IP_FLAG_SYSCALLRET)) 415 btsq->sample_flags = PERF_IP_FLAG_BRANCH | 416 PERF_IP_FLAG_CALL | 417 PERF_IP_FLAG_ASYNC | 418 PERF_IP_FLAG_INTERRUPT; 419 } 420 421 return 0; 422 } 423 424 static int intel_bts_process_buffer(struct intel_bts_queue *btsq, 425 struct auxtrace_buffer *buffer) 426 { 427 struct branch *branch; 428 size_t sz, bsz = sizeof(struct branch); 429 u32 filter = btsq->bts->branches_filter; 430 int err = 0; 431 432 if (buffer->use_data) { 433 sz = buffer->use_size; 434 branch = buffer->use_data; 435 } else { 436 sz = buffer->size; 437 branch = buffer->data; 438 } 439 440 if (!btsq->bts->sample_branches) 441 return 0; 442 443 for (; sz > bsz; branch += 1, sz -= bsz) { 444 if (!branch->from && !branch->to) 445 continue; 446 intel_bts_get_branch_type(btsq, branch); 447 if (filter && !(filter & btsq->sample_flags)) 448 continue; 449 err = intel_bts_synth_branch_sample(btsq, branch); 450 if (err) 451 break; 452 } 453 return err; 454 } 455 456 static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp) 457 { 458 struct auxtrace_buffer *buffer = btsq->buffer, *old_buffer = buffer; 459 struct auxtrace_queue *queue; 460 struct thread *thread; 461 int err; 462 463 if (btsq->done) 464 return 1; 465 466 if (btsq->pid == -1) { 467 thread = machine__find_thread(btsq->bts->machine, -1, 468 btsq->tid); 469 if (thread) 470 btsq->pid = thread->pid_; 471 } else { 472 thread = machine__findnew_thread(btsq->bts->machine, btsq->pid, 473 btsq->tid); 474 } 475 476 queue = &btsq->bts->queues.queue_array[btsq->queue_nr]; 477 478 if (!buffer) 479 buffer = auxtrace_buffer__next(queue, NULL); 480 481 if (!buffer) { 482 if (!btsq->bts->sampling_mode) 483 btsq->done = 1; 484 err = 1; 485 goto out_put; 486 } 487 488 /* Currently there is no support for split buffers */ 489 if (buffer->consecutive) { 490 err = -EINVAL; 491 goto out_put; 492 } 493 494 if (!buffer->data) { 495 int fd = perf_data_file__fd(btsq->bts->session->file); 496 497 buffer->data = auxtrace_buffer__get_data(buffer, fd); 498 if (!buffer->data) { 499 err = -ENOMEM; 500 goto out_put; 501 } 502 } 503 504 if (btsq->bts->snapshot_mode && !buffer->consecutive && 505 intel_bts_do_fix_overlap(queue, buffer)) { 506 err = -ENOMEM; 507 goto out_put; 508 } 509 510 if (!btsq->bts->synth_opts.callchain && thread && 511 (!old_buffer || btsq->bts->sampling_mode || 512 (btsq->bts->snapshot_mode && !buffer->consecutive))) 513 thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1); 514 515 err = intel_bts_process_buffer(btsq, buffer); 516 517 auxtrace_buffer__drop_data(buffer); 518 519 btsq->buffer = auxtrace_buffer__next(queue, buffer); 520 if (btsq->buffer) { 521 if (timestamp) 522 *timestamp = btsq->buffer->reference; 523 } else { 524 if (!btsq->bts->sampling_mode) 525 btsq->done = 1; 526 } 527 out_put: 528 thread__put(thread); 529 return err; 530 } 531 532 static int intel_bts_flush_queue(struct intel_bts_queue *btsq) 533 { 534 u64 ts = 0; 535 int ret; 536 537 while (1) { 538 ret = intel_bts_process_queue(btsq, &ts); 539 if (ret < 0) 540 return ret; 541 if (ret) 542 break; 543 } 544 return 0; 545 } 546 547 static int intel_bts_process_tid_exit(struct intel_bts *bts, pid_t tid) 548 { 549 struct auxtrace_queues *queues = &bts->queues; 550 unsigned int i; 551 552 for (i = 0; i < queues->nr_queues; i++) { 553 struct auxtrace_queue *queue = &bts->queues.queue_array[i]; 554 struct intel_bts_queue *btsq = queue->priv; 555 556 if (btsq && btsq->tid == tid) 557 return intel_bts_flush_queue(btsq); 558 } 559 return 0; 560 } 561 562 static int intel_bts_process_queues(struct intel_bts *bts, u64 timestamp) 563 { 564 while (1) { 565 unsigned int queue_nr; 566 struct auxtrace_queue *queue; 567 struct intel_bts_queue *btsq; 568 u64 ts = 0; 569 int ret; 570 571 if (!bts->heap.heap_cnt) 572 return 0; 573 574 if (bts->heap.heap_array[0].ordinal > timestamp) 575 return 0; 576 577 queue_nr = bts->heap.heap_array[0].queue_nr; 578 queue = &bts->queues.queue_array[queue_nr]; 579 btsq = queue->priv; 580 581 auxtrace_heap__pop(&bts->heap); 582 583 ret = intel_bts_process_queue(btsq, &ts); 584 if (ret < 0) { 585 auxtrace_heap__add(&bts->heap, queue_nr, ts); 586 return ret; 587 } 588 589 if (!ret) { 590 ret = auxtrace_heap__add(&bts->heap, queue_nr, ts); 591 if (ret < 0) 592 return ret; 593 } else { 594 btsq->on_heap = false; 595 } 596 } 597 598 return 0; 599 } 600 601 static int intel_bts_process_event(struct perf_session *session, 602 union perf_event *event, 603 struct perf_sample *sample, 604 struct perf_tool *tool) 605 { 606 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, 607 auxtrace); 608 u64 timestamp; 609 int err; 610 611 if (dump_trace) 612 return 0; 613 614 if (!tool->ordered_events) { 615 pr_err("Intel BTS requires ordered events\n"); 616 return -EINVAL; 617 } 618 619 if (sample->time && sample->time != (u64)-1) 620 timestamp = perf_time_to_tsc(sample->time, &bts->tc); 621 else 622 timestamp = 0; 623 624 err = intel_bts_update_queues(bts); 625 if (err) 626 return err; 627 628 err = intel_bts_process_queues(bts, timestamp); 629 if (err) 630 return err; 631 if (event->header.type == PERF_RECORD_EXIT) { 632 err = intel_bts_process_tid_exit(bts, event->fork.tid); 633 if (err) 634 return err; 635 } 636 637 if (event->header.type == PERF_RECORD_AUX && 638 (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) && 639 bts->synth_opts.errors) 640 err = intel_bts_lost(bts, sample); 641 642 return err; 643 } 644 645 static int intel_bts_process_auxtrace_event(struct perf_session *session, 646 union perf_event *event, 647 struct perf_tool *tool __maybe_unused) 648 { 649 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, 650 auxtrace); 651 652 if (bts->sampling_mode) 653 return 0; 654 655 if (!bts->data_queued) { 656 struct auxtrace_buffer *buffer; 657 off_t data_offset; 658 int fd = perf_data_file__fd(session->file); 659 int err; 660 661 if (perf_data_file__is_pipe(session->file)) { 662 data_offset = 0; 663 } else { 664 data_offset = lseek(fd, 0, SEEK_CUR); 665 if (data_offset == -1) 666 return -errno; 667 } 668 669 err = auxtrace_queues__add_event(&bts->queues, session, event, 670 data_offset, &buffer); 671 if (err) 672 return err; 673 674 /* Dump here now we have copied a piped trace out of the pipe */ 675 if (dump_trace) { 676 if (auxtrace_buffer__get_data(buffer, fd)) { 677 intel_bts_dump_event(bts, buffer->data, 678 buffer->size); 679 auxtrace_buffer__put_data(buffer); 680 } 681 } 682 } 683 684 return 0; 685 } 686 687 static int intel_bts_flush(struct perf_session *session, 688 struct perf_tool *tool __maybe_unused) 689 { 690 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, 691 auxtrace); 692 int ret; 693 694 if (dump_trace || bts->sampling_mode) 695 return 0; 696 697 if (!tool->ordered_events) 698 return -EINVAL; 699 700 ret = intel_bts_update_queues(bts); 701 if (ret < 0) 702 return ret; 703 704 return intel_bts_process_queues(bts, MAX_TIMESTAMP); 705 } 706 707 static void intel_bts_free_queue(void *priv) 708 { 709 struct intel_bts_queue *btsq = priv; 710 711 if (!btsq) 712 return; 713 free(btsq); 714 } 715 716 static void intel_bts_free_events(struct perf_session *session) 717 { 718 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, 719 auxtrace); 720 struct auxtrace_queues *queues = &bts->queues; 721 unsigned int i; 722 723 for (i = 0; i < queues->nr_queues; i++) { 724 intel_bts_free_queue(queues->queue_array[i].priv); 725 queues->queue_array[i].priv = NULL; 726 } 727 auxtrace_queues__free(queues); 728 } 729 730 static void intel_bts_free(struct perf_session *session) 731 { 732 struct intel_bts *bts = container_of(session->auxtrace, struct intel_bts, 733 auxtrace); 734 735 auxtrace_heap__free(&bts->heap); 736 intel_bts_free_events(session); 737 session->auxtrace = NULL; 738 free(bts); 739 } 740 741 struct intel_bts_synth { 742 struct perf_tool dummy_tool; 743 struct perf_session *session; 744 }; 745 746 static int intel_bts_event_synth(struct perf_tool *tool, 747 union perf_event *event, 748 struct perf_sample *sample __maybe_unused, 749 struct machine *machine __maybe_unused) 750 { 751 struct intel_bts_synth *intel_bts_synth = 752 container_of(tool, struct intel_bts_synth, dummy_tool); 753 754 return perf_session__deliver_synth_event(intel_bts_synth->session, 755 event, NULL); 756 } 757 758 static int intel_bts_synth_event(struct perf_session *session, 759 struct perf_event_attr *attr, u64 id) 760 { 761 struct intel_bts_synth intel_bts_synth; 762 763 memset(&intel_bts_synth, 0, sizeof(struct intel_bts_synth)); 764 intel_bts_synth.session = session; 765 766 return perf_event__synthesize_attr(&intel_bts_synth.dummy_tool, attr, 1, 767 &id, intel_bts_event_synth); 768 } 769 770 static int intel_bts_synth_events(struct intel_bts *bts, 771 struct perf_session *session) 772 { 773 struct perf_evlist *evlist = session->evlist; 774 struct perf_evsel *evsel; 775 struct perf_event_attr attr; 776 bool found = false; 777 u64 id; 778 int err; 779 780 evlist__for_each(evlist, evsel) { 781 if (evsel->attr.type == bts->pmu_type && evsel->ids) { 782 found = true; 783 break; 784 } 785 } 786 787 if (!found) { 788 pr_debug("There are no selected events with Intel BTS data\n"); 789 return 0; 790 } 791 792 memset(&attr, 0, sizeof(struct perf_event_attr)); 793 attr.size = sizeof(struct perf_event_attr); 794 attr.type = PERF_TYPE_HARDWARE; 795 attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK; 796 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | 797 PERF_SAMPLE_PERIOD; 798 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; 799 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU; 800 attr.exclude_user = evsel->attr.exclude_user; 801 attr.exclude_kernel = evsel->attr.exclude_kernel; 802 attr.exclude_hv = evsel->attr.exclude_hv; 803 attr.exclude_host = evsel->attr.exclude_host; 804 attr.exclude_guest = evsel->attr.exclude_guest; 805 attr.sample_id_all = evsel->attr.sample_id_all; 806 attr.read_format = evsel->attr.read_format; 807 808 id = evsel->id[0] + 1000000000; 809 if (!id) 810 id = 1; 811 812 if (bts->synth_opts.branches) { 813 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS; 814 attr.sample_period = 1; 815 attr.sample_type |= PERF_SAMPLE_ADDR; 816 pr_debug("Synthesizing 'branches' event with id %" PRIu64 " sample type %#" PRIx64 "\n", 817 id, (u64)attr.sample_type); 818 err = intel_bts_synth_event(session, &attr, id); 819 if (err) { 820 pr_err("%s: failed to synthesize 'branches' event type\n", 821 __func__); 822 return err; 823 } 824 bts->sample_branches = true; 825 bts->branches_sample_type = attr.sample_type; 826 bts->branches_id = id; 827 /* 828 * We only use sample types from PERF_SAMPLE_MASK so we can use 829 * __perf_evsel__sample_size() here. 830 */ 831 bts->branches_event_size = sizeof(struct sample_event) + 832 __perf_evsel__sample_size(attr.sample_type); 833 } 834 835 bts->synth_needs_swap = evsel->needs_swap; 836 837 return 0; 838 } 839 840 static const char * const intel_bts_info_fmts[] = { 841 [INTEL_BTS_PMU_TYPE] = " PMU Type %"PRId64"\n", 842 [INTEL_BTS_TIME_SHIFT] = " Time Shift %"PRIu64"\n", 843 [INTEL_BTS_TIME_MULT] = " Time Muliplier %"PRIu64"\n", 844 [INTEL_BTS_TIME_ZERO] = " Time Zero %"PRIu64"\n", 845 [INTEL_BTS_CAP_USER_TIME_ZERO] = " Cap Time Zero %"PRId64"\n", 846 [INTEL_BTS_SNAPSHOT_MODE] = " Snapshot mode %"PRId64"\n", 847 }; 848 849 static void intel_bts_print_info(u64 *arr, int start, int finish) 850 { 851 int i; 852 853 if (!dump_trace) 854 return; 855 856 for (i = start; i <= finish; i++) 857 fprintf(stdout, intel_bts_info_fmts[i], arr[i]); 858 } 859 860 u64 intel_bts_auxtrace_info_priv[INTEL_BTS_AUXTRACE_PRIV_SIZE]; 861 862 int intel_bts_process_auxtrace_info(union perf_event *event, 863 struct perf_session *session) 864 { 865 struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info; 866 size_t min_sz = sizeof(u64) * INTEL_BTS_SNAPSHOT_MODE; 867 struct intel_bts *bts; 868 int err; 869 870 if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) + 871 min_sz) 872 return -EINVAL; 873 874 bts = zalloc(sizeof(struct intel_bts)); 875 if (!bts) 876 return -ENOMEM; 877 878 err = auxtrace_queues__init(&bts->queues); 879 if (err) 880 goto err_free; 881 882 bts->session = session; 883 bts->machine = &session->machines.host; /* No kvm support */ 884 bts->auxtrace_type = auxtrace_info->type; 885 bts->pmu_type = auxtrace_info->priv[INTEL_BTS_PMU_TYPE]; 886 bts->tc.time_shift = auxtrace_info->priv[INTEL_BTS_TIME_SHIFT]; 887 bts->tc.time_mult = auxtrace_info->priv[INTEL_BTS_TIME_MULT]; 888 bts->tc.time_zero = auxtrace_info->priv[INTEL_BTS_TIME_ZERO]; 889 bts->cap_user_time_zero = 890 auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO]; 891 bts->snapshot_mode = auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE]; 892 893 bts->sampling_mode = false; 894 895 bts->auxtrace.process_event = intel_bts_process_event; 896 bts->auxtrace.process_auxtrace_event = intel_bts_process_auxtrace_event; 897 bts->auxtrace.flush_events = intel_bts_flush; 898 bts->auxtrace.free_events = intel_bts_free_events; 899 bts->auxtrace.free = intel_bts_free; 900 session->auxtrace = &bts->auxtrace; 901 902 intel_bts_print_info(&auxtrace_info->priv[0], INTEL_BTS_PMU_TYPE, 903 INTEL_BTS_SNAPSHOT_MODE); 904 905 if (dump_trace) 906 return 0; 907 908 if (session->itrace_synth_opts && session->itrace_synth_opts->set) 909 bts->synth_opts = *session->itrace_synth_opts; 910 else 911 itrace_synth_opts__set_default(&bts->synth_opts); 912 913 if (bts->synth_opts.calls) 914 bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | 915 PERF_IP_FLAG_TRACE_END; 916 if (bts->synth_opts.returns) 917 bts->branches_filter |= PERF_IP_FLAG_RETURN | 918 PERF_IP_FLAG_TRACE_BEGIN; 919 920 err = intel_bts_synth_events(bts, session); 921 if (err) 922 goto err_free_queues; 923 924 err = auxtrace_queues__process_index(&bts->queues, session); 925 if (err) 926 goto err_free_queues; 927 928 if (bts->queues.populated) 929 bts->data_queued = true; 930 931 return 0; 932 933 err_free_queues: 934 auxtrace_queues__free(&bts->queues); 935 session->auxtrace = NULL; 936 err_free: 937 free(bts); 938 return err; 939 } 940