1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Arm Statistical Profiling Extensions (SPE) support 4 * Copyright (c) 2017-2018, Arm Ltd. 5 */ 6 7 #include <byteswap.h> 8 #include <endian.h> 9 #include <errno.h> 10 #include <inttypes.h> 11 #include <linux/bitops.h> 12 #include <linux/kernel.h> 13 #include <linux/log2.h> 14 #include <linux/types.h> 15 #include <linux/zalloc.h> 16 #include <stdlib.h> 17 #include <unistd.h> 18 19 #include "auxtrace.h" 20 #include "color.h" 21 #include "debug.h" 22 #include "evlist.h" 23 #include "evsel.h" 24 #include "machine.h" 25 #include "session.h" 26 #include "symbol.h" 27 #include "thread.h" 28 #include "thread-stack.h" 29 #include "tool.h" 30 #include "util/synthetic-events.h" 31 32 #include "arm-spe.h" 33 #include "arm-spe-decoder/arm-spe-decoder.h" 34 #include "arm-spe-decoder/arm-spe-pkt-decoder.h" 35 36 #define MAX_TIMESTAMP (~0ULL) 37 38 struct arm_spe { 39 struct auxtrace auxtrace; 40 struct auxtrace_queues queues; 41 struct auxtrace_heap heap; 42 struct itrace_synth_opts synth_opts; 43 u32 auxtrace_type; 44 struct perf_session *session; 45 struct machine *machine; 46 u32 pmu_type; 47 48 u8 timeless_decoding; 49 u8 data_queued; 50 51 u8 sample_flc; 52 u8 sample_llc; 53 u8 sample_tlb; 54 u8 sample_branch; 55 u8 sample_remote_access; 56 57 u64 l1d_miss_id; 58 u64 l1d_access_id; 59 u64 llc_miss_id; 60 u64 llc_access_id; 61 u64 tlb_miss_id; 62 u64 tlb_access_id; 63 u64 branch_miss_id; 64 u64 remote_access_id; 65 66 u64 kernel_start; 67 68 unsigned long num_events; 69 }; 70 71 struct arm_spe_queue { 72 struct arm_spe *spe; 73 unsigned int queue_nr; 74 struct auxtrace_buffer *buffer; 75 struct auxtrace_buffer *old_buffer; 76 union perf_event *event_buf; 77 bool on_heap; 78 bool done; 79 pid_t pid; 80 pid_t tid; 81 int cpu; 82 struct arm_spe_decoder *decoder; 83 u64 time; 84 u64 timestamp; 85 struct thread *thread; 86 }; 87 88 static void arm_spe_dump(struct arm_spe *spe __maybe_unused, 89 unsigned char *buf, size_t len) 90 { 91 struct arm_spe_pkt packet; 92 size_t pos = 0; 93 int ret, pkt_len, i; 94 char desc[ARM_SPE_PKT_DESC_MAX]; 95 const char *color = PERF_COLOR_BLUE; 96 97 color_fprintf(stdout, color, 98 ". ... ARM SPE data: size %zu bytes\n", 99 len); 100 101 while (len) { 102 ret = arm_spe_get_packet(buf, len, &packet); 103 if (ret > 0) 104 pkt_len = ret; 105 else 106 pkt_len = 1; 107 printf("."); 108 color_fprintf(stdout, color, " %08x: ", pos); 109 for (i = 0; i < pkt_len; i++) 110 color_fprintf(stdout, color, " %02x", buf[i]); 111 for (; i < 16; i++) 112 color_fprintf(stdout, color, " "); 113 if (ret > 0) { 114 ret = arm_spe_pkt_desc(&packet, desc, 115 ARM_SPE_PKT_DESC_MAX); 116 if (ret > 0) 117 color_fprintf(stdout, color, " %s\n", desc); 118 } else { 119 color_fprintf(stdout, color, " Bad packet!\n"); 120 } 121 pos += pkt_len; 122 buf += pkt_len; 123 len -= pkt_len; 124 } 125 } 126 127 static void arm_spe_dump_event(struct arm_spe *spe, unsigned char *buf, 128 size_t len) 129 { 130 printf(".\n"); 131 arm_spe_dump(spe, buf, len); 132 } 133 134 static int arm_spe_get_trace(struct arm_spe_buffer *b, void *data) 135 { 136 struct arm_spe_queue *speq = data; 137 struct auxtrace_buffer *buffer = speq->buffer; 138 struct auxtrace_buffer *old_buffer = speq->old_buffer; 139 struct auxtrace_queue *queue; 140 141 queue = &speq->spe->queues.queue_array[speq->queue_nr]; 142 143 buffer = auxtrace_buffer__next(queue, buffer); 144 /* If no more data, drop the previous auxtrace_buffer and return */ 145 if (!buffer) { 146 if (old_buffer) 147 auxtrace_buffer__drop_data(old_buffer); 148 b->len = 0; 149 return 0; 150 } 151 152 speq->buffer = buffer; 153 154 /* If the aux_buffer doesn't have data associated, try to load it */ 155 if (!buffer->data) { 156 /* get the file desc associated with the perf data file */ 157 int fd = perf_data__fd(speq->spe->session->data); 158 159 buffer->data = auxtrace_buffer__get_data(buffer, fd); 160 if (!buffer->data) 161 return -ENOMEM; 162 } 163 164 b->len = buffer->size; 165 b->buf = buffer->data; 166 167 if (b->len) { 168 if (old_buffer) 169 auxtrace_buffer__drop_data(old_buffer); 170 speq->old_buffer = buffer; 171 } else { 172 auxtrace_buffer__drop_data(buffer); 173 return arm_spe_get_trace(b, data); 174 } 175 176 return 0; 177 } 178 179 static struct arm_spe_queue *arm_spe__alloc_queue(struct arm_spe *spe, 180 unsigned int queue_nr) 181 { 182 struct arm_spe_params params = { .get_trace = 0, }; 183 struct arm_spe_queue *speq; 184 185 speq = zalloc(sizeof(*speq)); 186 if (!speq) 187 return NULL; 188 189 speq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); 190 if (!speq->event_buf) 191 goto out_free; 192 193 speq->spe = spe; 194 speq->queue_nr = queue_nr; 195 speq->pid = -1; 196 speq->tid = -1; 197 speq->cpu = -1; 198 199 /* params set */ 200 params.get_trace = arm_spe_get_trace; 201 params.data = speq; 202 203 /* create new decoder */ 204 speq->decoder = arm_spe_decoder_new(¶ms); 205 if (!speq->decoder) 206 goto out_free; 207 208 return speq; 209 210 out_free: 211 zfree(&speq->event_buf); 212 free(speq); 213 214 return NULL; 215 } 216 217 static inline u8 arm_spe_cpumode(struct arm_spe *spe, u64 ip) 218 { 219 return ip >= spe->kernel_start ? 220 PERF_RECORD_MISC_KERNEL : 221 PERF_RECORD_MISC_USER; 222 } 223 224 static void arm_spe_prep_sample(struct arm_spe *spe, 225 struct arm_spe_queue *speq, 226 union perf_event *event, 227 struct perf_sample *sample) 228 { 229 struct arm_spe_record *record = &speq->decoder->record; 230 231 if (!spe->timeless_decoding) 232 sample->time = speq->timestamp; 233 234 sample->ip = record->from_ip; 235 sample->cpumode = arm_spe_cpumode(spe, sample->ip); 236 sample->pid = speq->pid; 237 sample->tid = speq->tid; 238 sample->addr = record->to_ip; 239 sample->period = 1; 240 sample->cpu = speq->cpu; 241 242 event->sample.header.type = PERF_RECORD_SAMPLE; 243 event->sample.header.misc = sample->cpumode; 244 event->sample.header.size = sizeof(struct perf_event_header); 245 } 246 247 static inline int 248 arm_spe_deliver_synth_event(struct arm_spe *spe, 249 struct arm_spe_queue *speq __maybe_unused, 250 union perf_event *event, 251 struct perf_sample *sample) 252 { 253 int ret; 254 255 ret = perf_session__deliver_synth_event(spe->session, event, sample); 256 if (ret) 257 pr_err("ARM SPE: failed to deliver event, error %d\n", ret); 258 259 return ret; 260 } 261 262 static int 263 arm_spe_synth_spe_events_sample(struct arm_spe_queue *speq, 264 u64 spe_events_id) 265 { 266 struct arm_spe *spe = speq->spe; 267 union perf_event *event = speq->event_buf; 268 struct perf_sample sample = { .ip = 0, }; 269 270 arm_spe_prep_sample(spe, speq, event, &sample); 271 272 sample.id = spe_events_id; 273 sample.stream_id = spe_events_id; 274 275 return arm_spe_deliver_synth_event(spe, speq, event, &sample); 276 } 277 278 static int arm_spe_sample(struct arm_spe_queue *speq) 279 { 280 const struct arm_spe_record *record = &speq->decoder->record; 281 struct arm_spe *spe = speq->spe; 282 int err; 283 284 if (spe->sample_flc) { 285 if (record->type & ARM_SPE_L1D_MISS) { 286 err = arm_spe_synth_spe_events_sample( 287 speq, spe->l1d_miss_id); 288 if (err) 289 return err; 290 } 291 292 if (record->type & ARM_SPE_L1D_ACCESS) { 293 err = arm_spe_synth_spe_events_sample( 294 speq, spe->l1d_access_id); 295 if (err) 296 return err; 297 } 298 } 299 300 if (spe->sample_llc) { 301 if (record->type & ARM_SPE_LLC_MISS) { 302 err = arm_spe_synth_spe_events_sample( 303 speq, spe->llc_miss_id); 304 if (err) 305 return err; 306 } 307 308 if (record->type & ARM_SPE_LLC_ACCESS) { 309 err = arm_spe_synth_spe_events_sample( 310 speq, spe->llc_access_id); 311 if (err) 312 return err; 313 } 314 } 315 316 if (spe->sample_tlb) { 317 if (record->type & ARM_SPE_TLB_MISS) { 318 err = arm_spe_synth_spe_events_sample( 319 speq, spe->tlb_miss_id); 320 if (err) 321 return err; 322 } 323 324 if (record->type & ARM_SPE_TLB_ACCESS) { 325 err = arm_spe_synth_spe_events_sample( 326 speq, spe->tlb_access_id); 327 if (err) 328 return err; 329 } 330 } 331 332 if (spe->sample_branch && (record->type & ARM_SPE_BRANCH_MISS)) { 333 err = arm_spe_synth_spe_events_sample(speq, 334 spe->branch_miss_id); 335 if (err) 336 return err; 337 } 338 339 if (spe->sample_remote_access && 340 (record->type & ARM_SPE_REMOTE_ACCESS)) { 341 err = arm_spe_synth_spe_events_sample(speq, 342 spe->remote_access_id); 343 if (err) 344 return err; 345 } 346 347 return 0; 348 } 349 350 static int arm_spe_run_decoder(struct arm_spe_queue *speq, u64 *timestamp) 351 { 352 struct arm_spe *spe = speq->spe; 353 int ret; 354 355 if (!spe->kernel_start) 356 spe->kernel_start = machine__kernel_start(spe->machine); 357 358 while (1) { 359 ret = arm_spe_decode(speq->decoder); 360 if (!ret) { 361 pr_debug("No data or all data has been processed.\n"); 362 return 1; 363 } 364 365 /* 366 * Error is detected when decode SPE trace data, continue to 367 * the next trace data and find out more records. 368 */ 369 if (ret < 0) 370 continue; 371 372 ret = arm_spe_sample(speq); 373 if (ret) 374 return ret; 375 376 if (!spe->timeless_decoding && speq->timestamp >= *timestamp) { 377 *timestamp = speq->timestamp; 378 return 0; 379 } 380 } 381 382 return 0; 383 } 384 385 static int arm_spe__setup_queue(struct arm_spe *spe, 386 struct auxtrace_queue *queue, 387 unsigned int queue_nr) 388 { 389 struct arm_spe_queue *speq = queue->priv; 390 struct arm_spe_record *record; 391 392 if (list_empty(&queue->head) || speq) 393 return 0; 394 395 speq = arm_spe__alloc_queue(spe, queue_nr); 396 397 if (!speq) 398 return -ENOMEM; 399 400 queue->priv = speq; 401 402 if (queue->cpu != -1) 403 speq->cpu = queue->cpu; 404 405 if (!speq->on_heap) { 406 int ret; 407 408 if (spe->timeless_decoding) 409 return 0; 410 411 retry: 412 ret = arm_spe_decode(speq->decoder); 413 414 if (!ret) 415 return 0; 416 417 if (ret < 0) 418 goto retry; 419 420 record = &speq->decoder->record; 421 422 speq->timestamp = record->timestamp; 423 ret = auxtrace_heap__add(&spe->heap, queue_nr, speq->timestamp); 424 if (ret) 425 return ret; 426 speq->on_heap = true; 427 } 428 429 return 0; 430 } 431 432 static int arm_spe__setup_queues(struct arm_spe *spe) 433 { 434 unsigned int i; 435 int ret; 436 437 for (i = 0; i < spe->queues.nr_queues; i++) { 438 ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i); 439 if (ret) 440 return ret; 441 } 442 443 return 0; 444 } 445 446 static int arm_spe__update_queues(struct arm_spe *spe) 447 { 448 if (spe->queues.new_data) { 449 spe->queues.new_data = false; 450 return arm_spe__setup_queues(spe); 451 } 452 453 return 0; 454 } 455 456 static bool arm_spe__is_timeless_decoding(struct arm_spe *spe) 457 { 458 struct evsel *evsel; 459 struct evlist *evlist = spe->session->evlist; 460 bool timeless_decoding = true; 461 462 /* 463 * Circle through the list of event and complain if we find one 464 * with the time bit set. 465 */ 466 evlist__for_each_entry(evlist, evsel) { 467 if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME)) 468 timeless_decoding = false; 469 } 470 471 return timeless_decoding; 472 } 473 474 static void arm_spe_set_pid_tid_cpu(struct arm_spe *spe, 475 struct auxtrace_queue *queue) 476 { 477 struct arm_spe_queue *speq = queue->priv; 478 pid_t tid; 479 480 tid = machine__get_current_tid(spe->machine, speq->cpu); 481 if (tid != -1) { 482 speq->tid = tid; 483 thread__zput(speq->thread); 484 } else 485 speq->tid = queue->tid; 486 487 if ((!speq->thread) && (speq->tid != -1)) { 488 speq->thread = machine__find_thread(spe->machine, -1, 489 speq->tid); 490 } 491 492 if (speq->thread) { 493 speq->pid = speq->thread->pid_; 494 if (queue->cpu == -1) 495 speq->cpu = speq->thread->cpu; 496 } 497 } 498 499 static int arm_spe_process_queues(struct arm_spe *spe, u64 timestamp) 500 { 501 unsigned int queue_nr; 502 u64 ts; 503 int ret; 504 505 while (1) { 506 struct auxtrace_queue *queue; 507 struct arm_spe_queue *speq; 508 509 if (!spe->heap.heap_cnt) 510 return 0; 511 512 if (spe->heap.heap_array[0].ordinal >= timestamp) 513 return 0; 514 515 queue_nr = spe->heap.heap_array[0].queue_nr; 516 queue = &spe->queues.queue_array[queue_nr]; 517 speq = queue->priv; 518 519 auxtrace_heap__pop(&spe->heap); 520 521 if (spe->heap.heap_cnt) { 522 ts = spe->heap.heap_array[0].ordinal + 1; 523 if (ts > timestamp) 524 ts = timestamp; 525 } else { 526 ts = timestamp; 527 } 528 529 arm_spe_set_pid_tid_cpu(spe, queue); 530 531 ret = arm_spe_run_decoder(speq, &ts); 532 if (ret < 0) { 533 auxtrace_heap__add(&spe->heap, queue_nr, ts); 534 return ret; 535 } 536 537 if (!ret) { 538 ret = auxtrace_heap__add(&spe->heap, queue_nr, ts); 539 if (ret < 0) 540 return ret; 541 } else { 542 speq->on_heap = false; 543 } 544 } 545 546 return 0; 547 } 548 549 static int arm_spe_process_timeless_queues(struct arm_spe *spe, pid_t tid, 550 u64 time_) 551 { 552 struct auxtrace_queues *queues = &spe->queues; 553 unsigned int i; 554 u64 ts = 0; 555 556 for (i = 0; i < queues->nr_queues; i++) { 557 struct auxtrace_queue *queue = &spe->queues.queue_array[i]; 558 struct arm_spe_queue *speq = queue->priv; 559 560 if (speq && (tid == -1 || speq->tid == tid)) { 561 speq->time = time_; 562 arm_spe_set_pid_tid_cpu(spe, queue); 563 arm_spe_run_decoder(speq, &ts); 564 } 565 } 566 return 0; 567 } 568 569 static int arm_spe_process_event(struct perf_session *session, 570 union perf_event *event, 571 struct perf_sample *sample, 572 struct perf_tool *tool) 573 { 574 int err = 0; 575 u64 timestamp; 576 struct arm_spe *spe = container_of(session->auxtrace, 577 struct arm_spe, auxtrace); 578 579 if (dump_trace) 580 return 0; 581 582 if (!tool->ordered_events) { 583 pr_err("SPE trace requires ordered events\n"); 584 return -EINVAL; 585 } 586 587 if (sample->time && (sample->time != (u64) -1)) 588 timestamp = sample->time; 589 else 590 timestamp = 0; 591 592 if (timestamp || spe->timeless_decoding) { 593 err = arm_spe__update_queues(spe); 594 if (err) 595 return err; 596 } 597 598 if (spe->timeless_decoding) { 599 if (event->header.type == PERF_RECORD_EXIT) { 600 err = arm_spe_process_timeless_queues(spe, 601 event->fork.tid, 602 sample->time); 603 } 604 } else if (timestamp) { 605 if (event->header.type == PERF_RECORD_EXIT) { 606 err = arm_spe_process_queues(spe, timestamp); 607 if (err) 608 return err; 609 } 610 } 611 612 return err; 613 } 614 615 static int arm_spe_process_auxtrace_event(struct perf_session *session, 616 union perf_event *event, 617 struct perf_tool *tool __maybe_unused) 618 { 619 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, 620 auxtrace); 621 622 if (!spe->data_queued) { 623 struct auxtrace_buffer *buffer; 624 off_t data_offset; 625 int fd = perf_data__fd(session->data); 626 int err; 627 628 if (perf_data__is_pipe(session->data)) { 629 data_offset = 0; 630 } else { 631 data_offset = lseek(fd, 0, SEEK_CUR); 632 if (data_offset == -1) 633 return -errno; 634 } 635 636 err = auxtrace_queues__add_event(&spe->queues, session, event, 637 data_offset, &buffer); 638 if (err) 639 return err; 640 641 /* Dump here now we have copied a piped trace out of the pipe */ 642 if (dump_trace) { 643 if (auxtrace_buffer__get_data(buffer, fd)) { 644 arm_spe_dump_event(spe, buffer->data, 645 buffer->size); 646 auxtrace_buffer__put_data(buffer); 647 } 648 } 649 } 650 651 return 0; 652 } 653 654 static int arm_spe_flush(struct perf_session *session __maybe_unused, 655 struct perf_tool *tool __maybe_unused) 656 { 657 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, 658 auxtrace); 659 int ret; 660 661 if (dump_trace) 662 return 0; 663 664 if (!tool->ordered_events) 665 return -EINVAL; 666 667 ret = arm_spe__update_queues(spe); 668 if (ret < 0) 669 return ret; 670 671 if (spe->timeless_decoding) 672 return arm_spe_process_timeless_queues(spe, -1, 673 MAX_TIMESTAMP - 1); 674 675 return arm_spe_process_queues(spe, MAX_TIMESTAMP); 676 } 677 678 static void arm_spe_free_queue(void *priv) 679 { 680 struct arm_spe_queue *speq = priv; 681 682 if (!speq) 683 return; 684 thread__zput(speq->thread); 685 arm_spe_decoder_free(speq->decoder); 686 zfree(&speq->event_buf); 687 free(speq); 688 } 689 690 static void arm_spe_free_events(struct perf_session *session) 691 { 692 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, 693 auxtrace); 694 struct auxtrace_queues *queues = &spe->queues; 695 unsigned int i; 696 697 for (i = 0; i < queues->nr_queues; i++) { 698 arm_spe_free_queue(queues->queue_array[i].priv); 699 queues->queue_array[i].priv = NULL; 700 } 701 auxtrace_queues__free(queues); 702 } 703 704 static void arm_spe_free(struct perf_session *session) 705 { 706 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, 707 auxtrace); 708 709 auxtrace_heap__free(&spe->heap); 710 arm_spe_free_events(session); 711 session->auxtrace = NULL; 712 free(spe); 713 } 714 715 static bool arm_spe_evsel_is_auxtrace(struct perf_session *session, 716 struct evsel *evsel) 717 { 718 struct arm_spe *spe = container_of(session->auxtrace, struct arm_spe, auxtrace); 719 720 return evsel->core.attr.type == spe->pmu_type; 721 } 722 723 static const char * const arm_spe_info_fmts[] = { 724 [ARM_SPE_PMU_TYPE] = " PMU Type %"PRId64"\n", 725 }; 726 727 static void arm_spe_print_info(__u64 *arr) 728 { 729 if (!dump_trace) 730 return; 731 732 fprintf(stdout, arm_spe_info_fmts[ARM_SPE_PMU_TYPE], arr[ARM_SPE_PMU_TYPE]); 733 } 734 735 struct arm_spe_synth { 736 struct perf_tool dummy_tool; 737 struct perf_session *session; 738 }; 739 740 static int arm_spe_event_synth(struct perf_tool *tool, 741 union perf_event *event, 742 struct perf_sample *sample __maybe_unused, 743 struct machine *machine __maybe_unused) 744 { 745 struct arm_spe_synth *arm_spe_synth = 746 container_of(tool, struct arm_spe_synth, dummy_tool); 747 748 return perf_session__deliver_synth_event(arm_spe_synth->session, 749 event, NULL); 750 } 751 752 static int arm_spe_synth_event(struct perf_session *session, 753 struct perf_event_attr *attr, u64 id) 754 { 755 struct arm_spe_synth arm_spe_synth; 756 757 memset(&arm_spe_synth, 0, sizeof(struct arm_spe_synth)); 758 arm_spe_synth.session = session; 759 760 return perf_event__synthesize_attr(&arm_spe_synth.dummy_tool, attr, 1, 761 &id, arm_spe_event_synth); 762 } 763 764 static void arm_spe_set_event_name(struct evlist *evlist, u64 id, 765 const char *name) 766 { 767 struct evsel *evsel; 768 769 evlist__for_each_entry(evlist, evsel) { 770 if (evsel->core.id && evsel->core.id[0] == id) { 771 if (evsel->name) 772 zfree(&evsel->name); 773 evsel->name = strdup(name); 774 break; 775 } 776 } 777 } 778 779 static int 780 arm_spe_synth_events(struct arm_spe *spe, struct perf_session *session) 781 { 782 struct evlist *evlist = session->evlist; 783 struct evsel *evsel; 784 struct perf_event_attr attr; 785 bool found = false; 786 u64 id; 787 int err; 788 789 evlist__for_each_entry(evlist, evsel) { 790 if (evsel->core.attr.type == spe->pmu_type) { 791 found = true; 792 break; 793 } 794 } 795 796 if (!found) { 797 pr_debug("No selected events with SPE trace data\n"); 798 return 0; 799 } 800 801 memset(&attr, 0, sizeof(struct perf_event_attr)); 802 attr.size = sizeof(struct perf_event_attr); 803 attr.type = PERF_TYPE_HARDWARE; 804 attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK; 805 attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID | 806 PERF_SAMPLE_PERIOD; 807 if (spe->timeless_decoding) 808 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME; 809 else 810 attr.sample_type |= PERF_SAMPLE_TIME; 811 812 attr.exclude_user = evsel->core.attr.exclude_user; 813 attr.exclude_kernel = evsel->core.attr.exclude_kernel; 814 attr.exclude_hv = evsel->core.attr.exclude_hv; 815 attr.exclude_host = evsel->core.attr.exclude_host; 816 attr.exclude_guest = evsel->core.attr.exclude_guest; 817 attr.sample_id_all = evsel->core.attr.sample_id_all; 818 attr.read_format = evsel->core.attr.read_format; 819 820 /* create new id val to be a fixed offset from evsel id */ 821 id = evsel->core.id[0] + 1000000000; 822 823 if (!id) 824 id = 1; 825 826 if (spe->synth_opts.flc) { 827 spe->sample_flc = true; 828 829 /* Level 1 data cache miss */ 830 err = arm_spe_synth_event(session, &attr, id); 831 if (err) 832 return err; 833 spe->l1d_miss_id = id; 834 arm_spe_set_event_name(evlist, id, "l1d-miss"); 835 id += 1; 836 837 /* Level 1 data cache access */ 838 err = arm_spe_synth_event(session, &attr, id); 839 if (err) 840 return err; 841 spe->l1d_access_id = id; 842 arm_spe_set_event_name(evlist, id, "l1d-access"); 843 id += 1; 844 } 845 846 if (spe->synth_opts.llc) { 847 spe->sample_llc = true; 848 849 /* Last level cache miss */ 850 err = arm_spe_synth_event(session, &attr, id); 851 if (err) 852 return err; 853 spe->llc_miss_id = id; 854 arm_spe_set_event_name(evlist, id, "llc-miss"); 855 id += 1; 856 857 /* Last level cache access */ 858 err = arm_spe_synth_event(session, &attr, id); 859 if (err) 860 return err; 861 spe->llc_access_id = id; 862 arm_spe_set_event_name(evlist, id, "llc-access"); 863 id += 1; 864 } 865 866 if (spe->synth_opts.tlb) { 867 spe->sample_tlb = true; 868 869 /* TLB miss */ 870 err = arm_spe_synth_event(session, &attr, id); 871 if (err) 872 return err; 873 spe->tlb_miss_id = id; 874 arm_spe_set_event_name(evlist, id, "tlb-miss"); 875 id += 1; 876 877 /* TLB access */ 878 err = arm_spe_synth_event(session, &attr, id); 879 if (err) 880 return err; 881 spe->tlb_access_id = id; 882 arm_spe_set_event_name(evlist, id, "tlb-access"); 883 id += 1; 884 } 885 886 if (spe->synth_opts.branches) { 887 spe->sample_branch = true; 888 889 /* Branch miss */ 890 err = arm_spe_synth_event(session, &attr, id); 891 if (err) 892 return err; 893 spe->branch_miss_id = id; 894 arm_spe_set_event_name(evlist, id, "branch-miss"); 895 id += 1; 896 } 897 898 if (spe->synth_opts.remote_access) { 899 spe->sample_remote_access = true; 900 901 /* Remote access */ 902 err = arm_spe_synth_event(session, &attr, id); 903 if (err) 904 return err; 905 spe->remote_access_id = id; 906 arm_spe_set_event_name(evlist, id, "remote-access"); 907 id += 1; 908 } 909 910 return 0; 911 } 912 913 int arm_spe_process_auxtrace_info(union perf_event *event, 914 struct perf_session *session) 915 { 916 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info; 917 size_t min_sz = sizeof(u64) * ARM_SPE_AUXTRACE_PRIV_MAX; 918 struct arm_spe *spe; 919 int err; 920 921 if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) + 922 min_sz) 923 return -EINVAL; 924 925 spe = zalloc(sizeof(struct arm_spe)); 926 if (!spe) 927 return -ENOMEM; 928 929 err = auxtrace_queues__init(&spe->queues); 930 if (err) 931 goto err_free; 932 933 spe->session = session; 934 spe->machine = &session->machines.host; /* No kvm support */ 935 spe->auxtrace_type = auxtrace_info->type; 936 spe->pmu_type = auxtrace_info->priv[ARM_SPE_PMU_TYPE]; 937 938 spe->timeless_decoding = arm_spe__is_timeless_decoding(spe); 939 spe->auxtrace.process_event = arm_spe_process_event; 940 spe->auxtrace.process_auxtrace_event = arm_spe_process_auxtrace_event; 941 spe->auxtrace.flush_events = arm_spe_flush; 942 spe->auxtrace.free_events = arm_spe_free_events; 943 spe->auxtrace.free = arm_spe_free; 944 spe->auxtrace.evsel_is_auxtrace = arm_spe_evsel_is_auxtrace; 945 session->auxtrace = &spe->auxtrace; 946 947 arm_spe_print_info(&auxtrace_info->priv[0]); 948 949 if (dump_trace) 950 return 0; 951 952 if (session->itrace_synth_opts && session->itrace_synth_opts->set) 953 spe->synth_opts = *session->itrace_synth_opts; 954 else 955 itrace_synth_opts__set_default(&spe->synth_opts, false); 956 957 err = arm_spe_synth_events(spe, session); 958 if (err) 959 goto err_free_queues; 960 961 err = auxtrace_queues__process_index(&spe->queues, session); 962 if (err) 963 goto err_free_queues; 964 965 if (spe->queues.populated) 966 spe->data_queued = true; 967 968 return 0; 969 970 err_free_queues: 971 auxtrace_queues__free(&spe->queues); 972 session->auxtrace = NULL; 973 err_free: 974 free(spe); 975 return err; 976 } 977