1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * auxtrace.c: AUX area trace support 4 * Copyright (c) 2013-2015, Intel Corporation. 5 */ 6 7 #include <inttypes.h> 8 #include <sys/types.h> 9 #include <sys/mman.h> 10 #include <stdbool.h> 11 #include <string.h> 12 #include <limits.h> 13 #include <errno.h> 14 15 #include <linux/kernel.h> 16 #include <linux/perf_event.h> 17 #include <linux/types.h> 18 #include <linux/bitops.h> 19 #include <linux/log2.h> 20 #include <linux/string.h> 21 #include <linux/time64.h> 22 23 #include <sys/param.h> 24 #include <stdlib.h> 25 #include <stdio.h> 26 #include <linux/list.h> 27 #include <linux/zalloc.h> 28 29 #include "evlist.h" 30 #include "dso.h" 31 #include "map.h" 32 #include "pmu.h" 33 #include "evsel.h" 34 #include "evsel_config.h" 35 #include "symbol.h" 36 #include "util/perf_api_probe.h" 37 #include "util/synthetic-events.h" 38 #include "thread_map.h" 39 #include "asm/bug.h" 40 #include "auxtrace.h" 41 42 #include <linux/hash.h> 43 44 #include "event.h" 45 #include "record.h" 46 #include "session.h" 47 #include "debug.h" 48 #include <subcmd/parse-options.h> 49 50 #include "cs-etm.h" 51 #include "intel-pt.h" 52 #include "intel-bts.h" 53 #include "arm-spe.h" 54 #include "s390-cpumsf.h" 55 #include "util/mmap.h" 56 57 #include <linux/ctype.h> 58 #include "symbol/kallsyms.h" 59 #include <internal/lib.h> 60 61 /* 62 * Make a group from 'leader' to 'last', requiring that the events were not 63 * already grouped to a different leader. 64 */ 65 static int evlist__regroup(struct evlist *evlist, struct evsel *leader, struct evsel *last) 66 { 67 struct evsel *evsel; 68 bool grp; 69 70 if (!evsel__is_group_leader(leader)) 71 return -EINVAL; 72 73 grp = false; 74 evlist__for_each_entry(evlist, evsel) { 75 if (grp) { 76 if (!(evsel__leader(evsel) == leader || 77 (evsel__leader(evsel) == evsel && 78 evsel->core.nr_members <= 1))) 79 return -EINVAL; 80 } else if (evsel == leader) { 81 grp = true; 82 } 83 if (evsel == last) 84 break; 85 } 86 87 grp = false; 88 evlist__for_each_entry(evlist, evsel) { 89 if (grp) { 90 if (!evsel__has_leader(evsel, leader)) { 91 evsel__set_leader(evsel, leader); 92 if (leader->core.nr_members < 1) 93 leader->core.nr_members = 1; 94 leader->core.nr_members += 1; 95 } 96 } else if (evsel == leader) { 97 grp = true; 98 } 99 if (evsel == last) 100 break; 101 } 102 103 return 0; 104 } 105 106 static bool auxtrace__dont_decode(struct perf_session *session) 107 { 108 return !session->itrace_synth_opts || 109 session->itrace_synth_opts->dont_decode; 110 } 111 112 int auxtrace_mmap__mmap(struct auxtrace_mmap *mm, 113 struct auxtrace_mmap_params *mp, 114 void *userpg, int fd) 115 { 116 struct perf_event_mmap_page *pc = userpg; 117 118 WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n"); 119 120 mm->userpg = userpg; 121 mm->mask = mp->mask; 122 mm->len = mp->len; 123 mm->prev = 0; 124 mm->idx = mp->idx; 125 mm->tid = mp->tid; 126 mm->cpu = mp->cpu.cpu; 127 128 if (!mp->len) { 129 mm->base = NULL; 130 return 0; 131 } 132 133 pc->aux_offset = mp->offset; 134 pc->aux_size = mp->len; 135 136 mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset); 137 if (mm->base == MAP_FAILED) { 138 pr_debug2("failed to mmap AUX area\n"); 139 mm->base = NULL; 140 return -1; 141 } 142 143 return 0; 144 } 145 146 void auxtrace_mmap__munmap(struct auxtrace_mmap *mm) 147 { 148 if (mm->base) { 149 munmap(mm->base, mm->len); 150 mm->base = NULL; 151 } 152 } 153 154 void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp, 155 off_t auxtrace_offset, 
156 unsigned int auxtrace_pages, 157 bool auxtrace_overwrite) 158 { 159 if (auxtrace_pages) { 160 mp->offset = auxtrace_offset; 161 mp->len = auxtrace_pages * (size_t)page_size; 162 mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0; 163 mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE); 164 pr_debug2("AUX area mmap length %zu\n", mp->len); 165 } else { 166 mp->len = 0; 167 } 168 } 169 170 void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp, 171 struct evlist *evlist, int idx, 172 bool per_cpu) 173 { 174 mp->idx = idx; 175 176 if (per_cpu) { 177 mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx); 178 if (evlist->core.threads) 179 mp->tid = perf_thread_map__pid(evlist->core.threads, 0); 180 else 181 mp->tid = -1; 182 } else { 183 mp->cpu.cpu = -1; 184 mp->tid = perf_thread_map__pid(evlist->core.threads, idx); 185 } 186 } 187 188 #define AUXTRACE_INIT_NR_QUEUES 32 189 190 static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues) 191 { 192 struct auxtrace_queue *queue_array; 193 unsigned int max_nr_queues, i; 194 195 max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue); 196 if (nr_queues > max_nr_queues) 197 return NULL; 198 199 queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue)); 200 if (!queue_array) 201 return NULL; 202 203 for (i = 0; i < nr_queues; i++) { 204 INIT_LIST_HEAD(&queue_array[i].head); 205 queue_array[i].priv = NULL; 206 } 207 208 return queue_array; 209 } 210 211 int auxtrace_queues__init(struct auxtrace_queues *queues) 212 { 213 queues->nr_queues = AUXTRACE_INIT_NR_QUEUES; 214 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues); 215 if (!queues->queue_array) 216 return -ENOMEM; 217 return 0; 218 } 219 220 static int auxtrace_queues__grow(struct auxtrace_queues *queues, 221 unsigned int new_nr_queues) 222 { 223 unsigned int nr_queues = queues->nr_queues; 224 struct auxtrace_queue *queue_array; 225 unsigned int i; 226 227 if (!nr_queues) 228 nr_queues = AUXTRACE_INIT_NR_QUEUES; 229 230 while (nr_queues && nr_queues < new_nr_queues) 231 nr_queues <<= 1; 232 233 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues) 234 return -EINVAL; 235 236 queue_array = auxtrace_alloc_queue_array(nr_queues); 237 if (!queue_array) 238 return -ENOMEM; 239 240 for (i = 0; i < queues->nr_queues; i++) { 241 list_splice_tail(&queues->queue_array[i].head, 242 &queue_array[i].head); 243 queue_array[i].tid = queues->queue_array[i].tid; 244 queue_array[i].cpu = queues->queue_array[i].cpu; 245 queue_array[i].set = queues->queue_array[i].set; 246 queue_array[i].priv = queues->queue_array[i].priv; 247 } 248 249 queues->nr_queues = nr_queues; 250 queues->queue_array = queue_array; 251 252 return 0; 253 } 254 255 static void *auxtrace_copy_data(u64 size, struct perf_session *session) 256 { 257 int fd = perf_data__fd(session->data); 258 void *p; 259 ssize_t ret; 260 261 if (size > SSIZE_MAX) 262 return NULL; 263 264 p = malloc(size); 265 if (!p) 266 return NULL; 267 268 ret = readn(fd, p, size); 269 if (ret != (ssize_t)size) { 270 free(p); 271 return NULL; 272 } 273 274 return p; 275 } 276 277 static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues, 278 unsigned int idx, 279 struct auxtrace_buffer *buffer) 280 { 281 struct auxtrace_queue *queue; 282 int err; 283 284 if (idx >= queues->nr_queues) { 285 err = auxtrace_queues__grow(queues, idx + 1); 286 if (err) 287 return err; 288 } 289 290 queue = &queues->queue_array[idx]; 291 292 if (!queue->set) { 293 queue->set = true; 294 queue->tid = buffer->tid; 
295 queue->cpu = buffer->cpu.cpu; 296 } 297 298 buffer->buffer_nr = queues->next_buffer_nr++; 299 300 list_add_tail(&buffer->list, &queue->head); 301 302 queues->new_data = true; 303 queues->populated = true; 304 305 return 0; 306 } 307 308 /* Limit buffers to 32MiB on 32-bit */ 309 #define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024) 310 311 static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues, 312 unsigned int idx, 313 struct auxtrace_buffer *buffer) 314 { 315 u64 sz = buffer->size; 316 bool consecutive = false; 317 struct auxtrace_buffer *b; 318 int err; 319 320 while (sz > BUFFER_LIMIT_FOR_32_BIT) { 321 b = memdup(buffer, sizeof(struct auxtrace_buffer)); 322 if (!b) 323 return -ENOMEM; 324 b->size = BUFFER_LIMIT_FOR_32_BIT; 325 b->consecutive = consecutive; 326 err = auxtrace_queues__queue_buffer(queues, idx, b); 327 if (err) { 328 auxtrace_buffer__free(b); 329 return err; 330 } 331 buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT; 332 sz -= BUFFER_LIMIT_FOR_32_BIT; 333 consecutive = true; 334 } 335 336 buffer->size = sz; 337 buffer->consecutive = consecutive; 338 339 return 0; 340 } 341 342 static bool filter_cpu(struct perf_session *session, struct perf_cpu cpu) 343 { 344 unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap; 345 346 return cpu_bitmap && cpu.cpu != -1 && !test_bit(cpu.cpu, cpu_bitmap); 347 } 348 349 static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues, 350 struct perf_session *session, 351 unsigned int idx, 352 struct auxtrace_buffer *buffer, 353 struct auxtrace_buffer **buffer_ptr) 354 { 355 int err = -ENOMEM; 356 357 if (filter_cpu(session, buffer->cpu)) 358 return 0; 359 360 buffer = memdup(buffer, sizeof(*buffer)); 361 if (!buffer) 362 return -ENOMEM; 363 364 if (session->one_mmap) { 365 buffer->data = buffer->data_offset - session->one_mmap_offset + 366 session->one_mmap_addr; 367 } else if (perf_data__is_pipe(session->data)) { 368 buffer->data = auxtrace_copy_data(buffer->size, session); 369 if (!buffer->data) 370 goto out_free; 371 buffer->data_needs_freeing = true; 372 } else if (BITS_PER_LONG == 32 && 373 buffer->size > BUFFER_LIMIT_FOR_32_BIT) { 374 err = auxtrace_queues__split_buffer(queues, idx, buffer); 375 if (err) 376 goto out_free; 377 } 378 379 err = auxtrace_queues__queue_buffer(queues, idx, buffer); 380 if (err) 381 goto out_free; 382 383 /* FIXME: Doesn't work for split buffer */ 384 if (buffer_ptr) 385 *buffer_ptr = buffer; 386 387 return 0; 388 389 out_free: 390 auxtrace_buffer__free(buffer); 391 return err; 392 } 393 394 int auxtrace_queues__add_event(struct auxtrace_queues *queues, 395 struct perf_session *session, 396 union perf_event *event, off_t data_offset, 397 struct auxtrace_buffer **buffer_ptr) 398 { 399 struct auxtrace_buffer buffer = { 400 .pid = -1, 401 .tid = event->auxtrace.tid, 402 .cpu = { event->auxtrace.cpu }, 403 .data_offset = data_offset, 404 .offset = event->auxtrace.offset, 405 .reference = event->auxtrace.reference, 406 .size = event->auxtrace.size, 407 }; 408 unsigned int idx = event->auxtrace.idx; 409 410 return auxtrace_queues__add_buffer(queues, session, idx, &buffer, 411 buffer_ptr); 412 } 413 414 static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues, 415 struct perf_session *session, 416 off_t file_offset, size_t sz) 417 { 418 union perf_event *event; 419 int err; 420 char buf[PERF_SAMPLE_MAX_SIZE]; 421 422 err = perf_session__peek_event(session, file_offset, buf, 423 PERF_SAMPLE_MAX_SIZE, &event, NULL); 424 if (err) 425 return err; 426 427 if 
(event->header.type == PERF_RECORD_AUXTRACE) { 428 if (event->header.size < sizeof(struct perf_record_auxtrace) || 429 event->header.size != sz) { 430 err = -EINVAL; 431 goto out; 432 } 433 file_offset += event->header.size; 434 err = auxtrace_queues__add_event(queues, session, event, 435 file_offset, NULL); 436 } 437 out: 438 return err; 439 } 440 441 void auxtrace_queues__free(struct auxtrace_queues *queues) 442 { 443 unsigned int i; 444 445 for (i = 0; i < queues->nr_queues; i++) { 446 while (!list_empty(&queues->queue_array[i].head)) { 447 struct auxtrace_buffer *buffer; 448 449 buffer = list_entry(queues->queue_array[i].head.next, 450 struct auxtrace_buffer, list); 451 list_del_init(&buffer->list); 452 auxtrace_buffer__free(buffer); 453 } 454 } 455 456 zfree(&queues->queue_array); 457 queues->nr_queues = 0; 458 } 459 460 static void auxtrace_heapify(struct auxtrace_heap_item *heap_array, 461 unsigned int pos, unsigned int queue_nr, 462 u64 ordinal) 463 { 464 unsigned int parent; 465 466 while (pos) { 467 parent = (pos - 1) >> 1; 468 if (heap_array[parent].ordinal <= ordinal) 469 break; 470 heap_array[pos] = heap_array[parent]; 471 pos = parent; 472 } 473 heap_array[pos].queue_nr = queue_nr; 474 heap_array[pos].ordinal = ordinal; 475 } 476 477 int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr, 478 u64 ordinal) 479 { 480 struct auxtrace_heap_item *heap_array; 481 482 if (queue_nr >= heap->heap_sz) { 483 unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES; 484 485 while (heap_sz <= queue_nr) 486 heap_sz <<= 1; 487 heap_array = realloc(heap->heap_array, 488 heap_sz * sizeof(struct auxtrace_heap_item)); 489 if (!heap_array) 490 return -ENOMEM; 491 heap->heap_array = heap_array; 492 heap->heap_sz = heap_sz; 493 } 494 495 auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal); 496 497 return 0; 498 } 499 500 void auxtrace_heap__free(struct auxtrace_heap *heap) 501 { 502 zfree(&heap->heap_array); 503 heap->heap_cnt = 0; 504 heap->heap_sz = 0; 505 } 506 507 void auxtrace_heap__pop(struct auxtrace_heap *heap) 508 { 509 unsigned int pos, last, heap_cnt = heap->heap_cnt; 510 struct auxtrace_heap_item *heap_array; 511 512 if (!heap_cnt) 513 return; 514 515 heap->heap_cnt -= 1; 516 517 heap_array = heap->heap_array; 518 519 pos = 0; 520 while (1) { 521 unsigned int left, right; 522 523 left = (pos << 1) + 1; 524 if (left >= heap_cnt) 525 break; 526 right = left + 1; 527 if (right >= heap_cnt) { 528 heap_array[pos] = heap_array[left]; 529 return; 530 } 531 if (heap_array[left].ordinal < heap_array[right].ordinal) { 532 heap_array[pos] = heap_array[left]; 533 pos = left; 534 } else { 535 heap_array[pos] = heap_array[right]; 536 pos = right; 537 } 538 } 539 540 last = heap_cnt - 1; 541 auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr, 542 heap_array[last].ordinal); 543 } 544 545 size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr, 546 struct evlist *evlist) 547 { 548 if (itr) 549 return itr->info_priv_size(itr, evlist); 550 return 0; 551 } 552 553 static int auxtrace_not_supported(void) 554 { 555 pr_err("AUX area tracing is not supported on this architecture\n"); 556 return -EINVAL; 557 } 558 559 int auxtrace_record__info_fill(struct auxtrace_record *itr, 560 struct perf_session *session, 561 struct perf_record_auxtrace_info *auxtrace_info, 562 size_t priv_size) 563 { 564 if (itr) 565 return itr->info_fill(itr, session, auxtrace_info, priv_size); 566 return auxtrace_not_supported(); 567 } 568 569 void auxtrace_record__free(struct 
auxtrace_record *itr) 570 { 571 if (itr) 572 itr->free(itr); 573 } 574 575 int auxtrace_record__snapshot_start(struct auxtrace_record *itr) 576 { 577 if (itr && itr->snapshot_start) 578 return itr->snapshot_start(itr); 579 return 0; 580 } 581 582 int auxtrace_record__snapshot_finish(struct auxtrace_record *itr, bool on_exit) 583 { 584 if (!on_exit && itr && itr->snapshot_finish) 585 return itr->snapshot_finish(itr); 586 return 0; 587 } 588 589 int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx, 590 struct auxtrace_mmap *mm, 591 unsigned char *data, u64 *head, u64 *old) 592 { 593 if (itr && itr->find_snapshot) 594 return itr->find_snapshot(itr, idx, mm, data, head, old); 595 return 0; 596 } 597 598 int auxtrace_record__options(struct auxtrace_record *itr, 599 struct evlist *evlist, 600 struct record_opts *opts) 601 { 602 if (itr) { 603 itr->evlist = evlist; 604 return itr->recording_options(itr, evlist, opts); 605 } 606 return 0; 607 } 608 609 u64 auxtrace_record__reference(struct auxtrace_record *itr) 610 { 611 if (itr) 612 return itr->reference(itr); 613 return 0; 614 } 615 616 int auxtrace_parse_snapshot_options(struct auxtrace_record *itr, 617 struct record_opts *opts, const char *str) 618 { 619 if (!str) 620 return 0; 621 622 /* PMU-agnostic options */ 623 switch (*str) { 624 case 'e': 625 opts->auxtrace_snapshot_on_exit = true; 626 str++; 627 break; 628 default: 629 break; 630 } 631 632 if (itr && itr->parse_snapshot_options) 633 return itr->parse_snapshot_options(itr, opts, str); 634 635 pr_err("No AUX area tracing to snapshot\n"); 636 return -EINVAL; 637 } 638 639 int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx) 640 { 641 struct evsel *evsel; 642 643 if (!itr->evlist || !itr->pmu) 644 return -EINVAL; 645 646 evlist__for_each_entry(itr->evlist, evsel) { 647 if (evsel->core.attr.type == itr->pmu->type) { 648 if (evsel->disabled) 649 return 0; 650 return evlist__enable_event_idx(itr->evlist, evsel, idx); 651 } 652 } 653 return -EINVAL; 654 } 655 656 /* 657 * Event record size is 16-bit which results in a maximum size of about 64KiB. 658 * Allow about 4KiB for the rest of the sample record, to give a maximum 659 * AUX area sample size of 60KiB. 660 */ 661 #define MAX_AUX_SAMPLE_SIZE (60 * 1024) 662 663 /* Arbitrary default size if no other default provided */ 664 #define DEFAULT_AUX_SAMPLE_SIZE (4 * 1024) 665 666 static int auxtrace_validate_aux_sample_size(struct evlist *evlist, 667 struct record_opts *opts) 668 { 669 struct evsel *evsel; 670 bool has_aux_leader = false; 671 u32 sz; 672 673 evlist__for_each_entry(evlist, evsel) { 674 sz = evsel->core.attr.aux_sample_size; 675 if (evsel__is_group_leader(evsel)) { 676 has_aux_leader = evsel__is_aux_event(evsel); 677 if (sz) { 678 if (has_aux_leader) 679 pr_err("Cannot add AUX area sampling to an AUX area event\n"); 680 else 681 pr_err("Cannot add AUX area sampling to a group leader\n"); 682 return -EINVAL; 683 } 684 } 685 if (sz > MAX_AUX_SAMPLE_SIZE) { 686 pr_err("AUX area sample size %u too big, max. 
%d\n", 687 sz, MAX_AUX_SAMPLE_SIZE); 688 return -EINVAL; 689 } 690 if (sz) { 691 if (!has_aux_leader) { 692 pr_err("Cannot add AUX area sampling because group leader is not an AUX area event\n"); 693 return -EINVAL; 694 } 695 evsel__set_sample_bit(evsel, AUX); 696 opts->auxtrace_sample_mode = true; 697 } else { 698 evsel__reset_sample_bit(evsel, AUX); 699 } 700 } 701 702 if (!opts->auxtrace_sample_mode) { 703 pr_err("AUX area sampling requires an AUX area event group leader plus other events to which to add samples\n"); 704 return -EINVAL; 705 } 706 707 if (!perf_can_aux_sample()) { 708 pr_err("AUX area sampling is not supported by kernel\n"); 709 return -EINVAL; 710 } 711 712 return 0; 713 } 714 715 int auxtrace_parse_sample_options(struct auxtrace_record *itr, 716 struct evlist *evlist, 717 struct record_opts *opts, const char *str) 718 { 719 struct evsel_config_term *term; 720 struct evsel *aux_evsel; 721 bool has_aux_sample_size = false; 722 bool has_aux_leader = false; 723 struct evsel *evsel; 724 char *endptr; 725 unsigned long sz; 726 727 if (!str) 728 goto no_opt; 729 730 if (!itr) { 731 pr_err("No AUX area event to sample\n"); 732 return -EINVAL; 733 } 734 735 sz = strtoul(str, &endptr, 0); 736 if (*endptr || sz > UINT_MAX) { 737 pr_err("Bad AUX area sampling option: '%s'\n", str); 738 return -EINVAL; 739 } 740 741 if (!sz) 742 sz = itr->default_aux_sample_size; 743 744 if (!sz) 745 sz = DEFAULT_AUX_SAMPLE_SIZE; 746 747 /* Set aux_sample_size based on --aux-sample option */ 748 evlist__for_each_entry(evlist, evsel) { 749 if (evsel__is_group_leader(evsel)) { 750 has_aux_leader = evsel__is_aux_event(evsel); 751 } else if (has_aux_leader) { 752 evsel->core.attr.aux_sample_size = sz; 753 } 754 } 755 no_opt: 756 aux_evsel = NULL; 757 /* Override with aux_sample_size from config term */ 758 evlist__for_each_entry(evlist, evsel) { 759 if (evsel__is_aux_event(evsel)) 760 aux_evsel = evsel; 761 term = evsel__get_config_term(evsel, AUX_SAMPLE_SIZE); 762 if (term) { 763 has_aux_sample_size = true; 764 evsel->core.attr.aux_sample_size = term->val.aux_sample_size; 765 /* If possible, group with the AUX event */ 766 if (aux_evsel && evsel->core.attr.aux_sample_size) 767 evlist__regroup(evlist, aux_evsel, evsel); 768 } 769 } 770 771 if (!str && !has_aux_sample_size) 772 return 0; 773 774 if (!itr) { 775 pr_err("No AUX area event to sample\n"); 776 return -EINVAL; 777 } 778 779 return auxtrace_validate_aux_sample_size(evlist, opts); 780 } 781 782 void auxtrace_regroup_aux_output(struct evlist *evlist) 783 { 784 struct evsel *evsel, *aux_evsel = NULL; 785 struct evsel_config_term *term; 786 787 evlist__for_each_entry(evlist, evsel) { 788 if (evsel__is_aux_event(evsel)) 789 aux_evsel = evsel; 790 term = evsel__get_config_term(evsel, AUX_OUTPUT); 791 /* If possible, group with the AUX event */ 792 if (term && aux_evsel) 793 evlist__regroup(evlist, aux_evsel, evsel); 794 } 795 } 796 797 struct auxtrace_record *__weak 798 auxtrace_record__init(struct evlist *evlist __maybe_unused, int *err) 799 { 800 *err = 0; 801 return NULL; 802 } 803 804 static int auxtrace_index__alloc(struct list_head *head) 805 { 806 struct auxtrace_index *auxtrace_index; 807 808 auxtrace_index = malloc(sizeof(struct auxtrace_index)); 809 if (!auxtrace_index) 810 return -ENOMEM; 811 812 auxtrace_index->nr = 0; 813 INIT_LIST_HEAD(&auxtrace_index->list); 814 815 list_add_tail(&auxtrace_index->list, head); 816 817 return 0; 818 } 819 820 void auxtrace_index__free(struct list_head *head) 821 { 822 struct auxtrace_index 
*auxtrace_index, *n; 823 824 list_for_each_entry_safe(auxtrace_index, n, head, list) { 825 list_del_init(&auxtrace_index->list); 826 free(auxtrace_index); 827 } 828 } 829 830 static struct auxtrace_index *auxtrace_index__last(struct list_head *head) 831 { 832 struct auxtrace_index *auxtrace_index; 833 int err; 834 835 if (list_empty(head)) { 836 err = auxtrace_index__alloc(head); 837 if (err) 838 return NULL; 839 } 840 841 auxtrace_index = list_entry(head->prev, struct auxtrace_index, list); 842 843 if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) { 844 err = auxtrace_index__alloc(head); 845 if (err) 846 return NULL; 847 auxtrace_index = list_entry(head->prev, struct auxtrace_index, 848 list); 849 } 850 851 return auxtrace_index; 852 } 853 854 int auxtrace_index__auxtrace_event(struct list_head *head, 855 union perf_event *event, off_t file_offset) 856 { 857 struct auxtrace_index *auxtrace_index; 858 size_t nr; 859 860 auxtrace_index = auxtrace_index__last(head); 861 if (!auxtrace_index) 862 return -ENOMEM; 863 864 nr = auxtrace_index->nr; 865 auxtrace_index->entries[nr].file_offset = file_offset; 866 auxtrace_index->entries[nr].sz = event->header.size; 867 auxtrace_index->nr += 1; 868 869 return 0; 870 } 871 872 static int auxtrace_index__do_write(int fd, 873 struct auxtrace_index *auxtrace_index) 874 { 875 struct auxtrace_index_entry ent; 876 size_t i; 877 878 for (i = 0; i < auxtrace_index->nr; i++) { 879 ent.file_offset = auxtrace_index->entries[i].file_offset; 880 ent.sz = auxtrace_index->entries[i].sz; 881 if (writen(fd, &ent, sizeof(ent)) != sizeof(ent)) 882 return -errno; 883 } 884 return 0; 885 } 886 887 int auxtrace_index__write(int fd, struct list_head *head) 888 { 889 struct auxtrace_index *auxtrace_index; 890 u64 total = 0; 891 int err; 892 893 list_for_each_entry(auxtrace_index, head, list) 894 total += auxtrace_index->nr; 895 896 if (writen(fd, &total, sizeof(total)) != sizeof(total)) 897 return -errno; 898 899 list_for_each_entry(auxtrace_index, head, list) { 900 err = auxtrace_index__do_write(fd, auxtrace_index); 901 if (err) 902 return err; 903 } 904 905 return 0; 906 } 907 908 static int auxtrace_index__process_entry(int fd, struct list_head *head, 909 bool needs_swap) 910 { 911 struct auxtrace_index *auxtrace_index; 912 struct auxtrace_index_entry ent; 913 size_t nr; 914 915 if (readn(fd, &ent, sizeof(ent)) != sizeof(ent)) 916 return -1; 917 918 auxtrace_index = auxtrace_index__last(head); 919 if (!auxtrace_index) 920 return -1; 921 922 nr = auxtrace_index->nr; 923 if (needs_swap) { 924 auxtrace_index->entries[nr].file_offset = 925 bswap_64(ent.file_offset); 926 auxtrace_index->entries[nr].sz = bswap_64(ent.sz); 927 } else { 928 auxtrace_index->entries[nr].file_offset = ent.file_offset; 929 auxtrace_index->entries[nr].sz = ent.sz; 930 } 931 932 auxtrace_index->nr = nr + 1; 933 934 return 0; 935 } 936 937 int auxtrace_index__process(int fd, u64 size, struct perf_session *session, 938 bool needs_swap) 939 { 940 struct list_head *head = &session->auxtrace_index; 941 u64 nr; 942 943 if (readn(fd, &nr, sizeof(u64)) != sizeof(u64)) 944 return -1; 945 946 if (needs_swap) 947 nr = bswap_64(nr); 948 949 if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size) 950 return -1; 951 952 while (nr--) { 953 int err; 954 955 err = auxtrace_index__process_entry(fd, head, needs_swap); 956 if (err) 957 return -1; 958 } 959 960 return 0; 961 } 962 963 static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues, 964 struct perf_session *session, 965 
struct auxtrace_index_entry *ent) 966 { 967 return auxtrace_queues__add_indexed_event(queues, session, 968 ent->file_offset, ent->sz); 969 } 970 971 int auxtrace_queues__process_index(struct auxtrace_queues *queues, 972 struct perf_session *session) 973 { 974 struct auxtrace_index *auxtrace_index; 975 struct auxtrace_index_entry *ent; 976 size_t i; 977 int err; 978 979 if (auxtrace__dont_decode(session)) 980 return 0; 981 982 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) { 983 for (i = 0; i < auxtrace_index->nr; i++) { 984 ent = &auxtrace_index->entries[i]; 985 err = auxtrace_queues__process_index_entry(queues, 986 session, 987 ent); 988 if (err) 989 return err; 990 } 991 } 992 return 0; 993 } 994 995 struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue, 996 struct auxtrace_buffer *buffer) 997 { 998 if (buffer) { 999 if (list_is_last(&buffer->list, &queue->head)) 1000 return NULL; 1001 return list_entry(buffer->list.next, struct auxtrace_buffer, 1002 list); 1003 } else { 1004 if (list_empty(&queue->head)) 1005 return NULL; 1006 return list_entry(queue->head.next, struct auxtrace_buffer, 1007 list); 1008 } 1009 } 1010 1011 struct auxtrace_queue *auxtrace_queues__sample_queue(struct auxtrace_queues *queues, 1012 struct perf_sample *sample, 1013 struct perf_session *session) 1014 { 1015 struct perf_sample_id *sid; 1016 unsigned int idx; 1017 u64 id; 1018 1019 id = sample->id; 1020 if (!id) 1021 return NULL; 1022 1023 sid = evlist__id2sid(session->evlist, id); 1024 if (!sid) 1025 return NULL; 1026 1027 idx = sid->idx; 1028 1029 if (idx >= queues->nr_queues) 1030 return NULL; 1031 1032 return &queues->queue_array[idx]; 1033 } 1034 1035 int auxtrace_queues__add_sample(struct auxtrace_queues *queues, 1036 struct perf_session *session, 1037 struct perf_sample *sample, u64 data_offset, 1038 u64 reference) 1039 { 1040 struct auxtrace_buffer buffer = { 1041 .pid = -1, 1042 .data_offset = data_offset, 1043 .reference = reference, 1044 .size = sample->aux_sample.size, 1045 }; 1046 struct perf_sample_id *sid; 1047 u64 id = sample->id; 1048 unsigned int idx; 1049 1050 if (!id) 1051 return -EINVAL; 1052 1053 sid = evlist__id2sid(session->evlist, id); 1054 if (!sid) 1055 return -ENOENT; 1056 1057 idx = sid->idx; 1058 buffer.tid = sid->tid; 1059 buffer.cpu = sid->cpu; 1060 1061 return auxtrace_queues__add_buffer(queues, session, idx, &buffer, NULL); 1062 } 1063 1064 struct queue_data { 1065 bool samples; 1066 bool events; 1067 }; 1068 1069 static int auxtrace_queue_data_cb(struct perf_session *session, 1070 union perf_event *event, u64 offset, 1071 void *data) 1072 { 1073 struct queue_data *qd = data; 1074 struct perf_sample sample; 1075 int err; 1076 1077 if (qd->events && event->header.type == PERF_RECORD_AUXTRACE) { 1078 if (event->header.size < sizeof(struct perf_record_auxtrace)) 1079 return -EINVAL; 1080 offset += event->header.size; 1081 return session->auxtrace->queue_data(session, NULL, event, 1082 offset); 1083 } 1084 1085 if (!qd->samples || event->header.type != PERF_RECORD_SAMPLE) 1086 return 0; 1087 1088 err = evlist__parse_sample(session->evlist, event, &sample); 1089 if (err) 1090 return err; 1091 1092 if (!sample.aux_sample.size) 1093 return 0; 1094 1095 offset += sample.aux_sample.data - (void *)event; 1096 1097 return session->auxtrace->queue_data(session, &sample, NULL, offset); 1098 } 1099 1100 int auxtrace_queue_data(struct perf_session *session, bool samples, bool events) 1101 { 1102 struct queue_data qd = { 1103 .samples = samples, 1104 
.events = events, 1105 }; 1106 1107 if (auxtrace__dont_decode(session)) 1108 return 0; 1109 1110 if (!session->auxtrace || !session->auxtrace->queue_data) 1111 return -EINVAL; 1112 1113 return perf_session__peek_events(session, session->header.data_offset, 1114 session->header.data_size, 1115 auxtrace_queue_data_cb, &qd); 1116 } 1117 1118 void *auxtrace_buffer__get_data_rw(struct auxtrace_buffer *buffer, int fd, bool rw) 1119 { 1120 int prot = rw ? PROT_READ | PROT_WRITE : PROT_READ; 1121 size_t adj = buffer->data_offset & (page_size - 1); 1122 size_t size = buffer->size + adj; 1123 off_t file_offset = buffer->data_offset - adj; 1124 void *addr; 1125 1126 if (buffer->data) 1127 return buffer->data; 1128 1129 addr = mmap(NULL, size, prot, MAP_SHARED, fd, file_offset); 1130 if (addr == MAP_FAILED) 1131 return NULL; 1132 1133 buffer->mmap_addr = addr; 1134 buffer->mmap_size = size; 1135 1136 buffer->data = addr + adj; 1137 1138 return buffer->data; 1139 } 1140 1141 void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer) 1142 { 1143 if (!buffer->data || !buffer->mmap_addr) 1144 return; 1145 munmap(buffer->mmap_addr, buffer->mmap_size); 1146 buffer->mmap_addr = NULL; 1147 buffer->mmap_size = 0; 1148 buffer->data = NULL; 1149 buffer->use_data = NULL; 1150 } 1151 1152 void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer) 1153 { 1154 auxtrace_buffer__put_data(buffer); 1155 if (buffer->data_needs_freeing) { 1156 buffer->data_needs_freeing = false; 1157 zfree(&buffer->data); 1158 buffer->use_data = NULL; 1159 buffer->size = 0; 1160 } 1161 } 1162 1163 void auxtrace_buffer__free(struct auxtrace_buffer *buffer) 1164 { 1165 auxtrace_buffer__drop_data(buffer); 1166 free(buffer); 1167 } 1168 1169 void auxtrace_synth_error(struct perf_record_auxtrace_error *auxtrace_error, int type, 1170 int code, int cpu, pid_t pid, pid_t tid, u64 ip, 1171 const char *msg, u64 timestamp) 1172 { 1173 size_t size; 1174 1175 memset(auxtrace_error, 0, sizeof(struct perf_record_auxtrace_error)); 1176 1177 auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR; 1178 auxtrace_error->type = type; 1179 auxtrace_error->code = code; 1180 auxtrace_error->cpu = cpu; 1181 auxtrace_error->pid = pid; 1182 auxtrace_error->tid = tid; 1183 auxtrace_error->fmt = 1; 1184 auxtrace_error->ip = ip; 1185 auxtrace_error->time = timestamp; 1186 strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG); 1187 1188 size = (void *)auxtrace_error->msg - (void *)auxtrace_error + 1189 strlen(auxtrace_error->msg) + 1; 1190 auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64)); 1191 } 1192 1193 int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr, 1194 struct perf_tool *tool, 1195 struct perf_session *session, 1196 perf_event__handler_t process) 1197 { 1198 union perf_event *ev; 1199 size_t priv_size; 1200 int err; 1201 1202 pr_debug2("Synthesizing auxtrace information\n"); 1203 priv_size = auxtrace_record__info_priv_size(itr, session->evlist); 1204 ev = zalloc(sizeof(struct perf_record_auxtrace_info) + priv_size); 1205 if (!ev) 1206 return -ENOMEM; 1207 1208 ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO; 1209 ev->auxtrace_info.header.size = sizeof(struct perf_record_auxtrace_info) + 1210 priv_size; 1211 err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info, 1212 priv_size); 1213 if (err) 1214 goto out_free; 1215 1216 err = process(tool, ev, NULL, NULL); 1217 out_free: 1218 free(ev); 1219 return err; 1220 } 1221 1222 static void unleader_evsel(struct evlist *evlist, struct evsel *leader) 1223 
{ 1224 struct evsel *new_leader = NULL; 1225 struct evsel *evsel; 1226 1227 /* Find new leader for the group */ 1228 evlist__for_each_entry(evlist, evsel) { 1229 if (!evsel__has_leader(evsel, leader) || evsel == leader) 1230 continue; 1231 if (!new_leader) 1232 new_leader = evsel; 1233 evsel__set_leader(evsel, new_leader); 1234 } 1235 1236 /* Update group information */ 1237 if (new_leader) { 1238 zfree(&new_leader->group_name); 1239 new_leader->group_name = leader->group_name; 1240 leader->group_name = NULL; 1241 1242 new_leader->core.nr_members = leader->core.nr_members - 1; 1243 leader->core.nr_members = 1; 1244 } 1245 } 1246 1247 static void unleader_auxtrace(struct perf_session *session) 1248 { 1249 struct evsel *evsel; 1250 1251 evlist__for_each_entry(session->evlist, evsel) { 1252 if (auxtrace__evsel_is_auxtrace(session, evsel) && 1253 evsel__is_group_leader(evsel)) { 1254 unleader_evsel(session->evlist, evsel); 1255 } 1256 } 1257 } 1258 1259 int perf_event__process_auxtrace_info(struct perf_session *session, 1260 union perf_event *event) 1261 { 1262 enum auxtrace_type type = event->auxtrace_info.type; 1263 int err; 1264 1265 if (dump_trace) 1266 fprintf(stdout, " type: %u\n", type); 1267 1268 switch (type) { 1269 case PERF_AUXTRACE_INTEL_PT: 1270 err = intel_pt_process_auxtrace_info(event, session); 1271 break; 1272 case PERF_AUXTRACE_INTEL_BTS: 1273 err = intel_bts_process_auxtrace_info(event, session); 1274 break; 1275 case PERF_AUXTRACE_ARM_SPE: 1276 err = arm_spe_process_auxtrace_info(event, session); 1277 break; 1278 case PERF_AUXTRACE_CS_ETM: 1279 err = cs_etm__process_auxtrace_info(event, session); 1280 break; 1281 case PERF_AUXTRACE_S390_CPUMSF: 1282 err = s390_cpumsf_process_auxtrace_info(event, session); 1283 break; 1284 case PERF_AUXTRACE_UNKNOWN: 1285 default: 1286 return -EINVAL; 1287 } 1288 1289 if (err) 1290 return err; 1291 1292 unleader_auxtrace(session); 1293 1294 return 0; 1295 } 1296 1297 s64 perf_event__process_auxtrace(struct perf_session *session, 1298 union perf_event *event) 1299 { 1300 s64 err; 1301 1302 if (dump_trace) 1303 fprintf(stdout, " size: %#"PRI_lx64" offset: %#"PRI_lx64" ref: %#"PRI_lx64" idx: %u tid: %d cpu: %d\n", 1304 event->auxtrace.size, event->auxtrace.offset, 1305 event->auxtrace.reference, event->auxtrace.idx, 1306 event->auxtrace.tid, event->auxtrace.cpu); 1307 1308 if (auxtrace__dont_decode(session)) 1309 return event->auxtrace.size; 1310 1311 if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE) 1312 return -EINVAL; 1313 1314 err = session->auxtrace->process_auxtrace_event(session, event, session->tool); 1315 if (err < 0) 1316 return err; 1317 1318 return event->auxtrace.size; 1319 } 1320 1321 #define PERF_ITRACE_DEFAULT_PERIOD_TYPE PERF_ITRACE_PERIOD_NANOSECS 1322 #define PERF_ITRACE_DEFAULT_PERIOD 100000 1323 #define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ 16 1324 #define PERF_ITRACE_MAX_CALLCHAIN_SZ 1024 1325 #define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ 64 1326 #define PERF_ITRACE_MAX_LAST_BRANCH_SZ 1024 1327 1328 void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts, 1329 bool no_sample) 1330 { 1331 synth_opts->branches = true; 1332 synth_opts->transactions = true; 1333 synth_opts->ptwrites = true; 1334 synth_opts->pwr_events = true; 1335 synth_opts->other_events = true; 1336 synth_opts->errors = true; 1337 synth_opts->flc = true; 1338 synth_opts->llc = true; 1339 synth_opts->tlb = true; 1340 synth_opts->mem = true; 1341 synth_opts->remote_access = true; 1342 1343 if (no_sample) { 1344 
synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS; 1345 synth_opts->period = 1; 1346 synth_opts->calls = true; 1347 } else { 1348 synth_opts->instructions = true; 1349 synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE; 1350 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD; 1351 } 1352 synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ; 1353 synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ; 1354 synth_opts->initial_skip = 0; 1355 } 1356 1357 static int get_flag(const char **ptr, unsigned int *flags) 1358 { 1359 while (1) { 1360 char c = **ptr; 1361 1362 if (c >= 'a' && c <= 'z') { 1363 *flags |= 1 << (c - 'a'); 1364 ++*ptr; 1365 return 0; 1366 } else if (c == ' ') { 1367 ++*ptr; 1368 continue; 1369 } else { 1370 return -1; 1371 } 1372 } 1373 } 1374 1375 static int get_flags(const char **ptr, unsigned int *plus_flags, unsigned int *minus_flags) 1376 { 1377 while (1) { 1378 switch (**ptr) { 1379 case '+': 1380 ++*ptr; 1381 if (get_flag(ptr, plus_flags)) 1382 return -1; 1383 break; 1384 case '-': 1385 ++*ptr; 1386 if (get_flag(ptr, minus_flags)) 1387 return -1; 1388 break; 1389 case ' ': 1390 ++*ptr; 1391 break; 1392 default: 1393 return 0; 1394 } 1395 } 1396 } 1397 1398 /* 1399 * Please check tools/perf/Documentation/perf-script.txt for information 1400 * about the options parsed here, which is introduced after this cset, 1401 * when support in 'perf script' for these options is introduced. 1402 */ 1403 int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts, 1404 const char *str, int unset) 1405 { 1406 const char *p; 1407 char *endptr; 1408 bool period_type_set = false; 1409 bool period_set = false; 1410 1411 synth_opts->set = true; 1412 1413 if (unset) { 1414 synth_opts->dont_decode = true; 1415 return 0; 1416 } 1417 1418 if (!str) { 1419 itrace_synth_opts__set_default(synth_opts, 1420 synth_opts->default_no_sample); 1421 return 0; 1422 } 1423 1424 for (p = str; *p;) { 1425 switch (*p++) { 1426 case 'i': 1427 synth_opts->instructions = true; 1428 while (*p == ' ' || *p == ',') 1429 p += 1; 1430 if (isdigit(*p)) { 1431 synth_opts->period = strtoull(p, &endptr, 10); 1432 period_set = true; 1433 p = endptr; 1434 while (*p == ' ' || *p == ',') 1435 p += 1; 1436 switch (*p++) { 1437 case 'i': 1438 synth_opts->period_type = 1439 PERF_ITRACE_PERIOD_INSTRUCTIONS; 1440 period_type_set = true; 1441 break; 1442 case 't': 1443 synth_opts->period_type = 1444 PERF_ITRACE_PERIOD_TICKS; 1445 period_type_set = true; 1446 break; 1447 case 'm': 1448 synth_opts->period *= 1000; 1449 /* Fall through */ 1450 case 'u': 1451 synth_opts->period *= 1000; 1452 /* Fall through */ 1453 case 'n': 1454 if (*p++ != 's') 1455 goto out_err; 1456 synth_opts->period_type = 1457 PERF_ITRACE_PERIOD_NANOSECS; 1458 period_type_set = true; 1459 break; 1460 case '\0': 1461 goto out; 1462 default: 1463 goto out_err; 1464 } 1465 } 1466 break; 1467 case 'b': 1468 synth_opts->branches = true; 1469 break; 1470 case 'x': 1471 synth_opts->transactions = true; 1472 break; 1473 case 'w': 1474 synth_opts->ptwrites = true; 1475 break; 1476 case 'p': 1477 synth_opts->pwr_events = true; 1478 break; 1479 case 'o': 1480 synth_opts->other_events = true; 1481 break; 1482 case 'e': 1483 synth_opts->errors = true; 1484 if (get_flags(&p, &synth_opts->error_plus_flags, 1485 &synth_opts->error_minus_flags)) 1486 goto out_err; 1487 break; 1488 case 'd': 1489 synth_opts->log = true; 1490 if (get_flags(&p, &synth_opts->log_plus_flags, 1491 &synth_opts->log_minus_flags)) 1492 goto out_err; 1493 break; 1494 
case 'c': 1495 synth_opts->branches = true; 1496 synth_opts->calls = true; 1497 break; 1498 case 'r': 1499 synth_opts->branches = true; 1500 synth_opts->returns = true; 1501 break; 1502 case 'G': 1503 case 'g': 1504 if (p[-1] == 'G') 1505 synth_opts->add_callchain = true; 1506 else 1507 synth_opts->callchain = true; 1508 synth_opts->callchain_sz = 1509 PERF_ITRACE_DEFAULT_CALLCHAIN_SZ; 1510 while (*p == ' ' || *p == ',') 1511 p += 1; 1512 if (isdigit(*p)) { 1513 unsigned int val; 1514 1515 val = strtoul(p, &endptr, 10); 1516 p = endptr; 1517 if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ) 1518 goto out_err; 1519 synth_opts->callchain_sz = val; 1520 } 1521 break; 1522 case 'L': 1523 case 'l': 1524 if (p[-1] == 'L') 1525 synth_opts->add_last_branch = true; 1526 else 1527 synth_opts->last_branch = true; 1528 synth_opts->last_branch_sz = 1529 PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ; 1530 while (*p == ' ' || *p == ',') 1531 p += 1; 1532 if (isdigit(*p)) { 1533 unsigned int val; 1534 1535 val = strtoul(p, &endptr, 10); 1536 p = endptr; 1537 if (!val || 1538 val > PERF_ITRACE_MAX_LAST_BRANCH_SZ) 1539 goto out_err; 1540 synth_opts->last_branch_sz = val; 1541 } 1542 break; 1543 case 's': 1544 synth_opts->initial_skip = strtoul(p, &endptr, 10); 1545 if (p == endptr) 1546 goto out_err; 1547 p = endptr; 1548 break; 1549 case 'f': 1550 synth_opts->flc = true; 1551 break; 1552 case 'm': 1553 synth_opts->llc = true; 1554 break; 1555 case 't': 1556 synth_opts->tlb = true; 1557 break; 1558 case 'a': 1559 synth_opts->remote_access = true; 1560 break; 1561 case 'M': 1562 synth_opts->mem = true; 1563 break; 1564 case 'q': 1565 synth_opts->quick += 1; 1566 break; 1567 case 'A': 1568 synth_opts->approx_ipc = true; 1569 break; 1570 case 'Z': 1571 synth_opts->timeless_decoding = true; 1572 break; 1573 case ' ': 1574 case ',': 1575 break; 1576 default: 1577 goto out_err; 1578 } 1579 } 1580 out: 1581 if (synth_opts->instructions) { 1582 if (!period_type_set) 1583 synth_opts->period_type = 1584 PERF_ITRACE_DEFAULT_PERIOD_TYPE; 1585 if (!period_set) 1586 synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD; 1587 } 1588 1589 return 0; 1590 1591 out_err: 1592 pr_err("Bad Instruction Tracing options '%s'\n", str); 1593 return -EINVAL; 1594 } 1595 1596 int itrace_parse_synth_opts(const struct option *opt, const char *str, int unset) 1597 { 1598 return itrace_do_parse_synth_opts(opt->value, str, unset); 1599 } 1600 1601 static const char * const auxtrace_error_type_name[] = { 1602 [PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace", 1603 }; 1604 1605 static const char *auxtrace_error_name(int type) 1606 { 1607 const char *error_type_name = NULL; 1608 1609 if (type < PERF_AUXTRACE_ERROR_MAX) 1610 error_type_name = auxtrace_error_type_name[type]; 1611 if (!error_type_name) 1612 error_type_name = "unknown AUX"; 1613 return error_type_name; 1614 } 1615 1616 size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp) 1617 { 1618 struct perf_record_auxtrace_error *e = &event->auxtrace_error; 1619 unsigned long long nsecs = e->time; 1620 const char *msg = e->msg; 1621 int ret; 1622 1623 ret = fprintf(fp, " %s error type %u", 1624 auxtrace_error_name(e->type), e->type); 1625 1626 if (e->fmt && nsecs) { 1627 unsigned long secs = nsecs / NSEC_PER_SEC; 1628 1629 nsecs -= secs * NSEC_PER_SEC; 1630 ret += fprintf(fp, " time %lu.%09llu", secs, nsecs); 1631 } else { 1632 ret += fprintf(fp, " time 0"); 1633 } 1634 1635 if (!e->fmt) 1636 msg = (const char *)&e->time; 1637 1638 ret += fprintf(fp, " cpu %d pid %d tid %d ip 
%#"PRI_lx64" code %u: %s\n", 1639 e->cpu, e->pid, e->tid, e->ip, e->code, msg); 1640 return ret; 1641 } 1642 1643 void perf_session__auxtrace_error_inc(struct perf_session *session, 1644 union perf_event *event) 1645 { 1646 struct perf_record_auxtrace_error *e = &event->auxtrace_error; 1647 1648 if (e->type < PERF_AUXTRACE_ERROR_MAX) 1649 session->evlist->stats.nr_auxtrace_errors[e->type] += 1; 1650 } 1651 1652 void events_stats__auxtrace_error_warn(const struct events_stats *stats) 1653 { 1654 int i; 1655 1656 for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) { 1657 if (!stats->nr_auxtrace_errors[i]) 1658 continue; 1659 ui__warning("%u %s errors\n", 1660 stats->nr_auxtrace_errors[i], 1661 auxtrace_error_name(i)); 1662 } 1663 } 1664 1665 int perf_event__process_auxtrace_error(struct perf_session *session, 1666 union perf_event *event) 1667 { 1668 if (auxtrace__dont_decode(session)) 1669 return 0; 1670 1671 perf_event__fprintf_auxtrace_error(event, stdout); 1672 return 0; 1673 } 1674 1675 /* 1676 * In the compat mode kernel runs in 64-bit and perf tool runs in 32-bit mode, 1677 * 32-bit perf tool cannot access 64-bit value atomically, which might lead to 1678 * the issues caused by the below sequence on multiple CPUs: when perf tool 1679 * accesses either the load operation or the store operation for 64-bit value, 1680 * on some architectures the operation is divided into two instructions, one 1681 * is for accessing the low 32-bit value and another is for the high 32-bit; 1682 * thus these two user operations can give the kernel chances to access the 1683 * 64-bit value, and thus leads to the unexpected load values. 1684 * 1685 * kernel (64-bit) user (32-bit) 1686 * 1687 * if (LOAD ->aux_tail) { --, LOAD ->aux_head_lo 1688 * STORE $aux_data | ,---> 1689 * FLUSH $aux_data | | LOAD ->aux_head_hi 1690 * STORE ->aux_head --|-------` smp_rmb() 1691 * } | LOAD $data 1692 * | smp_mb() 1693 * | STORE ->aux_tail_lo 1694 * `-----------> 1695 * STORE ->aux_tail_hi 1696 * 1697 * For this reason, it's impossible for the perf tool to work correctly when 1698 * the AUX head or tail is bigger than 4GB (more than 32 bits length); and we 1699 * can not simply limit the AUX ring buffer to less than 4GB, the reason is 1700 * the pointers can be increased monotonically, whatever the buffer size it is, 1701 * at the end the head and tail can be bigger than 4GB and carry out to the 1702 * high 32-bit. 1703 * 1704 * To mitigate the issues and improve the user experience, we can allow the 1705 * perf tool working in certain conditions and bail out with error if detect 1706 * any overflow cannot be handled. 1707 * 1708 * For reading the AUX head, it reads out the values for three times, and 1709 * compares the high 4 bytes of the values between the first time and the last 1710 * time, if there has no change for high 4 bytes injected by the kernel during 1711 * the user reading sequence, it's safe for use the second value. 1712 * 1713 * When compat_auxtrace_mmap__write_tail() detects any carrying in the high 1714 * 32 bits, it means there have two store operations in user space and it cannot 1715 * promise the atomicity for 64-bit write, so return '-1' in this case to tell 1716 * the caller an overflow error has happened. 
1717 */ 1718 u64 __weak compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm) 1719 { 1720 struct perf_event_mmap_page *pc = mm->userpg; 1721 u64 first, second, last; 1722 u64 mask = (u64)(UINT32_MAX) << 32; 1723 1724 do { 1725 first = READ_ONCE(pc->aux_head); 1726 /* Ensure all reads are done after we read the head */ 1727 smp_rmb(); 1728 second = READ_ONCE(pc->aux_head); 1729 /* Ensure all reads are done after we read the head */ 1730 smp_rmb(); 1731 last = READ_ONCE(pc->aux_head); 1732 } while ((first & mask) != (last & mask)); 1733 1734 return second; 1735 } 1736 1737 int __weak compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail) 1738 { 1739 struct perf_event_mmap_page *pc = mm->userpg; 1740 u64 mask = (u64)(UINT32_MAX) << 32; 1741 1742 if (tail & mask) 1743 return -1; 1744 1745 /* Ensure all reads are done before we write the tail out */ 1746 smp_mb(); 1747 WRITE_ONCE(pc->aux_tail, tail); 1748 return 0; 1749 } 1750 1751 static int __auxtrace_mmap__read(struct mmap *map, 1752 struct auxtrace_record *itr, 1753 struct perf_tool *tool, process_auxtrace_t fn, 1754 bool snapshot, size_t snapshot_size) 1755 { 1756 struct auxtrace_mmap *mm = &map->auxtrace_mmap; 1757 u64 head, old = mm->prev, offset, ref; 1758 unsigned char *data = mm->base; 1759 size_t size, head_off, old_off, len1, len2, padding; 1760 union perf_event ev; 1761 void *data1, *data2; 1762 int kernel_is_64_bit = perf_env__kernel_is_64_bit(evsel__env(NULL)); 1763 1764 head = auxtrace_mmap__read_head(mm, kernel_is_64_bit); 1765 1766 if (snapshot && 1767 auxtrace_record__find_snapshot(itr, mm->idx, mm, data, &head, &old)) 1768 return -1; 1769 1770 if (old == head) 1771 return 0; 1772 1773 pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n", 1774 mm->idx, old, head, head - old); 1775 1776 if (mm->mask) { 1777 head_off = head & mm->mask; 1778 old_off = old & mm->mask; 1779 } else { 1780 head_off = head % mm->len; 1781 old_off = old % mm->len; 1782 } 1783 1784 if (head_off > old_off) 1785 size = head_off - old_off; 1786 else 1787 size = mm->len - (old_off - head_off); 1788 1789 if (snapshot && size > snapshot_size) 1790 size = snapshot_size; 1791 1792 ref = auxtrace_record__reference(itr); 1793 1794 if (head > old || size <= head || mm->mask) { 1795 offset = head - size; 1796 } else { 1797 /* 1798 * When the buffer size is not a power of 2, 'head' wraps at the 1799 * highest multiple of the buffer size, so we have to subtract 1800 * the remainder here. 1801 */ 1802 u64 rem = (0ULL - mm->len) % mm->len; 1803 1804 offset = head - size - rem; 1805 } 1806 1807 if (size > head_off) { 1808 len1 = size - head_off; 1809 data1 = &data[mm->len - len1]; 1810 len2 = head_off; 1811 data2 = &data[0]; 1812 } else { 1813 len1 = size; 1814 data1 = &data[head_off - len1]; 1815 len2 = 0; 1816 data2 = NULL; 1817 } 1818 1819 if (itr->alignment) { 1820 unsigned int unwanted = len1 % itr->alignment; 1821 1822 len1 -= unwanted; 1823 size -= unwanted; 1824 } 1825 1826 /* padding must be written by fn() e.g. 
record__process_auxtrace() */ 1827 padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1); 1828 if (padding) 1829 padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding; 1830 1831 memset(&ev, 0, sizeof(ev)); 1832 ev.auxtrace.header.type = PERF_RECORD_AUXTRACE; 1833 ev.auxtrace.header.size = sizeof(ev.auxtrace); 1834 ev.auxtrace.size = size + padding; 1835 ev.auxtrace.offset = offset; 1836 ev.auxtrace.reference = ref; 1837 ev.auxtrace.idx = mm->idx; 1838 ev.auxtrace.tid = mm->tid; 1839 ev.auxtrace.cpu = mm->cpu; 1840 1841 if (fn(tool, map, &ev, data1, len1, data2, len2)) 1842 return -1; 1843 1844 mm->prev = head; 1845 1846 if (!snapshot) { 1847 int err; 1848 1849 err = auxtrace_mmap__write_tail(mm, head, kernel_is_64_bit); 1850 if (err < 0) 1851 return err; 1852 1853 if (itr->read_finish) { 1854 err = itr->read_finish(itr, mm->idx); 1855 if (err < 0) 1856 return err; 1857 } 1858 } 1859 1860 return 1; 1861 } 1862 1863 int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr, 1864 struct perf_tool *tool, process_auxtrace_t fn) 1865 { 1866 return __auxtrace_mmap__read(map, itr, tool, fn, false, 0); 1867 } 1868 1869 int auxtrace_mmap__read_snapshot(struct mmap *map, 1870 struct auxtrace_record *itr, 1871 struct perf_tool *tool, process_auxtrace_t fn, 1872 size_t snapshot_size) 1873 { 1874 return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size); 1875 } 1876 1877 /** 1878 * struct auxtrace_cache - hash table to implement a cache 1879 * @hashtable: the hashtable 1880 * @sz: hashtable size (number of hlists) 1881 * @entry_size: size of an entry 1882 * @limit: limit the number of entries to this maximum, when reached the cache 1883 * is dropped and caching begins again with an empty cache 1884 * @cnt: current number of entries 1885 * @bits: hashtable size (@sz = 2^@bits) 1886 */ 1887 struct auxtrace_cache { 1888 struct hlist_head *hashtable; 1889 size_t sz; 1890 size_t entry_size; 1891 size_t limit; 1892 size_t cnt; 1893 unsigned int bits; 1894 }; 1895 1896 struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size, 1897 unsigned int limit_percent) 1898 { 1899 struct auxtrace_cache *c; 1900 struct hlist_head *ht; 1901 size_t sz, i; 1902 1903 c = zalloc(sizeof(struct auxtrace_cache)); 1904 if (!c) 1905 return NULL; 1906 1907 sz = 1UL << bits; 1908 1909 ht = calloc(sz, sizeof(struct hlist_head)); 1910 if (!ht) 1911 goto out_free; 1912 1913 for (i = 0; i < sz; i++) 1914 INIT_HLIST_HEAD(&ht[i]); 1915 1916 c->hashtable = ht; 1917 c->sz = sz; 1918 c->entry_size = entry_size; 1919 c->limit = (c->sz * limit_percent) / 100; 1920 c->bits = bits; 1921 1922 return c; 1923 1924 out_free: 1925 free(c); 1926 return NULL; 1927 } 1928 1929 static void auxtrace_cache__drop(struct auxtrace_cache *c) 1930 { 1931 struct auxtrace_cache_entry *entry; 1932 struct hlist_node *tmp; 1933 size_t i; 1934 1935 if (!c) 1936 return; 1937 1938 for (i = 0; i < c->sz; i++) { 1939 hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) { 1940 hlist_del(&entry->hash); 1941 auxtrace_cache__free_entry(c, entry); 1942 } 1943 } 1944 1945 c->cnt = 0; 1946 } 1947 1948 void auxtrace_cache__free(struct auxtrace_cache *c) 1949 { 1950 if (!c) 1951 return; 1952 1953 auxtrace_cache__drop(c); 1954 zfree(&c->hashtable); 1955 free(c); 1956 } 1957 1958 void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c) 1959 { 1960 return malloc(c->entry_size); 1961 } 1962 1963 void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused, 1964 void *entry) 1965 { 1966 free(entry); 1967 } 1968 1969 int 
auxtrace_cache__add(struct auxtrace_cache *c, u32 key, 1970 struct auxtrace_cache_entry *entry) 1971 { 1972 if (c->limit && ++c->cnt > c->limit) 1973 auxtrace_cache__drop(c); 1974 1975 entry->key = key; 1976 hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]); 1977 1978 return 0; 1979 } 1980 1981 static struct auxtrace_cache_entry *auxtrace_cache__rm(struct auxtrace_cache *c, 1982 u32 key) 1983 { 1984 struct auxtrace_cache_entry *entry; 1985 struct hlist_head *hlist; 1986 struct hlist_node *n; 1987 1988 if (!c) 1989 return NULL; 1990 1991 hlist = &c->hashtable[hash_32(key, c->bits)]; 1992 hlist_for_each_entry_safe(entry, n, hlist, hash) { 1993 if (entry->key == key) { 1994 hlist_del(&entry->hash); 1995 return entry; 1996 } 1997 } 1998 1999 return NULL; 2000 } 2001 2002 void auxtrace_cache__remove(struct auxtrace_cache *c, u32 key) 2003 { 2004 struct auxtrace_cache_entry *entry = auxtrace_cache__rm(c, key); 2005 2006 auxtrace_cache__free_entry(c, entry); 2007 } 2008 2009 void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key) 2010 { 2011 struct auxtrace_cache_entry *entry; 2012 struct hlist_head *hlist; 2013 2014 if (!c) 2015 return NULL; 2016 2017 hlist = &c->hashtable[hash_32(key, c->bits)]; 2018 hlist_for_each_entry(entry, hlist, hash) { 2019 if (entry->key == key) 2020 return entry; 2021 } 2022 2023 return NULL; 2024 } 2025 2026 static void addr_filter__free_str(struct addr_filter *filt) 2027 { 2028 zfree(&filt->str); 2029 filt->action = NULL; 2030 filt->sym_from = NULL; 2031 filt->sym_to = NULL; 2032 filt->filename = NULL; 2033 } 2034 2035 static struct addr_filter *addr_filter__new(void) 2036 { 2037 struct addr_filter *filt = zalloc(sizeof(*filt)); 2038 2039 if (filt) 2040 INIT_LIST_HEAD(&filt->list); 2041 2042 return filt; 2043 } 2044 2045 static void addr_filter__free(struct addr_filter *filt) 2046 { 2047 if (filt) 2048 addr_filter__free_str(filt); 2049 free(filt); 2050 } 2051 2052 static void addr_filters__add(struct addr_filters *filts, 2053 struct addr_filter *filt) 2054 { 2055 list_add_tail(&filt->list, &filts->head); 2056 filts->cnt += 1; 2057 } 2058 2059 static void addr_filters__del(struct addr_filters *filts, 2060 struct addr_filter *filt) 2061 { 2062 list_del_init(&filt->list); 2063 filts->cnt -= 1; 2064 } 2065 2066 void addr_filters__init(struct addr_filters *filts) 2067 { 2068 INIT_LIST_HEAD(&filts->head); 2069 filts->cnt = 0; 2070 } 2071 2072 void addr_filters__exit(struct addr_filters *filts) 2073 { 2074 struct addr_filter *filt, *n; 2075 2076 list_for_each_entry_safe(filt, n, &filts->head, list) { 2077 addr_filters__del(filts, filt); 2078 addr_filter__free(filt); 2079 } 2080 } 2081 2082 static int parse_num_or_str(char **inp, u64 *num, const char **str, 2083 const char *str_delim) 2084 { 2085 *inp += strspn(*inp, " "); 2086 2087 if (isdigit(**inp)) { 2088 char *endptr; 2089 2090 if (!num) 2091 return -EINVAL; 2092 errno = 0; 2093 *num = strtoull(*inp, &endptr, 0); 2094 if (errno) 2095 return -errno; 2096 if (endptr == *inp) 2097 return -EINVAL; 2098 *inp = endptr; 2099 } else { 2100 size_t n; 2101 2102 if (!str) 2103 return -EINVAL; 2104 *inp += strspn(*inp, " "); 2105 *str = *inp; 2106 n = strcspn(*inp, str_delim); 2107 if (!n) 2108 return -EINVAL; 2109 *inp += n; 2110 if (**inp) { 2111 **inp = '\0'; 2112 *inp += 1; 2113 } 2114 } 2115 return 0; 2116 } 2117 2118 static int parse_action(struct addr_filter *filt) 2119 { 2120 if (!strcmp(filt->action, "filter")) { 2121 filt->start = true; 2122 filt->range = true; 2123 } else if 
(!strcmp(filt->action, "start")) { 2124 filt->start = true; 2125 } else if (!strcmp(filt->action, "stop")) { 2126 filt->start = false; 2127 } else if (!strcmp(filt->action, "tracestop")) { 2128 filt->start = false; 2129 filt->range = true; 2130 filt->action += 5; /* Change 'tracestop' to 'stop' */ 2131 } else { 2132 return -EINVAL; 2133 } 2134 return 0; 2135 } 2136 2137 static int parse_sym_idx(char **inp, int *idx) 2138 { 2139 *idx = -1; 2140 2141 *inp += strspn(*inp, " "); 2142 2143 if (**inp != '#') 2144 return 0; 2145 2146 *inp += 1; 2147 2148 if (**inp == 'g' || **inp == 'G') { 2149 *inp += 1; 2150 *idx = 0; 2151 } else { 2152 unsigned long num; 2153 char *endptr; 2154 2155 errno = 0; 2156 num = strtoul(*inp, &endptr, 0); 2157 if (errno) 2158 return -errno; 2159 if (endptr == *inp || num > INT_MAX) 2160 return -EINVAL; 2161 *inp = endptr; 2162 *idx = num; 2163 } 2164 2165 return 0; 2166 } 2167 2168 static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx) 2169 { 2170 int err = parse_num_or_str(inp, num, str, " "); 2171 2172 if (!err && *str) 2173 err = parse_sym_idx(inp, idx); 2174 2175 return err; 2176 } 2177 2178 static int parse_one_filter(struct addr_filter *filt, const char **filter_inp) 2179 { 2180 char *fstr; 2181 int err; 2182 2183 filt->str = fstr = strdup(*filter_inp); 2184 if (!fstr) 2185 return -ENOMEM; 2186 2187 err = parse_num_or_str(&fstr, NULL, &filt->action, " "); 2188 if (err) 2189 goto out_err; 2190 2191 err = parse_action(filt); 2192 if (err) 2193 goto out_err; 2194 2195 err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from, 2196 &filt->sym_from_idx); 2197 if (err) 2198 goto out_err; 2199 2200 fstr += strspn(fstr, " "); 2201 2202 if (*fstr == '/') { 2203 fstr += 1; 2204 err = parse_addr_size(&fstr, &filt->size, &filt->sym_to, 2205 &filt->sym_to_idx); 2206 if (err) 2207 goto out_err; 2208 filt->range = true; 2209 } 2210 2211 fstr += strspn(fstr, " "); 2212 2213 if (*fstr == '@') { 2214 fstr += 1; 2215 err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,"); 2216 if (err) 2217 goto out_err; 2218 } 2219 2220 fstr += strspn(fstr, " ,"); 2221 2222 *filter_inp += fstr - filt->str; 2223 2224 return 0; 2225 2226 out_err: 2227 addr_filter__free_str(filt); 2228 2229 return err; 2230 } 2231 2232 int addr_filters__parse_bare_filter(struct addr_filters *filts, 2233 const char *filter) 2234 { 2235 struct addr_filter *filt; 2236 const char *fstr = filter; 2237 int err; 2238 2239 while (*fstr) { 2240 filt = addr_filter__new(); 2241 err = parse_one_filter(filt, &fstr); 2242 if (err) { 2243 addr_filter__free(filt); 2244 addr_filters__exit(filts); 2245 return err; 2246 } 2247 addr_filters__add(filts, filt); 2248 } 2249 2250 return 0; 2251 } 2252 2253 struct sym_args { 2254 const char *name; 2255 u64 start; 2256 u64 size; 2257 int idx; 2258 int cnt; 2259 bool started; 2260 bool global; 2261 bool selected; 2262 bool duplicate; 2263 bool near; 2264 }; 2265 2266 static bool kern_sym_match(struct sym_args *args, const char *name, char type) 2267 { 2268 /* A function with the same name, and global or the n'th found or any */ 2269 return kallsyms__is_function(type) && 2270 !strcmp(name, args->name) && 2271 ((args->global && isupper(type)) || 2272 (args->selected && ++(args->cnt) == args->idx) || 2273 (!args->global && !args->selected)); 2274 } 2275 2276 static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start) 2277 { 2278 struct sym_args *args = arg; 2279 2280 if (args->started) { 2281 if (!args->size) 2282 args->size = start - args->start; 
static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}

int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		if (!filt) {
			/* Avoid a NULL dereference in parse_one_filter() */
			addr_filters__exit(filts);
			return -ENOMEM;
		}
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}

struct sym_args {
	const char	*name;
	u64		start;
	u64		size;
	int		idx;
	int		cnt;
	bool		started;
	bool		global;
	bool		selected;
	bool		duplicate;
	bool		near;
};

static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return kallsyms__is_function(type) &&
	       !strcmp(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}
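/*
 * Note (added for clarity, not in the original source): the callbacks below
 * are invoked by kallsyms__parse() once per line of /proc/kallsyms, which has
 * the form "<address> <type> <name> [module]", e.g. (addresses illustrative):
 *
 *	ffffffff81000000 T _stext
 *	ffffffffc0001000 t some_func	[some_module]
 *
 * Only function symbols are considered (see kallsyms__is_function()), and an
 * uppercase type letter denotes a global symbol (the isupper() check in
 * kern_sym_match() above).
 */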
static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}

static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;

	if (!kallsyms__is_function(type))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	args->size = round_up(start, page_size) + page_size - args->start;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		/* Don't report success when no function symbols were found */
		return err < 0 ? err : -EINVAL;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}

static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}
static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	if (map__load(map) < 0)
		pr_err("File '%s' not found or has no symbols.\n", name);

	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 1;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	if (dso__data_file_size(dso, NULL)) {
		pr_err("Failed to determine filter for %s\nCannot determine file size.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = 0;
	filt->size = dso->data.file_size;

	return 0;
}

static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso; /* Drop the dso reference on this error path too */

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}
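/*
 * Note (added for clarity, not in the original source): once symbols have been
 * resolved to addresses, addr_filter__to_str() below rebuilds the filter in
 * the purely numeric form that is appended to the event's filter string, e.g.
 * (values and path illustrative):
 *
 *	filter 0x401040/0x1a0@/usr/bin/foo
 *	stop 0xffffffff81234567
 */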
static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}

static int parse_addr_filter(struct evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static int evsel__nr_addr_filter(struct evsel *evsel)
{
	struct perf_pmu *pmu = evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}

int auxtrace_parse_filters(struct evlist *evlist)
{
	struct evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}

int auxtrace__process_event(struct perf_session *session, union perf_event *event,
			    struct perf_sample *sample, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->process_event(session, event, sample, tool);
}
void auxtrace__dump_auxtrace_sample(struct perf_session *session,
				    struct perf_sample *sample)
{
	if (!session->auxtrace || !session->auxtrace->dump_auxtrace_sample ||
	    auxtrace__dont_decode(session))
		return;

	session->auxtrace->dump_auxtrace_sample(session, sample);
}

int auxtrace__flush_events(struct perf_session *session, struct perf_tool *tool)
{
	if (!session->auxtrace)
		return 0;

	return session->auxtrace->flush_events(session, tool);
}

void auxtrace__free_events(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	session->auxtrace->free_events(session);
}

void auxtrace__free(struct perf_session *session)
{
	if (!session->auxtrace)
		return;

	session->auxtrace->free(session);
}

bool auxtrace__evsel_is_auxtrace(struct perf_session *session,
				 struct evsel *evsel)
{
	if (!session->auxtrace || !session->auxtrace->evsel_is_auxtrace)
		return false;

	return session->auxtrace->evsel_is_auxtrace(session, evsel);
}
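/*
 * Usage note (added for clarity, not in the original source): the address
 * filters handled by auxtrace_parse_filters() typically come from the
 * --filter option of perf record with an AUX area event, for example
 * (command and path are placeholders):
 *
 *	perf record -e intel_pt// --filter 'filter main @ /usr/bin/foo' -- /usr/bin/foo
 *
 * The number of filters accepted is limited by the PMU's nr_addr_filters
 * sysfs value read in evsel__nr_addr_filter() above.
 */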