/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <inttypes.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>
#include <string.h>
#include <limits.h>
#include <errno.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <linux/list.h>

#include "../perf.h"
#include "util.h"
#include "evlist.h"
#include "dso.h"
#include "map.h"
#include "pmu.h"
#include "evsel.h"
#include "cpumap.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "intel-pt.h"
#include "intel-bts.h"

#include "sane_ctype.h"
#include "symbol/kallsyms.h"

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu;

	if (!mp->len) {
		mm->base = NULL;
		return 0;
	}

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pr_err("Cannot use AUX area tracing mmaps\n");
	return -1;
#endif

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}
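/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * a caller typically fills in auxtrace_mmap_params, points them at a
 * queue index, then maps the AUX area alongside the perf event mmap:
 *
 *	struct auxtrace_mmap_params mp;
 *	struct auxtrace_mmap mm = { .base = NULL };
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   overwrite);
 *	auxtrace_mmap_params__set_idx(&mp, evlist, idx, per_cpu);
 *	if (auxtrace_mmap__mmap(&mm, &mp, userpg, fd))
 *		return -1;	// hypothetical error path
 *	...
 *	auxtrace_mmap__munmap(&mm);
 *
 * The variable names above are hypothetical caller state; the functions
 * are the ones defined in this file.
 */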
void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->cpus->map[idx];
		if (evlist->threads)
			mp->tid = thread_map__pid(evlist->threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = thread_map__pid(evlist->threads, idx);
	}
}

#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data_file__fd(session->file);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
		return -EINVAL;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}
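/*
 * Editor's note (illustrative, not from the original source): queue space
 * grows by doubling. Starting from AUXTRACE_INIT_NR_QUEUES (32), adding a
 * buffer for, say, idx 100 makes auxtrace_queues__grow() double 32 -> 64 ->
 * 128, then splice each existing per-queue buffer list into the new array.
 * buffer_nr values are global across all queues, so buffers can later be
 * ordered in the sequence they were added.
 */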
/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__add_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
					     struct perf_session *session,
					     unsigned int idx,
					     struct auxtrace_buffer *buffer)
{
	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data_file__is_pipe(session->file)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			return -ENOMEM;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		int err;

		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			return err;
	}

	return auxtrace_queues__add_buffer(queues, idx, buffer);
}

static bool filter_cpu(struct perf_session *session, int cpu)
{
	unsigned long *cpu_bitmap = session->itrace_synth_opts->cpu_bitmap;

	return cpu_bitmap && cpu != -1 && !test_bit(cpu, cpu_bitmap);
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer *buffer;
	unsigned int idx;
	int err;

	if (filter_cpu(session, event->auxtrace.cpu))
		return 0;

	buffer = zalloc(sizeof(struct auxtrace_buffer));
	if (!buffer)
		return -ENOMEM;

	buffer->pid = -1;
	buffer->tid = event->auxtrace.tid;
	buffer->cpu = event->auxtrace.cpu;
	buffer->data_offset = data_offset;
	buffer->offset = event->auxtrace.offset;
	buffer->reference = event->auxtrace.reference;
	buffer->size = event->auxtrace.size;
	idx = event->auxtrace.idx;

	err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
	if (err)
		goto out_err;

	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_err:
	auxtrace_buffer__free(buffer);
	return err;
}
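/*
 * Editor's worked example (not from the original source): on a 32-bit
 * build, a 70 MiB trace buffer passed to auxtrace_queues__add_event_buffer()
 * is split into 32 MiB + 32 MiB + 6 MiB pieces. The first piece has
 * consecutive == false and the later pieces consecutive == true, so a
 * decoder knows the data continues without a gap.
 */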
static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct auxtrace_event) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}
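/*
 * Editor's sketch (not part of the original file): the heap above is a
 * binary min-heap keyed on 'ordinal' (typically a timestamp), used to merge
 * per-queue data in time order:
 *
 *	struct auxtrace_heap heap = { .heap_array = NULL };
 *
 *	auxtrace_heap__add(&heap, queue_nr, timestamp);	// push
 *	// heap.heap_array[0] now holds the queue with the smallest ordinal
 *	auxtrace_heap__pop(&heap);			// remove the root
 *	auxtrace_heap__free(&heap);
 *
 * 'queue_nr' and 'timestamp' above are hypothetical caller values.
 */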
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct perf_evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct perf_evlist *evlist,
			     struct record_opts *opts)
{
	if (itr)
		return itr->recording_options(itr, evlist, opts);
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	if (itr)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

struct auxtrace_record *__weak
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}
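/*
 * Editor's note on the on-disk index layout (inferred from the read and
 * write code in this file, not from separate documentation): the index
 * section is a u64 entry count followed by that many {file_offset, sz}
 * u64 pairs:
 *
 *	u64 total;
 *	struct auxtrace_index_entry { u64 file_offset; u64 sz; } ent[total];
 *
 * Readers byte-swap each field when the file's endianness differs.
 */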
int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}
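/*
 * Editor's worked example for the alignment above (illustrative values):
 * with page_size 4096 and data_offset 0x1234, adj = 0x234, so the mmap()
 * starts at file offset 0x1000 (page aligned, as mmap requires) and the
 * returned data pointer is addr + 0x234. mmap_size covers size + adj.
 */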
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->ip = ip;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
				      union perf_event *event,
				      struct perf_session *session)
{
	enum auxtrace_type type = event->auxtrace_info.type;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		return intel_pt_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_INTEL_BTS:
		return intel_bts_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_CS_ETM:
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}
}
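/*
 * Editor's worked example (illustrative, with a hypothetical msg offset):
 * in auxtrace_synth_error() above, if the message is "overflow" (8 chars
 * plus NUL) and msg starts 40 bytes into the record, then
 * size = 40 + 9 = 49 and PERF_ALIGN(49, 8) rounds header.size up to 56,
 * keeping perf event records 8-byte aligned.
 */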
s64 perf_event__process_auxtrace(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_session *session)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRIx64" offset: %#"PRIx64" ref: %#"PRIx64" idx: %u tid: %d cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
{
	synth_opts->instructions = true;
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->ptwrites = true;
	synth_opts->pwr_events = true;
	synth_opts->errors = true;
	synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
	synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
	synth_opts->initial_skip = 0;
}
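/*
 * Editor's examples of option strings accepted by the parser below
 * (derived from the switch cases; see also the documentation cited in
 * the comment that follows):
 *
 *	"i100us"  - synthesize instruction events every 100 microseconds
 *	"g16"     - synthesize callchains 16 entries deep
 *	"cr"      - synthesize branch events for calls and returns
 *	"e"       - synthesize error events
 *	"s1000"   - skip the first 1000 events
 */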
/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which were introduced along with the
 * support for them in 'perf script'.
 */
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'w':
			synth_opts->ptwrites = true;
			break;
		case 'p':
			synth_opts->pwr_events = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'l':
			synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case 's':
			synth_opts->initial_skip = strtoul(p, &endptr, 10);
			if (p == endptr)
				goto out_err;
			p = endptr;
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);
	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}
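/*
 * Editor's note ahead of __auxtrace_mmap__read() below (illustrative):
 * 'head' and 'old' are free-running byte counters that are reduced modulo
 * the buffer length. For a power-of-2 length the mask does this cheaply.
 * Otherwise, because 2^64 is not a multiple of mm->len, head wraps at the
 * largest multiple of mm->len below 2^64, and
 * rem = (0ULL - mm->len) % mm->len (that is, 2^64 mod mm->len) must be
 * subtracted when computing the data offset after a wrap.
 */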
static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & 7;
	if (padding)
		padding = 8 - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum, when reached the cache
 *         is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};
struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	free(c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}

static void addr_filter__free_str(struct addr_filter *filt)
{
	free(filt->str);
	filt->action = NULL;
	filt->sym_from = NULL;
	filt->sym_to = NULL;
	filt->filename = NULL;
	filt->str = NULL;
}

static struct addr_filter *addr_filter__new(void)
{
	struct addr_filter *filt = zalloc(sizeof(*filt));

	if (filt)
		INIT_LIST_HEAD(&filt->list);

	return filt;
}

static void addr_filter__free(struct addr_filter *filt)
{
	if (filt)
		addr_filter__free_str(filt);
	free(filt);
}

static void addr_filters__add(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_add_tail(&filt->list, &filts->head);
	filts->cnt += 1;
}

static void addr_filters__del(struct addr_filters *filts,
			      struct addr_filter *filt)
{
	list_del_init(&filt->list);
	filts->cnt -= 1;
}

void addr_filters__init(struct addr_filters *filts)
{
	INIT_LIST_HEAD(&filts->head);
	filts->cnt = 0;
}

void addr_filters__exit(struct addr_filters *filts)
{
	struct addr_filter *filt, *n;

	list_for_each_entry_safe(filt, n, &filts->head, list) {
		addr_filters__del(filts, filt);
		addr_filter__free(filt);
	}
}
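/*
 * Editor's examples of the bare filter syntax handled by the parsers below
 * (matching the format string printed by parse_addr_filter(); file and
 * symbol names are hypothetical):
 *
 *	filter main @ /bin/ls		- trace only main() in /bin/ls
 *	filter 0x1000 / 0x200		- trace 0x200 bytes from address 0x1000
 *	start func_a, stop func_b	- two filters separated by a comma
 */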
static int parse_num_or_str(char **inp, u64 *num, const char **str,
			    const char *str_delim)
{
	*inp += strspn(*inp, " ");

	if (isdigit(**inp)) {
		char *endptr;

		if (!num)
			return -EINVAL;
		errno = 0;
		*num = strtoull(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp)
			return -EINVAL;
		*inp = endptr;
	} else {
		size_t n;

		if (!str)
			return -EINVAL;
		*inp += strspn(*inp, " ");
		*str = *inp;
		n = strcspn(*inp, str_delim);
		if (!n)
			return -EINVAL;
		*inp += n;
		if (**inp) {
			**inp = '\0';
			*inp += 1;
		}
	}
	return 0;
}

static int parse_action(struct addr_filter *filt)
{
	if (!strcmp(filt->action, "filter")) {
		filt->start = true;
		filt->range = true;
	} else if (!strcmp(filt->action, "start")) {
		filt->start = true;
	} else if (!strcmp(filt->action, "stop")) {
		filt->start = false;
	} else if (!strcmp(filt->action, "tracestop")) {
		filt->start = false;
		filt->range = true;
		filt->action += 5; /* Change 'tracestop' to 'stop' */
	} else {
		return -EINVAL;
	}
	return 0;
}

static int parse_sym_idx(char **inp, int *idx)
{
	*idx = -1;

	*inp += strspn(*inp, " ");

	if (**inp != '#')
		return 0;

	*inp += 1;

	if (**inp == 'g' || **inp == 'G') {
		*inp += 1;
		*idx = 0;
	} else {
		unsigned long num;
		char *endptr;

		errno = 0;
		num = strtoul(*inp, &endptr, 0);
		if (errno)
			return -errno;
		if (endptr == *inp || num > INT_MAX)
			return -EINVAL;
		*inp = endptr;
		*idx = num;
	}

	return 0;
}

static int parse_addr_size(char **inp, u64 *num, const char **str, int *idx)
{
	int err = parse_num_or_str(inp, num, str, " ");

	if (!err && *str)
		err = parse_sym_idx(inp, idx);

	return err;
}

static int parse_one_filter(struct addr_filter *filt, const char **filter_inp)
{
	char *fstr;
	int err;

	filt->str = fstr = strdup(*filter_inp);
	if (!fstr)
		return -ENOMEM;

	err = parse_num_or_str(&fstr, NULL, &filt->action, " ");
	if (err)
		goto out_err;

	err = parse_action(filt);
	if (err)
		goto out_err;

	err = parse_addr_size(&fstr, &filt->addr, &filt->sym_from,
			      &filt->sym_from_idx);
	if (err)
		goto out_err;

	fstr += strspn(fstr, " ");

	if (*fstr == '/') {
		fstr += 1;
		err = parse_addr_size(&fstr, &filt->size, &filt->sym_to,
				      &filt->sym_to_idx);
		if (err)
			goto out_err;
		filt->range = true;
	}

	fstr += strspn(fstr, " ");

	if (*fstr == '@') {
		fstr += 1;
		err = parse_num_or_str(&fstr, NULL, &filt->filename, " ,");
		if (err)
			goto out_err;
	}

	fstr += strspn(fstr, " ,");

	*filter_inp += fstr - filt->str;

	return 0;

out_err:
	addr_filter__free_str(filt);

	return err;
}

int addr_filters__parse_bare_filter(struct addr_filters *filts,
				    const char *filter)
{
	struct addr_filter *filt;
	const char *fstr = filter;
	int err;

	while (*fstr) {
		filt = addr_filter__new();
		if (!filt) {
			addr_filters__exit(filts);
			return -ENOMEM;
		}
		err = parse_one_filter(filt, &fstr);
		if (err) {
			addr_filter__free(filt);
			addr_filters__exit(filts);
			return err;
		}
		addr_filters__add(filts, filt);
	}

	return 0;
}

struct sym_args {
	const char *name;
	u64 start;
	u64 size;
	int idx;
	int cnt;
	bool started;
	bool global;
	bool selected;
	bool duplicate;
	bool near;
};
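/*
 * Editor's note (derived from parse_sym_idx() above): a symbol name may be
 * suffixed with '#' to disambiguate duplicates, e.g. "func #2" picks the
 * second occurrence of 'func', while "func #g" (or #G or #0) requires the
 * global symbol. Without a suffix, idx stays -1, meaning any occurrence.
 */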
static bool kern_sym_match(struct sym_args *args, const char *name, char type)
{
	/* A function with the same name, and global or the n'th found or any */
	return symbol_type__is_a(type, MAP__FUNCTION) &&
	       !strcmp(name, args->name) &&
	       ((args->global && isupper(type)) ||
		(args->selected && ++(args->cnt) == args->idx) ||
		(!args->global && !args->selected));
}

static int find_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (args->started) {
		if (!args->size)
			args->size = start - args->start;
		if (args->selected) {
			if (args->size)
				return 1;
		} else if (kern_sym_match(args, name, type)) {
			args->duplicate = true;
			return 1;
		}
	} else if (kern_sym_match(args, name, type)) {
		args->started = true;
		args->start = start;
	}

	return 0;
}

static int print_kern_sym_cb(void *arg, const char *name, char type, u64 start)
{
	struct sym_args *args = arg;

	if (kern_sym_match(args, name, type)) {
		pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
		       ++args->cnt, start, type, name);
		args->near = true;
	} else if (args->near) {
		args->near = false;
		pr_err("\t\twhich is near\t\t%s\n", name);
	}

	return 0;
}

static int sym_not_found_error(const char *sym_name, int idx)
{
	if (idx > 0) {
		pr_err("N'th occurrence (N=%d) of symbol '%s' not found.\n",
		       idx, sym_name);
	} else if (!idx) {
		pr_err("Global symbol '%s' not found.\n", sym_name);
	} else {
		pr_err("Symbol '%s' not found.\n", sym_name);
	}
	pr_err("Note that symbols must be functions.\n");

	return -EINVAL;
}
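/*
 * Editor's note: the callbacks above are driven by kallsyms__parse(), which
 * walks /proc/kallsyms lines of the form "<address> <type> <name>", e.g.
 *
 *	ffffffff81000000 T _stext
 *
 * An uppercase type letter denotes a global symbol, which is what the
 * isupper(type) test in kern_sym_match() relies on.
 */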
static int find_kern_sym(const char *sym_name, u64 *start, u64 *size, int idx)
{
	struct sym_args args = {
		.name = sym_name,
		.idx = idx,
		.global = !idx,
		.selected = idx > 0,
	};
	int err;

	*start = 0;
	*size = 0;

	err = kallsyms__parse("/proc/kallsyms", &args, find_kern_sym_cb);
	if (err < 0) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err;
	}

	if (args.duplicate) {
		pr_err("Multiple kernel symbols with name '%s'\n", sym_name);
		args.cnt = 0;
		kallsyms__parse("/proc/kallsyms", &args, print_kern_sym_cb);
		pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
		       sym_name);
		pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
		return -EINVAL;
	}

	if (!args.started) {
		pr_err("Kernel symbol lookup: ");
		return sym_not_found_error(sym_name, idx);
	}

	*start = args.start;
	*size = args.size;

	return 0;
}

static int find_entire_kern_cb(void *arg, const char *name __maybe_unused,
			       char type, u64 start)
{
	struct sym_args *args = arg;

	if (!symbol_type__is_a(type, MAP__FUNCTION))
		return 0;

	if (!args->started) {
		args->started = true;
		args->start = start;
	}
	/* Don't know exactly where the kernel ends, so we add a page */
	args->size = round_up(start, page_size) + page_size - args->start;

	return 0;
}

static int addr_filter__entire_kernel(struct addr_filter *filt)
{
	struct sym_args args = { .started = false };
	int err;

	err = kallsyms__parse("/proc/kallsyms", &args, find_entire_kern_cb);
	if (err < 0 || !args.started) {
		pr_err("Failed to parse /proc/kallsyms\n");
		return err ? err : -EINVAL;
	}

	filt->addr = args.start;
	filt->size = args.size;

	return 0;
}

static int check_end_after_start(struct addr_filter *filt, u64 start, u64 size)
{
	if (start + size >= filt->addr)
		return 0;

	if (filt->sym_from) {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before '%s' (0x%"PRIx64")\n",
		       filt->sym_to, start, filt->sym_from, filt->addr);
	} else {
		pr_err("Symbol '%s' (0x%"PRIx64") comes before address 0x%"PRIx64"\n",
		       filt->sym_to, start, filt->addr);
	}

	return -EINVAL;
}
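/*
 * Editor's worked example (hypothetical addresses): for a filter
 * "filter sym_a / sym_b", if sym_a starts at 0x1000 and sym_b occupies
 * [0x3000, 0x3100), then filt->addr = 0x1000 and
 * filt->size = 0x3000 + 0x100 - 0x1000 = 0x2100, i.e. the range covers
 * everything from the start of sym_a to the end of sym_b.
 */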
static int addr_filter__resolve_kernel_syms(struct addr_filter *filt)
{
	bool no_size = false;
	u64 start, size;
	int err;

	if (symbol_conf.kptr_restrict) {
		pr_err("Kernel addresses are restricted. Unable to resolve kernel symbols.\n");
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*"))
		return addr_filter__entire_kernel(filt);

	if (filt->sym_from) {
		err = find_kern_sym(filt->sym_from, &start, &size,
				    filt->sym_from_idx);
		if (err)
			return err;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to) {
			filt->size = size;
			no_size = !size;
		}
	}

	if (filt->sym_to) {
		err = find_kern_sym(filt->sym_to, &start, &size,
				    filt->sym_to_idx);
		if (err)
			return err;

		err = check_end_after_start(filt, start, size);
		if (err)
			return err;
		filt->size = start + size - filt->addr;
		no_size = !size;
	}

	/* The very last symbol in kallsyms does not imply a particular size */
	if (no_size) {
		pr_err("Cannot determine size of symbol '%s'\n",
		       filt->sym_to ? filt->sym_to : filt->sym_from);
		return -EINVAL;
	}

	return 0;
}

static struct dso *load_dso(const char *name)
{
	struct map *map;
	struct dso *dso;

	map = dso__new_map(name);
	if (!map)
		return NULL;

	map__load(map);

	dso = dso__get(map->dso);

	map__put(map);

	return dso;
}

static bool dso_sym_match(struct symbol *sym, const char *name, int *cnt,
			  int idx)
{
	/* Same name, and global or the n'th found or any */
	return !arch__compare_symbol_names(name, sym->name) &&
	       ((!idx && sym->binding == STB_GLOBAL) ||
		(idx > 0 && ++*cnt == idx) ||
		idx < 0);
}

static void print_duplicate_syms(struct dso *dso, const char *sym_name)
{
	struct symbol *sym;
	bool near = false;
	int cnt = 0;

	pr_err("Multiple symbols with name '%s'\n", sym_name);

	sym = dso__first_symbol(dso, MAP__FUNCTION);
	while (sym) {
		if (dso_sym_match(sym, sym_name, &cnt, -1)) {
			pr_err("#%d\t0x%"PRIx64"\t%c\t%s\n",
			       ++cnt, sym->start,
			       sym->binding == STB_GLOBAL ? 'g' :
			       sym->binding == STB_LOCAL ? 'l' : 'w',
			       sym->name);
			near = true;
		} else if (near) {
			near = false;
			pr_err("\t\twhich is near\t\t%s\n", sym->name);
		}
		sym = dso__next_symbol(sym);
	}

	pr_err("Disambiguate symbol name by inserting #n after the name e.g. %s #2\n",
	       sym_name);
	pr_err("Or select a global symbol by inserting #0 or #g or #G\n");
}

static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
			u64 *size, int idx)
{
	struct symbol *sym;
	int cnt = 0;

	*start = 0;
	*size = 0;

	sym = dso__first_symbol(dso, MAP__FUNCTION);
	while (sym) {
		if (*start) {
			if (!*size)
				*size = sym->start - *start;
			if (idx > 0) {
				if (*size)
					return 1;
			} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
				print_duplicate_syms(dso, sym_name);
				return -EINVAL;
			}
		} else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
			*start = sym->start;
			*size = sym->end - sym->start;
		}
		sym = dso__next_symbol(sym);
	}

	if (!*start)
		return sym_not_found_error(sym_name, idx);

	return 0;
}

static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso)
{
	struct symbol *first_sym = dso__first_symbol(dso, MAP__FUNCTION);
	struct symbol *last_sym = dso__last_symbol(dso, MAP__FUNCTION);

	if (!first_sym || !last_sym) {
		pr_err("Failed to determine filter for %s\nNo symbols found.\n",
		       filt->filename);
		return -EINVAL;
	}

	filt->addr = first_sym->start;
	filt->size = last_sym->end - first_sym->start;

	return 0;
}
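/*
 * Editor's note on find_dso_sym() above: if the matched symbol reports a
 * zero size, the size is taken as the gap to the next symbol's start,
 * mirroring what find_kern_sym_cb() does for kallsyms entries.
 */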
static int addr_filter__resolve_syms(struct addr_filter *filt)
{
	u64 start, size;
	struct dso *dso;
	int err = 0;

	if (!filt->sym_from && !filt->sym_to)
		return 0;

	if (!filt->filename)
		return addr_filter__resolve_kernel_syms(filt);

	dso = load_dso(filt->filename);
	if (!dso) {
		pr_err("Failed to load symbols from: %s\n", filt->filename);
		return -EINVAL;
	}

	if (filt->sym_from && !strcmp(filt->sym_from, "*")) {
		err = addr_filter__entire_dso(filt, dso);
		goto put_dso;
	}

	if (filt->sym_from) {
		err = find_dso_sym(dso, filt->sym_from, &start, &size,
				   filt->sym_from_idx);
		if (err)
			goto put_dso;
		filt->addr = start;
		if (filt->range && !filt->size && !filt->sym_to)
			filt->size = size;
	}

	if (filt->sym_to) {
		err = find_dso_sym(dso, filt->sym_to, &start, &size,
				   filt->sym_to_idx);
		if (err)
			goto put_dso;

		err = check_end_after_start(filt, start, size);
		if (err)
			goto put_dso;

		filt->size = start + size - filt->addr;
	}

put_dso:
	dso__put(dso);

	return err;
}

static char *addr_filter__to_str(struct addr_filter *filt)
{
	char filename_buf[PATH_MAX];
	const char *at = "";
	const char *fn = "";
	char *filter;
	int err;

	if (filt->filename) {
		at = "@";
		fn = realpath(filt->filename, filename_buf);
		if (!fn)
			return NULL;
	}

	if (filt->range) {
		err = asprintf(&filter, "%s 0x%"PRIx64"/0x%"PRIx64"%s%s",
			       filt->action, filt->addr, filt->size, at, fn);
	} else {
		err = asprintf(&filter, "%s 0x%"PRIx64"%s%s",
			       filt->action, filt->addr, at, fn);
	}

	return err < 0 ? NULL : filter;
}

static int parse_addr_filter(struct perf_evsel *evsel, const char *filter,
			     int max_nr)
{
	struct addr_filters filts;
	struct addr_filter *filt;
	int err;

	addr_filters__init(&filts);

	err = addr_filters__parse_bare_filter(&filts, filter);
	if (err)
		goto out_exit;

	if (filts.cnt > max_nr) {
		pr_err("Error: number of address filters (%d) exceeds maximum (%d)\n",
		       filts.cnt, max_nr);
		err = -EINVAL;
		goto out_exit;
	}

	list_for_each_entry(filt, &filts.head, list) {
		char *new_filter;

		err = addr_filter__resolve_syms(filt);
		if (err)
			goto out_exit;

		new_filter = addr_filter__to_str(filt);
		if (!new_filter) {
			err = -ENOMEM;
			goto out_exit;
		}

		if (perf_evsel__append_addr_filter(evsel, new_filter)) {
			err = -ENOMEM;
			goto out_exit;
		}
	}

out_exit:
	addr_filters__exit(&filts);

	if (err) {
		pr_err("Failed to parse address filter: '%s'\n", filter);
		pr_err("Filter format is: filter|start|stop|tracestop <start symbol or address> [/ <end symbol or size>] [@<file name>]\n");
		pr_err("Where multiple filters are separated by space or comma.\n");
	}

	return err;
}

static struct perf_pmu *perf_evsel__find_pmu(struct perf_evsel *evsel)
{
	struct perf_pmu *pmu = NULL;

	while ((pmu = perf_pmu__scan(pmu)) != NULL) {
		if (pmu->type == evsel->attr.type)
			break;
	}

	return pmu;
}

static int perf_evsel__nr_addr_filter(struct perf_evsel *evsel)
{
	struct perf_pmu *pmu = perf_evsel__find_pmu(evsel);
	int nr_addr_filters = 0;

	if (!pmu)
		return 0;

	perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters);

	return nr_addr_filters;
}
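/*
 * Editor's example (hypothetical values): a user filter
 * "filter main @ /bin/ls" that resolves main to 0x401000 with size 0x200
 * is rewritten by addr_filter__to_str() as
 *
 *	"filter 0x401000/0x200@/bin/ls"
 *
 * which is the numeric form passed on to the kernel's address filter
 * interface via the evsel filter string.
 */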
int auxtrace_parse_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	char *filter;
	int err, max_nr;

	evlist__for_each_entry(evlist, evsel) {
		filter = evsel->filter;
		max_nr = perf_evsel__nr_addr_filter(evsel);
		if (!filter || !max_nr)
			continue;
		evsel->filter = NULL;
		err = parse_addr_filter(evsel, filter, max_nr);
		free(filter);
		if (err)
			return err;
		pr_debug("Address filter: %s\n", evsel->filter);
	}

	return 0;
}