1 // SPDX-License-Identifier: GPL-2.0 2 #include <errno.h> 3 #include <linux/kernel.h> 4 #include <linux/types.h> 5 #include <inttypes.h> 6 #include <stdlib.h> 7 #include <unistd.h> 8 #include <stdio.h> 9 #include <string.h> 10 #include <sys/param.h> 11 12 #include "parse-events.h" 13 #include "evlist.h" 14 #include "evsel.h" 15 #include "thread_map.h" 16 #include "cpumap.h" 17 #include "machine.h" 18 #include "event.h" 19 #include "thread.h" 20 21 #include "tests.h" 22 23 #include "sane_ctype.h" 24 25 #define BUFSZ 1024 26 #define READLEN 128 27 28 struct state { 29 u64 done[1024]; 30 size_t done_cnt; 31 }; 32 33 static unsigned int hex(char c) 34 { 35 if (c >= '0' && c <= '9') 36 return c - '0'; 37 if (c >= 'a' && c <= 'f') 38 return c - 'a' + 10; 39 return c - 'A' + 10; 40 } 41 42 static size_t read_objdump_chunk(const char **line, unsigned char **buf, 43 size_t *buf_len) 44 { 45 size_t bytes_read = 0; 46 unsigned char *chunk_start = *buf; 47 48 /* Read bytes */ 49 while (*buf_len > 0) { 50 char c1, c2; 51 52 /* Get 2 hex digits */ 53 c1 = *(*line)++; 54 if (!isxdigit(c1)) 55 break; 56 c2 = *(*line)++; 57 if (!isxdigit(c2)) 58 break; 59 60 /* Store byte and advance buf */ 61 **buf = (hex(c1) << 4) | hex(c2); 62 (*buf)++; 63 (*buf_len)--; 64 bytes_read++; 65 66 /* End of chunk? */ 67 if (isspace(**line)) 68 break; 69 } 70 71 /* 72 * objdump will display raw insn as LE if code endian 73 * is LE and bytes_per_chunk > 1. In that case reverse 74 * the chunk we just read. 75 * 76 * see disassemble_bytes() at binutils/objdump.c for details 77 * how objdump chooses display endian) 78 */ 79 if (bytes_read > 1 && !bigendian()) { 80 unsigned char *chunk_end = chunk_start + bytes_read - 1; 81 unsigned char tmp; 82 83 while (chunk_start < chunk_end) { 84 tmp = *chunk_start; 85 *chunk_start = *chunk_end; 86 *chunk_end = tmp; 87 chunk_start++; 88 chunk_end--; 89 } 90 } 91 92 return bytes_read; 93 } 94 95 static size_t read_objdump_line(const char *line, unsigned char *buf, 96 size_t buf_len) 97 { 98 const char *p; 99 size_t ret, bytes_read = 0; 100 101 /* Skip to a colon */ 102 p = strchr(line, ':'); 103 if (!p) 104 return 0; 105 p++; 106 107 /* Skip initial spaces */ 108 while (*p) { 109 if (!isspace(*p)) 110 break; 111 p++; 112 } 113 114 do { 115 ret = read_objdump_chunk(&p, &buf, &buf_len); 116 bytes_read += ret; 117 p++; 118 } while (ret > 0); 119 120 /* return number of successfully read bytes */ 121 return bytes_read; 122 } 123 124 static int read_objdump_output(FILE *f, void *buf, size_t *len, u64 start_addr) 125 { 126 char *line = NULL; 127 size_t line_len, off_last = 0; 128 ssize_t ret; 129 int err = 0; 130 u64 addr, last_addr = start_addr; 131 132 while (off_last < *len) { 133 size_t off, read_bytes, written_bytes; 134 unsigned char tmp[BUFSZ]; 135 136 ret = getline(&line, &line_len, f); 137 if (feof(f)) 138 break; 139 if (ret < 0) { 140 pr_debug("getline failed\n"); 141 err = -1; 142 break; 143 } 144 145 /* read objdump data into temporary buffer */ 146 read_bytes = read_objdump_line(line, tmp, sizeof(tmp)); 147 if (!read_bytes) 148 continue; 149 150 if (sscanf(line, "%"PRIx64, &addr) != 1) 151 continue; 152 if (addr < last_addr) { 153 pr_debug("addr going backwards, read beyond section?\n"); 154 break; 155 } 156 last_addr = addr; 157 158 /* copy it from temporary buffer to 'buf' according 159 * to address on current objdump line */ 160 off = addr - start_addr; 161 if (off >= *len) 162 break; 163 written_bytes = MIN(read_bytes, *len - off); 164 memcpy(buf + off, tmp, written_bytes); 165 off_last = off + written_bytes; 166 } 167 168 /* len returns number of bytes that could not be read */ 169 *len -= off_last; 170 171 free(line); 172 173 return err; 174 } 175 176 static int read_via_objdump(const char *filename, u64 addr, void *buf, 177 size_t len) 178 { 179 char cmd[PATH_MAX * 2]; 180 const char *fmt; 181 FILE *f; 182 int ret; 183 184 fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s"; 185 ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len, 186 filename); 187 if (ret <= 0 || (size_t)ret >= sizeof(cmd)) 188 return -1; 189 190 pr_debug("Objdump command is: %s\n", cmd); 191 192 /* Ignore objdump errors */ 193 strcat(cmd, " 2>/dev/null"); 194 195 f = popen(cmd, "r"); 196 if (!f) { 197 pr_debug("popen failed\n"); 198 return -1; 199 } 200 201 ret = read_objdump_output(f, buf, &len, addr); 202 if (len) { 203 pr_debug("objdump read too few bytes: %zd\n", len); 204 if (!ret) 205 ret = len; 206 } 207 208 pclose(f); 209 210 return ret; 211 } 212 213 static void dump_buf(unsigned char *buf, size_t len) 214 { 215 size_t i; 216 217 for (i = 0; i < len; i++) { 218 pr_debug("0x%02x ", buf[i]); 219 if (i % 16 == 15) 220 pr_debug("\n"); 221 } 222 pr_debug("\n"); 223 } 224 225 static int read_object_code(u64 addr, size_t len, u8 cpumode, 226 struct thread *thread, struct state *state) 227 { 228 struct addr_location al; 229 unsigned char buf1[BUFSZ]; 230 unsigned char buf2[BUFSZ]; 231 size_t ret_len; 232 u64 objdump_addr; 233 const char *objdump_name; 234 char decomp_name[KMOD_DECOMP_LEN]; 235 bool decomp = false; 236 int ret; 237 238 pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); 239 240 if (!thread__find_map(thread, cpumode, addr, &al) || !al.map->dso) { 241 if (cpumode == PERF_RECORD_MISC_HYPERVISOR) { 242 pr_debug("Hypervisor address can not be resolved - skipping\n"); 243 return 0; 244 } 245 246 pr_debug("thread__find_map failed\n"); 247 return -1; 248 } 249 250 pr_debug("File is: %s\n", al.map->dso->long_name); 251 252 if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && 253 !dso__is_kcore(al.map->dso)) { 254 pr_debug("Unexpected kernel address - skipping\n"); 255 return 0; 256 } 257 258 pr_debug("On file address is: %#"PRIx64"\n", al.addr); 259 260 if (len > BUFSZ) 261 len = BUFSZ; 262 263 /* Do not go off the map */ 264 if (addr + len > al.map->end) 265 len = al.map->end - addr; 266 267 /* Read the object code using perf */ 268 ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine, 269 al.addr, buf1, len); 270 if (ret_len != len) { 271 pr_debug("dso__data_read_offset failed\n"); 272 return -1; 273 } 274 275 /* 276 * Converting addresses for use by objdump requires more information. 277 * map__load() does that. See map__rip_2objdump() for details. 278 */ 279 if (map__load(al.map)) 280 return -1; 281 282 /* objdump struggles with kcore - try each map only once */ 283 if (dso__is_kcore(al.map->dso)) { 284 size_t d; 285 286 for (d = 0; d < state->done_cnt; d++) { 287 if (state->done[d] == al.map->start) { 288 pr_debug("kcore map tested already"); 289 pr_debug(" - skipping\n"); 290 return 0; 291 } 292 } 293 if (state->done_cnt >= ARRAY_SIZE(state->done)) { 294 pr_debug("Too many kcore maps - skipping\n"); 295 return 0; 296 } 297 state->done[state->done_cnt++] = al.map->start; 298 } 299 300 objdump_name = al.map->dso->long_name; 301 if (dso__needs_decompress(al.map->dso)) { 302 if (dso__decompress_kmodule_path(al.map->dso, objdump_name, 303 decomp_name, 304 sizeof(decomp_name)) < 0) { 305 pr_debug("decompression failed\n"); 306 return -1; 307 } 308 309 decomp = true; 310 objdump_name = decomp_name; 311 } 312 313 /* Read the object code using objdump */ 314 objdump_addr = map__rip_2objdump(al.map, al.addr); 315 ret = read_via_objdump(objdump_name, objdump_addr, buf2, len); 316 317 if (decomp) 318 unlink(objdump_name); 319 320 if (ret > 0) { 321 /* 322 * The kernel maps are inaccurate - assume objdump is right in 323 * that case. 324 */ 325 if (cpumode == PERF_RECORD_MISC_KERNEL || 326 cpumode == PERF_RECORD_MISC_GUEST_KERNEL) { 327 len -= ret; 328 if (len) { 329 pr_debug("Reducing len to %zu\n", len); 330 } else if (dso__is_kcore(al.map->dso)) { 331 /* 332 * objdump cannot handle very large segments 333 * that may be found in kcore. 334 */ 335 pr_debug("objdump failed for kcore"); 336 pr_debug(" - skipping\n"); 337 return 0; 338 } else { 339 return -1; 340 } 341 } 342 } 343 if (ret < 0) { 344 pr_debug("read_via_objdump failed\n"); 345 return -1; 346 } 347 348 /* The results should be identical */ 349 if (memcmp(buf1, buf2, len)) { 350 pr_debug("Bytes read differ from those read by objdump\n"); 351 pr_debug("buf1 (dso):\n"); 352 dump_buf(buf1, len); 353 pr_debug("buf2 (objdump):\n"); 354 dump_buf(buf2, len); 355 return -1; 356 } 357 pr_debug("Bytes read match those read by objdump\n"); 358 359 return 0; 360 } 361 362 static int process_sample_event(struct machine *machine, 363 struct perf_evlist *evlist, 364 union perf_event *event, struct state *state) 365 { 366 struct perf_sample sample; 367 struct thread *thread; 368 int ret; 369 370 if (perf_evlist__parse_sample(evlist, event, &sample)) { 371 pr_debug("perf_evlist__parse_sample failed\n"); 372 return -1; 373 } 374 375 thread = machine__findnew_thread(machine, sample.pid, sample.tid); 376 if (!thread) { 377 pr_debug("machine__findnew_thread failed\n"); 378 return -1; 379 } 380 381 ret = read_object_code(sample.ip, READLEN, sample.cpumode, thread, state); 382 thread__put(thread); 383 return ret; 384 } 385 386 static int process_event(struct machine *machine, struct perf_evlist *evlist, 387 union perf_event *event, struct state *state) 388 { 389 if (event->header.type == PERF_RECORD_SAMPLE) 390 return process_sample_event(machine, evlist, event, state); 391 392 if (event->header.type == PERF_RECORD_THROTTLE || 393 event->header.type == PERF_RECORD_UNTHROTTLE) 394 return 0; 395 396 if (event->header.type < PERF_RECORD_MAX) { 397 int ret; 398 399 ret = machine__process_event(machine, event, NULL); 400 if (ret < 0) 401 pr_debug("machine__process_event failed, event type %u\n", 402 event->header.type); 403 return ret; 404 } 405 406 return 0; 407 } 408 409 static int process_events(struct machine *machine, struct perf_evlist *evlist, 410 struct state *state) 411 { 412 union perf_event *event; 413 struct perf_mmap *md; 414 int i, ret; 415 416 for (i = 0; i < evlist->nr_mmaps; i++) { 417 md = &evlist->mmap[i]; 418 if (perf_mmap__read_init(md) < 0) 419 continue; 420 421 while ((event = perf_mmap__read_event(md)) != NULL) { 422 ret = process_event(machine, evlist, event, state); 423 perf_mmap__consume(md); 424 if (ret < 0) 425 return ret; 426 } 427 perf_mmap__read_done(md); 428 } 429 return 0; 430 } 431 432 static int comp(const void *a, const void *b) 433 { 434 return *(int *)a - *(int *)b; 435 } 436 437 static void do_sort_something(void) 438 { 439 int buf[40960], i; 440 441 for (i = 0; i < (int)ARRAY_SIZE(buf); i++) 442 buf[i] = ARRAY_SIZE(buf) - i - 1; 443 444 qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp); 445 446 for (i = 0; i < (int)ARRAY_SIZE(buf); i++) { 447 if (buf[i] != i) { 448 pr_debug("qsort failed\n"); 449 break; 450 } 451 } 452 } 453 454 static void sort_something(void) 455 { 456 int i; 457 458 for (i = 0; i < 10; i++) 459 do_sort_something(); 460 } 461 462 static void syscall_something(void) 463 { 464 int pipefd[2]; 465 int i; 466 467 for (i = 0; i < 1000; i++) { 468 if (pipe(pipefd) < 0) { 469 pr_debug("pipe failed\n"); 470 break; 471 } 472 close(pipefd[1]); 473 close(pipefd[0]); 474 } 475 } 476 477 static void fs_something(void) 478 { 479 const char *test_file_name = "temp-perf-code-reading-test-file--"; 480 FILE *f; 481 int i; 482 483 for (i = 0; i < 1000; i++) { 484 f = fopen(test_file_name, "w+"); 485 if (f) { 486 fclose(f); 487 unlink(test_file_name); 488 } 489 } 490 } 491 492 static const char *do_determine_event(bool excl_kernel) 493 { 494 const char *event = excl_kernel ? "cycles:u" : "cycles"; 495 496 #ifdef __s390x__ 497 char cpuid[128], model[16], model_c[16], cpum_cf_v[16]; 498 unsigned int family; 499 int ret, cpum_cf_a; 500 501 if (get_cpuid(cpuid, sizeof(cpuid))) 502 goto out_clocks; 503 ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c, 504 model, cpum_cf_v, &cpum_cf_a); 505 if (ret != 5) /* Not available */ 506 goto out_clocks; 507 if (excl_kernel && (cpum_cf_a & 4)) 508 return event; 509 if (!excl_kernel && (cpum_cf_a & 2)) 510 return event; 511 512 /* Fall through: missing authorization */ 513 out_clocks: 514 event = excl_kernel ? "cpu-clock:u" : "cpu-clock"; 515 516 #endif 517 return event; 518 } 519 520 static void do_something(void) 521 { 522 fs_something(); 523 524 sort_something(); 525 526 syscall_something(); 527 } 528 529 enum { 530 TEST_CODE_READING_OK, 531 TEST_CODE_READING_NO_VMLINUX, 532 TEST_CODE_READING_NO_KCORE, 533 TEST_CODE_READING_NO_ACCESS, 534 TEST_CODE_READING_NO_KERNEL_OBJ, 535 }; 536 537 static int do_test_code_reading(bool try_kcore) 538 { 539 struct machine *machine; 540 struct thread *thread; 541 struct record_opts opts = { 542 .mmap_pages = UINT_MAX, 543 .user_freq = UINT_MAX, 544 .user_interval = ULLONG_MAX, 545 .freq = 500, 546 .target = { 547 .uses_mmap = true, 548 }, 549 }; 550 struct state state = { 551 .done_cnt = 0, 552 }; 553 struct thread_map *threads = NULL; 554 struct cpu_map *cpus = NULL; 555 struct perf_evlist *evlist = NULL; 556 struct perf_evsel *evsel = NULL; 557 int err = -1, ret; 558 pid_t pid; 559 struct map *map; 560 bool have_vmlinux, have_kcore, excl_kernel = false; 561 562 pid = getpid(); 563 564 machine = machine__new_host(); 565 machine->env = &perf_env; 566 567 ret = machine__create_kernel_maps(machine); 568 if (ret < 0) { 569 pr_debug("machine__create_kernel_maps failed\n"); 570 goto out_err; 571 } 572 573 /* Force the use of kallsyms instead of vmlinux to try kcore */ 574 if (try_kcore) 575 symbol_conf.kallsyms_name = "/proc/kallsyms"; 576 577 /* Load kernel map */ 578 map = machine__kernel_map(machine); 579 ret = map__load(map); 580 if (ret < 0) { 581 pr_debug("map__load failed\n"); 582 goto out_err; 583 } 584 have_vmlinux = dso__is_vmlinux(map->dso); 585 have_kcore = dso__is_kcore(map->dso); 586 587 /* 2nd time through we just try kcore */ 588 if (try_kcore && !have_kcore) 589 return TEST_CODE_READING_NO_KCORE; 590 591 /* No point getting kernel events if there is no kernel object */ 592 if (!have_vmlinux && !have_kcore) 593 excl_kernel = true; 594 595 threads = thread_map__new_by_tid(pid); 596 if (!threads) { 597 pr_debug("thread_map__new_by_tid failed\n"); 598 goto out_err; 599 } 600 601 ret = perf_event__synthesize_thread_map(NULL, threads, 602 perf_event__process, machine, false, 500); 603 if (ret < 0) { 604 pr_debug("perf_event__synthesize_thread_map failed\n"); 605 goto out_err; 606 } 607 608 thread = machine__findnew_thread(machine, pid, pid); 609 if (!thread) { 610 pr_debug("machine__findnew_thread failed\n"); 611 goto out_put; 612 } 613 614 cpus = cpu_map__new(NULL); 615 if (!cpus) { 616 pr_debug("cpu_map__new failed\n"); 617 goto out_put; 618 } 619 620 while (1) { 621 const char *str; 622 623 evlist = perf_evlist__new(); 624 if (!evlist) { 625 pr_debug("perf_evlist__new failed\n"); 626 goto out_put; 627 } 628 629 perf_evlist__set_maps(evlist, cpus, threads); 630 631 str = do_determine_event(excl_kernel); 632 pr_debug("Parsing event '%s'\n", str); 633 ret = parse_events(evlist, str, NULL); 634 if (ret < 0) { 635 pr_debug("parse_events failed\n"); 636 goto out_put; 637 } 638 639 perf_evlist__config(evlist, &opts, NULL); 640 641 evsel = perf_evlist__first(evlist); 642 643 evsel->attr.comm = 1; 644 evsel->attr.disabled = 1; 645 evsel->attr.enable_on_exec = 0; 646 647 ret = perf_evlist__open(evlist); 648 if (ret < 0) { 649 if (!excl_kernel) { 650 excl_kernel = true; 651 /* 652 * Both cpus and threads are now owned by evlist 653 * and will be freed by following perf_evlist__set_maps 654 * call. Getting refference to keep them alive. 655 */ 656 cpu_map__get(cpus); 657 thread_map__get(threads); 658 perf_evlist__set_maps(evlist, NULL, NULL); 659 perf_evlist__delete(evlist); 660 evlist = NULL; 661 continue; 662 } 663 664 if (verbose > 0) { 665 char errbuf[512]; 666 perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); 667 pr_debug("perf_evlist__open() failed!\n%s\n", errbuf); 668 } 669 670 goto out_put; 671 } 672 break; 673 } 674 675 ret = perf_evlist__mmap(evlist, UINT_MAX); 676 if (ret < 0) { 677 pr_debug("perf_evlist__mmap failed\n"); 678 goto out_put; 679 } 680 681 perf_evlist__enable(evlist); 682 683 do_something(); 684 685 perf_evlist__disable(evlist); 686 687 ret = process_events(machine, evlist, &state); 688 if (ret < 0) 689 goto out_put; 690 691 if (!have_vmlinux && !have_kcore && !try_kcore) 692 err = TEST_CODE_READING_NO_KERNEL_OBJ; 693 else if (!have_vmlinux && !try_kcore) 694 err = TEST_CODE_READING_NO_VMLINUX; 695 else if (excl_kernel) 696 err = TEST_CODE_READING_NO_ACCESS; 697 else 698 err = TEST_CODE_READING_OK; 699 out_put: 700 thread__put(thread); 701 out_err: 702 703 if (evlist) { 704 perf_evlist__delete(evlist); 705 } else { 706 cpu_map__put(cpus); 707 thread_map__put(threads); 708 } 709 machine__delete_threads(machine); 710 machine__delete(machine); 711 712 return err; 713 } 714 715 int test__code_reading(struct test *test __maybe_unused, int subtest __maybe_unused) 716 { 717 int ret; 718 719 ret = do_test_code_reading(false); 720 if (!ret) 721 ret = do_test_code_reading(true); 722 723 switch (ret) { 724 case TEST_CODE_READING_OK: 725 return 0; 726 case TEST_CODE_READING_NO_VMLINUX: 727 pr_debug("no vmlinux\n"); 728 return 0; 729 case TEST_CODE_READING_NO_KCORE: 730 pr_debug("no kcore\n"); 731 return 0; 732 case TEST_CODE_READING_NO_ACCESS: 733 pr_debug("no access\n"); 734 return 0; 735 case TEST_CODE_READING_NO_KERNEL_OBJ: 736 pr_debug("no kernel obj\n"); 737 return 0; 738 default: 739 return -1; 740 }; 741 } 742