#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

/*
 * Parse /proc/<pid>/status for the thread name ("Name:") and the thread
 * group id ("Tgid:"), copying the name into @comm.
 */
static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	pid_t tgid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!comm[0] || (tgid < 0)) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and tgid, malformed %s\n",
				   filename);
			break;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			if (size >= len)
				size = len - 1;
			memcpy(comm, name, size);
			comm[size] = '\0';

		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = atoi(tgids);
		}
	}

	fclose(fp);

	return tgid;
}

/*
 * Synthesize a PERF_RECORD_COMM event for @pid and, if @full, for every
 * task listed in /proc/<pid>/task.  Returns the thread group id.
 */
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 int full,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	char filename[PATH_MAX];
	size_t size;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid;

	memset(&event->comm, 0, sizeof(event->comm));

	tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
					 sizeof(event->comm.comm));
	if (tgid < 0)
		goto out;

	event->comm.pid = tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	if (!full) {
		event->comm.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0)
			return -1;

		goto out;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		/* already have tgid; just want to update the comm */
		(void) perf_event__get_comm_tgid(pid, event->comm.comm,
						 sizeof(event->comm.comm));

		size = strlen(event->comm.comm) + 1;
		size = PERF_ALIGN(size, sizeof(u64));
		memset(event->comm.comm + size, 0, machine->id_hdr_size);
		event->comm.header.size = (sizeof(event->comm) -
					   (sizeof(event->comm.comm) - size) +
					   machine->id_hdr_size);

		event->comm.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			tgid = -1;
			break;
		}
	}

	closedir(tasks);
out:
	return tgid;
}

/*
 * Synthesize PERF_RECORD_MMAP2 events for the executable mappings listed
 * in /proc/<pid>/maps.
 */
static int perf_event__synthesize_mmap_events(struct perf_tool *tool,
					      union perf_event *event,
					      pid_t pid, pid_t tgid,
					      perf_event__handler_t process,
					      struct machine *machine)
{
	char filename[PATH_MAX];
	FILE *fp;
	int rc = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	/*
	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
	 */
	event->header.misc = PERF_RECORD_MISC_USER;

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
			   &event->mmap2.start, &event->mmap2.len, prot,
			   &event->mmap2.pgoff, &event->mmap2.maj,
			   &event->mmap2.min,
			   &ino, execname);

		event->mmap2.ino = (u64)ino;

		if (n != 8)
			continue;

		if (prot[2] != 'x')
			continue;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	fclose(fp);
	return rc;
}

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len = pos->end - pos->start;
		event->mmap.pid = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine)
{
	pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
						 process, machine);
	if (tgid == -1)
		return -1;
	return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						  process, machine);
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	union perf_event *comm_event, *mmap_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       threads->map[thread], 0,
					       process, tool, machine)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != threads->map[thread]) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == threads->map[j]) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event,
						       mmap_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine)) {
				err = -1;
				break;
			}
		}
	}
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	DIR *proc;
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event;
	int err = -1;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_mmap;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, pid, 1,
					   process, tool, machine);
	}

	err = 0;
	closedir(proc);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where
	 * "_text" is an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

/*
 * Synthesize a PERF_RECORD_MMAP event for the kernel text mapping, looking
 * up @symbol_name in kallsyms to find where it starts.
 */
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine,
				       const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
		free(event);
		return -ENOENT;
	}

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
				   (sizeof(event->mmap.filename) - size) +
				   machine->id_hdr_size);
	event->mmap.pgoff = args.start;
	event->mmap.start = map->start;
	event->mmap.len = map->end - event->mmap.start;
	event->mmap.pid = machine->pid;

	err = process(tool, event, &synth_sample, machine);
	free(event);

	return err;
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample __maybe_unused,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample __maybe_unused,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff, event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.filename);
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample __maybe_unused,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample __maybe_unused,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample __maybe_unused,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample __maybe_unused,
			struct machine *machine)
{
	return machine__process_event(machine, event);
}

void thread__find_addr_map(struct thread *self,
			   struct machine *machine, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	bool load_map = false;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support it later.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_USER &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread, struct machine *machine,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, machine, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	else
		al->sym = NULL;
}

int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
			      sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
"[hypervisor]" : "<not found>"); 748 al->sym = NULL; 749 al->cpu = sample->cpu; 750 751 if (al->map) { 752 struct dso *dso = al->map->dso; 753 754 if (symbol_conf.dso_list && 755 (!dso || !(strlist__has_entry(symbol_conf.dso_list, 756 dso->short_name) || 757 (dso->short_name != dso->long_name && 758 strlist__has_entry(symbol_conf.dso_list, 759 dso->long_name))))) 760 goto out_filtered; 761 762 al->sym = map__find_symbol(al->map, al->addr, 763 machine->symbol_filter); 764 } 765 766 if (symbol_conf.sym_list && 767 (!al->sym || !strlist__has_entry(symbol_conf.sym_list, 768 al->sym->name))) 769 goto out_filtered; 770 771 return 0; 772 773 out_filtered: 774 al->filtered = true; 775 return 0; 776 } 777