#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	pid_t tgid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!comm[0] || (tgid < 0)) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and tgid, malformed %s\n",
				   filename);
			break;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;	/* drop the trailing '\n' */
			if (size >= len)
				size = len - 1;
			memcpy(comm, name, size);
			comm[size] = '\0';

		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = atoi(tgids);
		}
	}

	fclose(fp);

	return tgid;
}
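
/*
 * A note on the header.size arithmetic used below (values illustrative):
 * the synthesized comm event is a fixed-size struct whose trailing
 * comm[] array is usually only partially used, so header.size trims the
 * record down to the u64-aligned length of the actual string plus any
 * per-sample id headers.  E.g. for comm "cat", strlen + 1 == 4 and
 * PERF_ALIGN(4, sizeof(u64)) == 8, so (sizeof(comm.comm) - 8) unused
 * bytes are subtracted from header.size before id_hdr_size is added.
 */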

static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
					 union perf_event *event, pid_t pid,
					 int full,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	char filename[PATH_MAX];
	size_t size;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine))
		tgid = perf_event__get_comm_tgid(pid, event->comm.comm,
						 sizeof(event->comm.comm));
	else
		tgid = machine->pid;

	if (tgid < 0)
		goto out;

	event->comm.pid = tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	if (!full) {
		event->comm.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0)
			return -1;

		goto out;
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		/* already have tgid; just want to update the comm */
		(void) perf_event__get_comm_tgid(pid, event->comm.comm,
						 sizeof(event->comm.comm));

		size = strlen(event->comm.comm) + 1;
		size = PERF_ALIGN(size, sizeof(u64));
		memset(event->comm.comm + size, 0, machine->id_hdr_size);
		event->comm.header.size = (sizeof(event->comm) -
					   (sizeof(event->comm.comm) - size) +
					   machine->id_hdr_size);

		event->comm.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			tgid = -1;
			break;
		}
	}

	closedir(tasks);
out:
	return tgid;
}
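
/*
 * Each /proc/<pid>/maps line is turned into one PERF_RECORD_MMAP record.
 * A sketch of how a line maps onto the sscanf() below:
 *
 *	00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat
 *	start    end      prot pgoff    (dev and inode are skipped)
 *
 * len is derived later as end - start, and execname is optional:
 * anonymous maps have no pathname and get the "//anon" placeholder.
 */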

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	char filename[PATH_MAX];
	FILE *fp;
	int rc = 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
			   &event->mmap.start, &event->mmap.len, prot,
			   &event->mmap.pgoff,
			   execname);
		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 4)
			continue;
		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap.len -= event->mmap.start;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.pid = tgid;
		event->mmap.tid = pid;

		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	fclose(fp);
	return rc;
}
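
/*
 * Walk the machine's MAP__FUNCTION maps and synthesize one MMAP record
 * per kernel module.  The main kernel map (pos->dso->kernel) is skipped
 * here; it is emitted separately by perf_event__synthesize_kernel_mmap()
 * further down, which computes pgoff from the relocation reference
 * symbol instead.
 */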

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (process(tool, event, &synth_sample, machine) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine, bool mmap_data)
{
	pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
						 process, machine);
	if (tgid == -1)
		return -1;
	return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						  process, machine, mmap_data);
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data)
{
	union perf_event *comm_event, *mmap_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       threads->map[thread], 0,
					       process, tool, machine,
					       mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != threads->map[thread]) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == threads->map[j]) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data)) {
				err = -1;
				break;
			}
		}
	}
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
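
/*
 * Synthesize COMM and MMAP records for every task visible under
 * <root_dir>/proc.  Scanning /proc is inherently racy: a task can exit
 * between readdir() and the open of its status/maps files, which is why
 * per-thread synthesis failures in the loop below are deliberately
 * ignored rather than aborting the whole scan.
 */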

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine, bool mmap_data)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_mmap;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, pid, 1,
					   process, tool, machine, mmap_data);
	}

	err = 0;
	closedir(proc);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where
	 * "_text" is an 'A' at the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}
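
/*
 * A usage sketch for the helper above (both arguments illustrative,
 * not taken from this file):
 *
 *	u64 start = kallsyms__get_function_start("/proc/kallsyms",
 *						 "_stext");
 *
 * Callers treat a return of 0 as "not found": either the symbol was
 * absent or the kallsyms file could not be parsed.
 */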

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map;
	struct kmap *kmap;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	map = machine->vmlinux_maps[MAP__FUNCTION];
	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
				   (sizeof(event->mmap.filename) - size) +
				   machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(tool, event, &synth_sample, machine);
	free(event);

	return err;
}
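
/*
 * The fprintf helpers below render one record per line.  For instance
 * (illustrative values), perf_event__fprintf() on a PERF_RECORD_MMAP
 * record produces:
 *
 *	PERF_RECORD_MMAP 1234/1234: [0x400000(0xc000) @ 0]: x /bin/cat
 *
 * where the trailing 'x'/'r' flags an executable vs. data mapping.
 */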

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap2.filename);
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

void thread__find_addr_map(struct thread *thread,
			   struct machine *machine, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &thread->mg;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	/*
	 * al->level characters: 'k' host kernel, '.' host user, 'g' guest
	 * kernel, 'u' guest user, 'H' hypervisor or a mode we can't resolve.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now let's use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_USER &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread, struct machine *machine,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, machine, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	else
		al->sym = NULL;
}

int perf_event__preprocess_sample(const union perf_event *event,
				  struct machine *machine,
				  struct addr_location *al,
				  struct perf_sample *sample)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	if (thread__is_filtered(thread))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine->vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
			      sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name)))))
			goto out_filtered;

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	if (symbol_conf.sym_list &&
	    (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
					     al->sym->name)))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}