#include <linux/types.h>
#include "event.h"
#include "debug.h"
#include "session.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static struct perf_sample synth_sample = {
	.pid	   = -1,
	.tid	   = -1,
	.time	   = -1,
	.stream_id = -1,
	.cpu	   = -1,
	.period	   = 1,
};

static pid_t perf_event__synthesize_comm(union perf_event *event, pid_t pid,
					 int full, perf_event__handler_t process,
					 struct perf_session *session)
{
	char filename[PATH_MAX];
	char bf[BUFSIZ];
	FILE *fp;
	size_t size = 0;
	DIR *tasks;
	struct dirent dirent, *next;
	pid_t tgid = 0;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
out_race:
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	memset(&event->comm, 0, sizeof(event->comm));

	while (!event->comm.comm[0] || !event->comm.pid) {
		if (fgets(bf, sizeof(bf), fp) == NULL) {
			pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
			goto out;
		}

		if (memcmp(bf, "Name:", 5) == 0) {
			char *name = bf + 5;
			while (*name && isspace(*name))
				++name;
			size = strlen(name) - 1;
			memcpy(event->comm.comm, name, size++);
		} else if (memcmp(bf, "Tgid:", 5) == 0) {
			char *tgids = bf + 5;
			while (*tgids && isspace(*tgids))
				++tgids;
			tgid = event->comm.pid = atoi(tgids);
		}
	}

	event->comm.header.type = PERF_RECORD_COMM;
	size = ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, session->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   session->id_hdr_size);
	if (!full) {
		event->comm.tid = pid;

		process(event, &synth_sample, session);
		goto out;
	}

	snprintf(filename, sizeof(filename), "/proc/%d/task", pid);

	tasks = opendir(filename);
	if (tasks == NULL)
		goto out_race;

	while (!readdir_r(tasks, &dirent, &next) && next) {
		char *end;
		pid = strtol(dirent.d_name, &end, 10);
		if (*end)
			continue;

		event->comm.tid = pid;

		process(event, &synth_sample, session);
	}

	closedir(tasks);
out:
	fclose(fp);

	return tgid;
}

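/*
 * Synthesize one PERF_RECORD_MMAP event per executable mapping found in
 * /proc/<pid>/maps, so that addresses sampled before 'perf record' started
 * can still be resolved to the DSOs they fall into.
 */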
static int perf_event__synthesize_mmap_events(union perf_event *event,
					      pid_t pid, pid_t tgid,
					      perf_event__handler_t process,
					      struct perf_session *session)
{
	char filename[PATH_MAX];
	FILE *fp;

	snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;
	/*
	 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
	 */
	event->header.misc = PERF_RECORD_MISC_USER;

	while (1) {
		char bf[BUFSIZ], *pbf = bf;
		int n;
		size_t size;
		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = hex2u64(pbf, &event->mmap.start);
		if (n < 0)
			continue;
		pbf += n + 1;
		n = hex2u64(pbf, &event->mmap.len);
		if (n < 0)
			continue;
		pbf += n + 3;
		if (*pbf == 'x') { /* vm_exec */
			char *execname = strchr(bf, '/');

			/* Catch VDSO */
			if (execname == NULL)
				execname = strstr(bf, "[vdso]");

			if (execname == NULL)
				continue;

			pbf += 3;
			n = hex2u64(pbf, &event->mmap.pgoff);

			size = strlen(execname);
			execname[size - 1] = '\0'; /* Remove \n */
			memcpy(event->mmap.filename, execname, size);
			size = ALIGN(size, sizeof(u64));
			event->mmap.len -= event->mmap.start;
			event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
			memset(event->mmap.filename + size, 0, session->id_hdr_size);
			event->mmap.header.size += session->id_hdr_size;
			event->mmap.pid = tgid;
			event->mmap.tid = pid;

			process(event, &synth_sample, session);
		}
	}

	fclose(fp);
	return 0;
}

int perf_event__synthesize_modules(perf_event__handler_t process,
				   struct perf_session *session,
				   struct machine *machine)
{
	struct rb_node *nd;
	struct map_groups *kmaps = &machine->kmaps;
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  session->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
	     nd; nd = rb_next(nd)) {
		size_t size;
		struct map *pos = rb_entry(nd, struct map, rb_node);

		if (pos->dso->kernel)
			continue;

		size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					(sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, session->id_hdr_size);
		event->mmap.header.size += session->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		process(event, &synth_sample, session);
	}

	free(event);
	return 0;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      pid_t pid, perf_event__handler_t process,
				      struct perf_session *session)
{
	pid_t tgid = perf_event__synthesize_comm(comm_event, pid, 1, process,
						 session);
	if (tgid == -1)
		return -1;
	return perf_event__synthesize_mmap_events(mmap_event, pid, tgid,
						  process, session);
}

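/*
 * Synthesize COMM and MMAP events for each thread in the given thread_map,
 * typically the threads of the processes selected on the command line.
 */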
int perf_event__synthesize_thread_map(struct thread_map *threads,
				      perf_event__handler_t process,
				      struct perf_session *session)
{
	union perf_event *comm_event, *mmap_event;
	int err = -1, thread;

	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       threads->map[thread],
					       process, session)) {
			err = -1;
			break;
		}
	}
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

int perf_event__synthesize_threads(perf_event__handler_t process,
				   struct perf_session *session)
{
	DIR *proc;
	struct dirent dirent, *next;
	union perf_event *comm_event, *mmap_event;
	int err = -1;

	comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap) + session->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	proc = opendir("/proc");
	if (proc == NULL)
		goto out_free_mmap;

	while (!readdir_r(proc, &dirent, &next) && next) {
		char *end;
		pid_t pid = strtol(dirent.d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;

		__event__synthesize_thread(comm_event, mmap_event, pid,
					   process, session);
	}

	closedir(proc);
	err = 0;
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start, u64 end __used)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

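/*
 * Synthesize a PERF_RECORD_MMAP event for the kernel text mapping: the
 * address of symbol_name is looked up via kallsyms and stored in pgoff so
 * that it can later be used as the reference relocation symbol.
 */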
int perf_event__synthesize_kernel_mmap(perf_event__handler_t process,
				       struct perf_session *session,
				       struct machine *machine,
				       const char *symbol_name)
{
	size_t size;
	const char *filename, *mmap_name;
	char path[PATH_MAX];
	char name_buff[PATH_MAX];
	struct map *map;
	int err;
	/*
	 * We should get this from /sys/kernel/sections/.text, but till that is
	 * available use this, and after it is use this as a fallback for older
	 * kernels.
	 */
	struct process_symbol_args args = { .name = symbol_name, };
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  session->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
		filename = "/proc/kallsyms";
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
		if (machine__is_default_guest(machine))
			filename = (char *) symbol_conf.default_guest_kallsyms;
		else {
			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
			filename = path;
		}
	}

	if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0)
		return -ENOENT;

	map = machine->vmlinux_maps[MAP__FUNCTION];
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, symbol_name) + 1;
	size = ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + session->id_hdr_size);
	event->mmap.pgoff = args.start;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = process(event, &synth_sample, session);
	free(event);

	return err;
}

int perf_event__process_comm(union perf_event *event,
			     struct perf_sample *sample __used,
			     struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, event->comm.tid);

	dump_printf(": %s:%d\n", event->comm.comm, event->comm.tid);

	if (thread == NULL || thread__set_comm(thread, event->comm.comm)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		return -1;
	}

	return 0;
}

int perf_event__process_lost(union perf_event *event,
			     struct perf_sample *sample __used,
			     struct perf_session *session)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	session->hists.stats.total_lost += event->lost.lost;
	return 0;
}

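/*
 * Set the kernel function map boundaries from the kernel MMAP event being
 * processed.
 */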
static void perf_event__set_kernel_mmap_len(union perf_event *event,
					    struct map **maps)
{
	maps[MAP__FUNCTION]->start = event->mmap.start;
	maps[MAP__FUNCTION]->end   = event->mmap.start + event->mmap.len;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (maps[MAP__FUNCTION]->end == 0)
		maps[MAP__FUNCTION]->end = ~0ULL;
}

static int perf_event__process_kernel_mmap(union perf_event *event,
					   struct perf_session *session)
{
	struct map *map;
	char kmmap_prefix[PATH_MAX];
	struct machine *machine;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	machine = perf_session__findnew_machine(session, event->mmap.pid);
	if (!machine) {
		pr_err("Can't find id %d's machine\n", event->mmap.pid);
		goto out_problem;
	}

	machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				kmmap_prefix,
				strlen(kmmap_prefix)) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {

		char short_module_name[1024];
		char *name, *dot;

		if (event->mmap.filename[0] == '/') {
			name = strrchr(event->mmap.filename, '/');
			if (name == NULL)
				goto out_problem;

			++name; /* skip / */
			dot = strrchr(name, '.');
			if (dot == NULL)
				goto out_problem;
			snprintf(short_module_name, sizeof(short_module_name),
				 "[%.*s]", (int)(dot - name), name);
			strxfrchar(short_module_name, '-', '_');
		} else
			strcpy(short_module_name, event->mmap.filename);

		map = machine__new_module(machine, event->mmap.start,
					  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		name = strdup(short_module_name);
		if (name == NULL)
			goto out_problem;

		map->dso->short_name = name;
		map->dso->sname_alloc = 1;
		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(kmmap_prefix));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = __dsos__findnew(&machine->kernel_dsos,
						     kmmap_prefix);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0)
			goto out_problem;

		perf_event__set_kernel_mmap_len(event, machine->vmlinux_maps);
		perf_session__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps,
							 symbol_name,
							 event->mmap.pgoff);
		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine->vmlinux_maps[MAP__FUNCTION],
				  NULL);
		}
	}
	return 0;
out_problem:
	return -1;
}

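/*
 * Process a PERF_RECORD_MMAP event: kernel and guest kernel mappings are
 * handled by perf_event__process_kernel_mmap(), user space mappings become
 * a new map in the owning thread's map group.
 */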
int perf_event__process_mmap(union perf_event *event,
			     struct perf_sample *sample __used,
			     struct perf_session *session)
{
	struct machine *machine;
	struct thread *thread;
	struct map *map;
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	int ret = 0;

	dump_printf(" %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %s\n",
		    event->mmap.pid, event->mmap.tid, event->mmap.start,
		    event->mmap.len, event->mmap.pgoff, event->mmap.filename);

	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = perf_event__process_kernel_mmap(event, session);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	machine = perf_session__find_host_machine(session);
	if (machine == NULL)
		goto out_problem;
	thread = perf_session__findnew(session, event->mmap.pid);
	if (thread == NULL)
		goto out_problem;
	map = map__new(&machine->user_dsos, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       event->mmap.pid, event->mmap.filename,
		       MAP__FUNCTION);
	if (map == NULL)
		goto out_problem;

	thread__insert_map(thread, map);
	return 0;

out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}

int perf_event__process_task(union perf_event *event,
			     struct perf_sample *sample __used,
			     struct perf_session *session)
{
	struct thread *thread = perf_session__findnew(session, event->fork.tid);
	struct thread *parent = perf_session__findnew(session, event->fork.ptid);

	dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
		    event->fork.ppid, event->fork.ptid);

	if (event->header.type == PERF_RECORD_EXIT) {
		perf_session__remove_thread(session, thread);
		return 0;
	}

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		return -1;
	}

	return 0;
}

int perf_event__process(union perf_event *event, struct perf_sample *sample,
			struct perf_session *session)
{
	switch (event->header.type) {
	case PERF_RECORD_COMM:
		perf_event__process_comm(event, sample, session);
		break;
	case PERF_RECORD_MMAP:
		perf_event__process_mmap(event, sample, session);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		perf_event__process_task(event, sample, session);
		break;
	case PERF_RECORD_LOST:
		perf_event__process_lost(event, sample, session);
	default:
		break;
	}

	return 0;
}

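/*
 * Resolve an address to a map for the given thread, selecting the host or
 * guest machine (and its kernel maps) according to cpumode. al->level
 * encodes where the address lives: 'k' kernel, '.' user, 'g' guest kernel,
 * 'u' guest user, 'H' hypervisor.
 */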
void thread__find_addr_map(struct thread *self,
			   struct perf_session *session, u8 cpumode,
			   enum map_type type, pid_t pid, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = &self->mg;
	struct machine *machine = NULL;

	al->thread = self;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = false;

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		machine = perf_session__find_host_machine(session);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
		machine = perf_session__find_host_machine(session);
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		machine = perf_session__find_machine(session, pid);
		if (machine == NULL) {
			al->map = NULL;
			return;
		}
		mg = &machine->kmaps;
	} else {
		/*
		 * 'u' means guest os user space.
		 * TODO: We don't support guest user space. Might support late.
		 */
		if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest)
			al->level = 'u';
		else
			al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered = true;
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered = true;

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if ((long long)al->addr < 0 &&
		    cpumode == PERF_RECORD_MISC_KERNEL &&
		    machine && mg != &machine->kmaps) {
			mg = &machine->kmaps;
			goto try_again;
		}
	} else
		al->addr = al->map->map_ip(al->map, al->addr);
}

void thread__find_addr_location(struct thread *self,
				struct perf_session *session, u8 cpumode,
				enum map_type type, pid_t pid, u64 addr,
				struct addr_location *al,
				symbol_filter_t filter)
{
	thread__find_addr_map(self, session, cpumode, type, pid, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr, filter);
	else
		al->sym = NULL;
}

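/*
 * Resolve a sample to its thread, map and symbol, honouring the comm, dso
 * and symbol filter lists in symbol_conf; filtered samples are only marked
 * via al->filtered, not dropped.
 */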
int perf_event__preprocess_sample(const union perf_event *event,
				  struct perf_session *session,
				  struct addr_location *al,
				  struct perf_sample *sample,
				  symbol_filter_t filter)
{
	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct thread *thread = perf_session__findnew(session, event->ip.pid);

	if (thread == NULL)
		return -1;

	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread->comm))
		goto out_filtered;

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
	/*
	 * Have we already created the kernel maps for the host machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL &&
	    session->host_machine.vmlinux_maps[MAP__FUNCTION] == NULL)
		machine__create_kernel_maps(&session->host_machine);

	thread__find_addr_map(thread, session, cpumode, MAP__FUNCTION,
			      event->ip.pid, event->ip.ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");
	al->sym = NULL;
	al->cpu = sample->cpu;

	if (al->map) {
		if (symbol_conf.dso_list &&
		    (!al->map || !al->map->dso ||
		     !(strlist__has_entry(symbol_conf.dso_list,
					  al->map->dso->short_name) ||
		       (al->map->dso->short_name != al->map->dso->long_name &&
			strlist__has_entry(symbol_conf.dso_list,
					   al->map->dso->long_name)))))
			goto out_filtered;

		al->sym = map__find_symbol(al->map, al->addr, filter);
	}

	if (symbol_conf.sym_list && al->sym &&
	    !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
		goto out_filtered;

	return 0;

out_filtered:
	al->filtered = true;
	return 0;
}