1 #include <linux/types.h> 2 #include <sys/mman.h> 3 #include "event.h" 4 #include "debug.h" 5 #include "hist.h" 6 #include "machine.h" 7 #include "sort.h" 8 #include "string.h" 9 #include "strlist.h" 10 #include "thread.h" 11 #include "thread_map.h" 12 #include "symbol/kallsyms.h" 13 14 static const char *perf_event__names[] = { 15 [0] = "TOTAL", 16 [PERF_RECORD_MMAP] = "MMAP", 17 [PERF_RECORD_MMAP2] = "MMAP2", 18 [PERF_RECORD_LOST] = "LOST", 19 [PERF_RECORD_COMM] = "COMM", 20 [PERF_RECORD_EXIT] = "EXIT", 21 [PERF_RECORD_THROTTLE] = "THROTTLE", 22 [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE", 23 [PERF_RECORD_FORK] = "FORK", 24 [PERF_RECORD_READ] = "READ", 25 [PERF_RECORD_SAMPLE] = "SAMPLE", 26 [PERF_RECORD_AUX] = "AUX", 27 [PERF_RECORD_ITRACE_START] = "ITRACE_START", 28 [PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES", 29 [PERF_RECORD_SWITCH] = "SWITCH", 30 [PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE", 31 [PERF_RECORD_HEADER_ATTR] = "ATTR", 32 [PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE", 33 [PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA", 34 [PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID", 35 [PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND", 36 [PERF_RECORD_ID_INDEX] = "ID_INDEX", 37 [PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO", 38 [PERF_RECORD_AUXTRACE] = "AUXTRACE", 39 [PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR", 40 }; 41 42 const char *perf_event__name(unsigned int id) 43 { 44 if (id >= ARRAY_SIZE(perf_event__names)) 45 return "INVALID"; 46 if (!perf_event__names[id]) 47 return "UNKNOWN"; 48 return perf_event__names[id]; 49 } 50 51 static struct perf_sample synth_sample = { 52 .pid = -1, 53 .tid = -1, 54 .time = -1, 55 .stream_id = -1, 56 .cpu = -1, 57 .period = 1, 58 }; 59 60 /* 61 * Assumes that the first 4095 bytes of /proc/pid/stat contains 62 * the comm, tgid and ppid. 63 */ 64 static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len, 65 pid_t *tgid, pid_t *ppid) 66 { 67 char filename[PATH_MAX]; 68 char bf[4096]; 69 int fd; 70 size_t size = 0; 71 ssize_t n; 72 char *nl, *name, *tgids, *ppids; 73 74 *tgid = -1; 75 *ppid = -1; 76 77 snprintf(filename, sizeof(filename), "/proc/%d/status", pid); 78 79 fd = open(filename, O_RDONLY); 80 if (fd < 0) { 81 pr_debug("couldn't open %s\n", filename); 82 return -1; 83 } 84 85 n = read(fd, bf, sizeof(bf) - 1); 86 close(fd); 87 if (n <= 0) { 88 pr_warning("Couldn't get COMM, tigd and ppid for pid %d\n", 89 pid); 90 return -1; 91 } 92 bf[n] = '\0'; 93 94 name = strstr(bf, "Name:"); 95 tgids = strstr(bf, "Tgid:"); 96 ppids = strstr(bf, "PPid:"); 97 98 if (name) { 99 name += 5; /* strlen("Name:") */ 100 101 while (*name && isspace(*name)) 102 ++name; 103 104 nl = strchr(name, '\n'); 105 if (nl) 106 *nl = '\0'; 107 108 size = strlen(name); 109 if (size >= len) 110 size = len - 1; 111 memcpy(comm, name, size); 112 comm[size] = '\0'; 113 } else { 114 pr_debug("Name: string not found for pid %d\n", pid); 115 } 116 117 if (tgids) { 118 tgids += 5; /* strlen("Tgid:") */ 119 *tgid = atoi(tgids); 120 } else { 121 pr_debug("Tgid: string not found for pid %d\n", pid); 122 } 123 124 if (ppids) { 125 ppids += 5; /* strlen("PPid:") */ 126 *ppid = atoi(ppids); 127 } else { 128 pr_debug("PPid: string not found for pid %d\n", pid); 129 } 130 131 return 0; 132 } 133 134 static int perf_event__prepare_comm(union perf_event *event, pid_t pid, 135 struct machine *machine, 136 pid_t *tgid, pid_t *ppid) 137 { 138 size_t size; 139 140 *ppid = -1; 141 142 memset(&event->comm, 0, sizeof(event->comm)); 143 144 if (machine__is_host(machine)) { 145 if (perf_event__get_comm_ids(pid, event->comm.comm, 146 sizeof(event->comm.comm), 147 tgid, ppid) != 0) { 148 return -1; 149 } 150 } else { 151 *tgid = machine->pid; 152 } 153 154 if (*tgid < 0) 155 return -1; 156 157 event->comm.pid = *tgid; 158 event->comm.header.type = PERF_RECORD_COMM; 159 160 size = strlen(event->comm.comm) + 1; 161 size = PERF_ALIGN(size, sizeof(u64)); 162 memset(event->comm.comm + size, 0, machine->id_hdr_size); 163 event->comm.header.size = (sizeof(event->comm) - 164 (sizeof(event->comm.comm) - size) + 165 machine->id_hdr_size); 166 event->comm.tid = pid; 167 168 return 0; 169 } 170 171 pid_t perf_event__synthesize_comm(struct perf_tool *tool, 172 union perf_event *event, pid_t pid, 173 perf_event__handler_t process, 174 struct machine *machine) 175 { 176 pid_t tgid, ppid; 177 178 if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0) 179 return -1; 180 181 if (process(tool, event, &synth_sample, machine) != 0) 182 return -1; 183 184 return tgid; 185 } 186 187 static int perf_event__synthesize_fork(struct perf_tool *tool, 188 union perf_event *event, 189 pid_t pid, pid_t tgid, pid_t ppid, 190 perf_event__handler_t process, 191 struct machine *machine) 192 { 193 memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size); 194 195 /* 196 * for main thread set parent to ppid from status file. For other 197 * threads set parent pid to main thread. ie., assume main thread 198 * spawns all threads in a process 199 */ 200 if (tgid == pid) { 201 event->fork.ppid = ppid; 202 event->fork.ptid = ppid; 203 } else { 204 event->fork.ppid = tgid; 205 event->fork.ptid = tgid; 206 } 207 event->fork.pid = tgid; 208 event->fork.tid = pid; 209 event->fork.header.type = PERF_RECORD_FORK; 210 211 event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size); 212 213 if (process(tool, event, &synth_sample, machine) != 0) 214 return -1; 215 216 return 0; 217 } 218 219 int perf_event__synthesize_mmap_events(struct perf_tool *tool, 220 union perf_event *event, 221 pid_t pid, pid_t tgid, 222 perf_event__handler_t process, 223 struct machine *machine, 224 bool mmap_data, 225 unsigned int proc_map_timeout) 226 { 227 char filename[PATH_MAX]; 228 FILE *fp; 229 unsigned long long t; 230 bool truncation = false; 231 unsigned long long timeout = proc_map_timeout * 1000000ULL; 232 int rc = 0; 233 234 if (machine__is_default_guest(machine)) 235 return 0; 236 237 snprintf(filename, sizeof(filename), "%s/proc/%d/maps", 238 machine->root_dir, pid); 239 240 fp = fopen(filename, "r"); 241 if (fp == NULL) { 242 /* 243 * We raced with a task exiting - just return: 244 */ 245 pr_debug("couldn't open %s\n", filename); 246 return -1; 247 } 248 249 event->header.type = PERF_RECORD_MMAP2; 250 t = rdclock(); 251 252 while (1) { 253 char bf[BUFSIZ]; 254 char prot[5]; 255 char execname[PATH_MAX]; 256 char anonstr[] = "//anon"; 257 unsigned int ino; 258 size_t size; 259 ssize_t n; 260 261 if (fgets(bf, sizeof(bf), fp) == NULL) 262 break; 263 264 if ((rdclock() - t) > timeout) { 265 pr_warning("Reading %s time out. " 266 "You may want to increase " 267 "the time limit by --proc-map-timeout\n", 268 filename); 269 truncation = true; 270 goto out; 271 } 272 273 /* ensure null termination since stack will be reused. */ 274 strcpy(execname, ""); 275 276 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */ 277 n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n", 278 &event->mmap2.start, &event->mmap2.len, prot, 279 &event->mmap2.pgoff, &event->mmap2.maj, 280 &event->mmap2.min, 281 &ino, execname); 282 283 /* 284 * Anon maps don't have the execname. 285 */ 286 if (n < 7) 287 continue; 288 289 event->mmap2.ino = (u64)ino; 290 291 /* 292 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c 293 */ 294 if (machine__is_host(machine)) 295 event->header.misc = PERF_RECORD_MISC_USER; 296 else 297 event->header.misc = PERF_RECORD_MISC_GUEST_USER; 298 299 /* map protection and flags bits */ 300 event->mmap2.prot = 0; 301 event->mmap2.flags = 0; 302 if (prot[0] == 'r') 303 event->mmap2.prot |= PROT_READ; 304 if (prot[1] == 'w') 305 event->mmap2.prot |= PROT_WRITE; 306 if (prot[2] == 'x') 307 event->mmap2.prot |= PROT_EXEC; 308 309 if (prot[3] == 's') 310 event->mmap2.flags |= MAP_SHARED; 311 else 312 event->mmap2.flags |= MAP_PRIVATE; 313 314 if (prot[2] != 'x') { 315 if (!mmap_data || prot[0] != 'r') 316 continue; 317 318 event->header.misc |= PERF_RECORD_MISC_MMAP_DATA; 319 } 320 321 out: 322 if (truncation) 323 event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT; 324 325 if (!strcmp(execname, "")) 326 strcpy(execname, anonstr); 327 328 size = strlen(execname) + 1; 329 memcpy(event->mmap2.filename, execname, size); 330 size = PERF_ALIGN(size, sizeof(u64)); 331 event->mmap2.len -= event->mmap.start; 332 event->mmap2.header.size = (sizeof(event->mmap2) - 333 (sizeof(event->mmap2.filename) - size)); 334 memset(event->mmap2.filename + size, 0, machine->id_hdr_size); 335 event->mmap2.header.size += machine->id_hdr_size; 336 event->mmap2.pid = tgid; 337 event->mmap2.tid = pid; 338 339 if (process(tool, event, &synth_sample, machine) != 0) { 340 rc = -1; 341 break; 342 } 343 344 if (truncation) 345 break; 346 } 347 348 fclose(fp); 349 return rc; 350 } 351 352 int perf_event__synthesize_modules(struct perf_tool *tool, 353 perf_event__handler_t process, 354 struct machine *machine) 355 { 356 int rc = 0; 357 struct map *pos; 358 struct map_groups *kmaps = &machine->kmaps; 359 struct maps *maps = &kmaps->maps[MAP__FUNCTION]; 360 union perf_event *event = zalloc((sizeof(event->mmap) + 361 machine->id_hdr_size)); 362 if (event == NULL) { 363 pr_debug("Not enough memory synthesizing mmap event " 364 "for kernel modules\n"); 365 return -1; 366 } 367 368 event->header.type = PERF_RECORD_MMAP; 369 370 /* 371 * kernel uses 0 for user space maps, see kernel/perf_event.c 372 * __perf_event_mmap 373 */ 374 if (machine__is_host(machine)) 375 event->header.misc = PERF_RECORD_MISC_KERNEL; 376 else 377 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 378 379 for (pos = maps__first(maps); pos; pos = map__next(pos)) { 380 size_t size; 381 382 if (__map__is_kernel(pos)) 383 continue; 384 385 size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); 386 event->mmap.header.type = PERF_RECORD_MMAP; 387 event->mmap.header.size = (sizeof(event->mmap) - 388 (sizeof(event->mmap.filename) - size)); 389 memset(event->mmap.filename + size, 0, machine->id_hdr_size); 390 event->mmap.header.size += machine->id_hdr_size; 391 event->mmap.start = pos->start; 392 event->mmap.len = pos->end - pos->start; 393 event->mmap.pid = machine->pid; 394 395 memcpy(event->mmap.filename, pos->dso->long_name, 396 pos->dso->long_name_len + 1); 397 if (process(tool, event, &synth_sample, machine) != 0) { 398 rc = -1; 399 break; 400 } 401 } 402 403 free(event); 404 return rc; 405 } 406 407 static int __event__synthesize_thread(union perf_event *comm_event, 408 union perf_event *mmap_event, 409 union perf_event *fork_event, 410 pid_t pid, int full, 411 perf_event__handler_t process, 412 struct perf_tool *tool, 413 struct machine *machine, 414 bool mmap_data, 415 unsigned int proc_map_timeout) 416 { 417 char filename[PATH_MAX]; 418 DIR *tasks; 419 struct dirent dirent, *next; 420 pid_t tgid, ppid; 421 int rc = 0; 422 423 /* special case: only send one comm event using passed in pid */ 424 if (!full) { 425 tgid = perf_event__synthesize_comm(tool, comm_event, pid, 426 process, machine); 427 428 if (tgid == -1) 429 return -1; 430 431 return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 432 process, machine, mmap_data, 433 proc_map_timeout); 434 } 435 436 if (machine__is_default_guest(machine)) 437 return 0; 438 439 snprintf(filename, sizeof(filename), "%s/proc/%d/task", 440 machine->root_dir, pid); 441 442 tasks = opendir(filename); 443 if (tasks == NULL) { 444 pr_debug("couldn't open %s\n", filename); 445 return 0; 446 } 447 448 while (!readdir_r(tasks, &dirent, &next) && next) { 449 char *end; 450 pid_t _pid; 451 452 _pid = strtol(dirent.d_name, &end, 10); 453 if (*end) 454 continue; 455 456 rc = -1; 457 if (perf_event__prepare_comm(comm_event, _pid, machine, 458 &tgid, &ppid) != 0) 459 break; 460 461 if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid, 462 ppid, process, machine) < 0) 463 break; 464 /* 465 * Send the prepared comm event 466 */ 467 if (process(tool, comm_event, &synth_sample, machine) != 0) 468 break; 469 470 rc = 0; 471 if (_pid == pid) { 472 /* process the parent's maps too */ 473 rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, 474 process, machine, mmap_data, proc_map_timeout); 475 if (rc) 476 break; 477 } 478 } 479 480 closedir(tasks); 481 return rc; 482 } 483 484 int perf_event__synthesize_thread_map(struct perf_tool *tool, 485 struct thread_map *threads, 486 perf_event__handler_t process, 487 struct machine *machine, 488 bool mmap_data, 489 unsigned int proc_map_timeout) 490 { 491 union perf_event *comm_event, *mmap_event, *fork_event; 492 int err = -1, thread, j; 493 494 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 495 if (comm_event == NULL) 496 goto out; 497 498 mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size); 499 if (mmap_event == NULL) 500 goto out_free_comm; 501 502 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 503 if (fork_event == NULL) 504 goto out_free_mmap; 505 506 err = 0; 507 for (thread = 0; thread < threads->nr; ++thread) { 508 if (__event__synthesize_thread(comm_event, mmap_event, 509 fork_event, 510 thread_map__pid(threads, thread), 0, 511 process, tool, machine, 512 mmap_data, proc_map_timeout)) { 513 err = -1; 514 break; 515 } 516 517 /* 518 * comm.pid is set to thread group id by 519 * perf_event__synthesize_comm 520 */ 521 if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) { 522 bool need_leader = true; 523 524 /* is thread group leader in thread_map? */ 525 for (j = 0; j < threads->nr; ++j) { 526 if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) { 527 need_leader = false; 528 break; 529 } 530 } 531 532 /* if not, generate events for it */ 533 if (need_leader && 534 __event__synthesize_thread(comm_event, mmap_event, 535 fork_event, 536 comm_event->comm.pid, 0, 537 process, tool, machine, 538 mmap_data, proc_map_timeout)) { 539 err = -1; 540 break; 541 } 542 } 543 } 544 free(fork_event); 545 out_free_mmap: 546 free(mmap_event); 547 out_free_comm: 548 free(comm_event); 549 out: 550 return err; 551 } 552 553 int perf_event__synthesize_threads(struct perf_tool *tool, 554 perf_event__handler_t process, 555 struct machine *machine, 556 bool mmap_data, 557 unsigned int proc_map_timeout) 558 { 559 DIR *proc; 560 char proc_path[PATH_MAX]; 561 struct dirent dirent, *next; 562 union perf_event *comm_event, *mmap_event, *fork_event; 563 int err = -1; 564 565 if (machine__is_default_guest(machine)) 566 return 0; 567 568 comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size); 569 if (comm_event == NULL) 570 goto out; 571 572 mmap_event = malloc(sizeof(mmap_event->mmap) + machine->id_hdr_size); 573 if (mmap_event == NULL) 574 goto out_free_comm; 575 576 fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size); 577 if (fork_event == NULL) 578 goto out_free_mmap; 579 580 snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir); 581 proc = opendir(proc_path); 582 583 if (proc == NULL) 584 goto out_free_fork; 585 586 while (!readdir_r(proc, &dirent, &next) && next) { 587 char *end; 588 pid_t pid = strtol(dirent.d_name, &end, 10); 589 590 if (*end) /* only interested in proper numerical dirents */ 591 continue; 592 /* 593 * We may race with exiting thread, so don't stop just because 594 * one thread couldn't be synthesized. 595 */ 596 __event__synthesize_thread(comm_event, mmap_event, fork_event, pid, 597 1, process, tool, machine, mmap_data, 598 proc_map_timeout); 599 } 600 601 err = 0; 602 closedir(proc); 603 out_free_fork: 604 free(fork_event); 605 out_free_mmap: 606 free(mmap_event); 607 out_free_comm: 608 free(comm_event); 609 out: 610 return err; 611 } 612 613 struct process_symbol_args { 614 const char *name; 615 u64 start; 616 }; 617 618 static int find_symbol_cb(void *arg, const char *name, char type, 619 u64 start) 620 { 621 struct process_symbol_args *args = arg; 622 623 /* 624 * Must be a function or at least an alias, as in PARISC64, where "_text" is 625 * an 'A' to the same address as "_stext". 626 */ 627 if (!(symbol_type__is_a(type, MAP__FUNCTION) || 628 type == 'A') || strcmp(name, args->name)) 629 return 0; 630 631 args->start = start; 632 return 1; 633 } 634 635 u64 kallsyms__get_function_start(const char *kallsyms_filename, 636 const char *symbol_name) 637 { 638 struct process_symbol_args args = { .name = symbol_name, }; 639 640 if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0) 641 return 0; 642 643 return args.start; 644 } 645 646 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, 647 perf_event__handler_t process, 648 struct machine *machine) 649 { 650 size_t size; 651 const char *mmap_name; 652 char name_buff[PATH_MAX]; 653 struct map *map = machine__kernel_map(machine); 654 struct kmap *kmap; 655 int err; 656 union perf_event *event; 657 658 if (map == NULL) 659 return -1; 660 661 /* 662 * We should get this from /sys/kernel/sections/.text, but till that is 663 * available use this, and after it is use this as a fallback for older 664 * kernels. 665 */ 666 event = zalloc((sizeof(event->mmap) + machine->id_hdr_size)); 667 if (event == NULL) { 668 pr_debug("Not enough memory synthesizing mmap event " 669 "for kernel modules\n"); 670 return -1; 671 } 672 673 mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff)); 674 if (machine__is_host(machine)) { 675 /* 676 * kernel uses PERF_RECORD_MISC_USER for user space maps, 677 * see kernel/perf_event.c __perf_event_mmap 678 */ 679 event->header.misc = PERF_RECORD_MISC_KERNEL; 680 } else { 681 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; 682 } 683 684 kmap = map__kmap(map); 685 size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), 686 "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1; 687 size = PERF_ALIGN(size, sizeof(u64)); 688 event->mmap.header.type = PERF_RECORD_MMAP; 689 event->mmap.header.size = (sizeof(event->mmap) - 690 (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); 691 event->mmap.pgoff = kmap->ref_reloc_sym->addr; 692 event->mmap.start = map->start; 693 event->mmap.len = map->end - event->mmap.start; 694 event->mmap.pid = machine->pid; 695 696 err = process(tool, event, &synth_sample, machine); 697 free(event); 698 699 return err; 700 } 701 702 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) 703 { 704 const char *s; 705 706 if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC) 707 s = " exec"; 708 else 709 s = ""; 710 711 return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid); 712 } 713 714 int perf_event__process_comm(struct perf_tool *tool __maybe_unused, 715 union perf_event *event, 716 struct perf_sample *sample, 717 struct machine *machine) 718 { 719 return machine__process_comm_event(machine, event, sample); 720 } 721 722 int perf_event__process_lost(struct perf_tool *tool __maybe_unused, 723 union perf_event *event, 724 struct perf_sample *sample, 725 struct machine *machine) 726 { 727 return machine__process_lost_event(machine, event, sample); 728 } 729 730 int perf_event__process_aux(struct perf_tool *tool __maybe_unused, 731 union perf_event *event, 732 struct perf_sample *sample __maybe_unused, 733 struct machine *machine) 734 { 735 return machine__process_aux_event(machine, event); 736 } 737 738 int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused, 739 union perf_event *event, 740 struct perf_sample *sample __maybe_unused, 741 struct machine *machine) 742 { 743 return machine__process_itrace_start_event(machine, event); 744 } 745 746 int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused, 747 union perf_event *event, 748 struct perf_sample *sample, 749 struct machine *machine) 750 { 751 return machine__process_lost_samples_event(machine, event, sample); 752 } 753 754 int perf_event__process_switch(struct perf_tool *tool __maybe_unused, 755 union perf_event *event, 756 struct perf_sample *sample __maybe_unused, 757 struct machine *machine) 758 { 759 return machine__process_switch_event(machine, event); 760 } 761 762 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) 763 { 764 return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n", 765 event->mmap.pid, event->mmap.tid, event->mmap.start, 766 event->mmap.len, event->mmap.pgoff, 767 (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x', 768 event->mmap.filename); 769 } 770 771 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp) 772 { 773 return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 774 " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n", 775 event->mmap2.pid, event->mmap2.tid, event->mmap2.start, 776 event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj, 777 event->mmap2.min, event->mmap2.ino, 778 event->mmap2.ino_generation, 779 (event->mmap2.prot & PROT_READ) ? 'r' : '-', 780 (event->mmap2.prot & PROT_WRITE) ? 'w' : '-', 781 (event->mmap2.prot & PROT_EXEC) ? 'x' : '-', 782 (event->mmap2.flags & MAP_SHARED) ? 's' : 'p', 783 event->mmap2.filename); 784 } 785 786 int perf_event__process_mmap(struct perf_tool *tool __maybe_unused, 787 union perf_event *event, 788 struct perf_sample *sample, 789 struct machine *machine) 790 { 791 return machine__process_mmap_event(machine, event, sample); 792 } 793 794 int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused, 795 union perf_event *event, 796 struct perf_sample *sample, 797 struct machine *machine) 798 { 799 return machine__process_mmap2_event(machine, event, sample); 800 } 801 802 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) 803 { 804 return fprintf(fp, "(%d:%d):(%d:%d)\n", 805 event->fork.pid, event->fork.tid, 806 event->fork.ppid, event->fork.ptid); 807 } 808 809 int perf_event__process_fork(struct perf_tool *tool __maybe_unused, 810 union perf_event *event, 811 struct perf_sample *sample, 812 struct machine *machine) 813 { 814 return machine__process_fork_event(machine, event, sample); 815 } 816 817 int perf_event__process_exit(struct perf_tool *tool __maybe_unused, 818 union perf_event *event, 819 struct perf_sample *sample, 820 struct machine *machine) 821 { 822 return machine__process_exit_event(machine, event, sample); 823 } 824 825 size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp) 826 { 827 return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n", 828 event->aux.aux_offset, event->aux.aux_size, 829 event->aux.flags, 830 event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "", 831 event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : ""); 832 } 833 834 size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp) 835 { 836 return fprintf(fp, " pid: %u tid: %u\n", 837 event->itrace_start.pid, event->itrace_start.tid); 838 } 839 840 size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) 841 { 842 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; 843 const char *in_out = out ? "OUT" : "IN "; 844 845 if (event->header.type == PERF_RECORD_SWITCH) 846 return fprintf(fp, " %s\n", in_out); 847 848 return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n", 849 in_out, out ? "next" : "prev", 850 event->context_switch.next_prev_pid, 851 event->context_switch.next_prev_tid); 852 } 853 854 size_t perf_event__fprintf(union perf_event *event, FILE *fp) 855 { 856 size_t ret = fprintf(fp, "PERF_RECORD_%s", 857 perf_event__name(event->header.type)); 858 859 switch (event->header.type) { 860 case PERF_RECORD_COMM: 861 ret += perf_event__fprintf_comm(event, fp); 862 break; 863 case PERF_RECORD_FORK: 864 case PERF_RECORD_EXIT: 865 ret += perf_event__fprintf_task(event, fp); 866 break; 867 case PERF_RECORD_MMAP: 868 ret += perf_event__fprintf_mmap(event, fp); 869 break; 870 case PERF_RECORD_MMAP2: 871 ret += perf_event__fprintf_mmap2(event, fp); 872 break; 873 case PERF_RECORD_AUX: 874 ret += perf_event__fprintf_aux(event, fp); 875 break; 876 case PERF_RECORD_ITRACE_START: 877 ret += perf_event__fprintf_itrace_start(event, fp); 878 break; 879 case PERF_RECORD_SWITCH: 880 case PERF_RECORD_SWITCH_CPU_WIDE: 881 ret += perf_event__fprintf_switch(event, fp); 882 break; 883 default: 884 ret += fprintf(fp, "\n"); 885 } 886 887 return ret; 888 } 889 890 int perf_event__process(struct perf_tool *tool __maybe_unused, 891 union perf_event *event, 892 struct perf_sample *sample, 893 struct machine *machine) 894 { 895 return machine__process_event(machine, event, sample); 896 } 897 898 void thread__find_addr_map(struct thread *thread, u8 cpumode, 899 enum map_type type, u64 addr, 900 struct addr_location *al) 901 { 902 struct map_groups *mg = thread->mg; 903 struct machine *machine = mg->machine; 904 bool load_map = false; 905 906 al->machine = machine; 907 al->thread = thread; 908 al->addr = addr; 909 al->cpumode = cpumode; 910 al->filtered = 0; 911 912 if (machine == NULL) { 913 al->map = NULL; 914 return; 915 } 916 917 if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { 918 al->level = 'k'; 919 mg = &machine->kmaps; 920 load_map = true; 921 } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) { 922 al->level = '.'; 923 } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { 924 al->level = 'g'; 925 mg = &machine->kmaps; 926 load_map = true; 927 } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) { 928 al->level = 'u'; 929 } else { 930 al->level = 'H'; 931 al->map = NULL; 932 933 if ((cpumode == PERF_RECORD_MISC_GUEST_USER || 934 cpumode == PERF_RECORD_MISC_GUEST_KERNEL) && 935 !perf_guest) 936 al->filtered |= (1 << HIST_FILTER__GUEST); 937 if ((cpumode == PERF_RECORD_MISC_USER || 938 cpumode == PERF_RECORD_MISC_KERNEL) && 939 !perf_host) 940 al->filtered |= (1 << HIST_FILTER__HOST); 941 942 return; 943 } 944 try_again: 945 al->map = map_groups__find(mg, type, al->addr); 946 if (al->map == NULL) { 947 /* 948 * If this is outside of all known maps, and is a negative 949 * address, try to look it up in the kernel dso, as it might be 950 * a vsyscall or vdso (which executes in user-mode). 951 * 952 * XXX This is nasty, we should have a symbol list in the 953 * "[vdso]" dso, but for now lets use the old trick of looking 954 * in the whole kernel symbol list. 955 */ 956 if (cpumode == PERF_RECORD_MISC_USER && machine && 957 mg != &machine->kmaps && 958 machine__kernel_ip(machine, al->addr)) { 959 mg = &machine->kmaps; 960 load_map = true; 961 goto try_again; 962 } 963 } else { 964 /* 965 * Kernel maps might be changed when loading symbols so loading 966 * must be done prior to using kernel maps. 967 */ 968 if (load_map) 969 map__load(al->map, machine->symbol_filter); 970 al->addr = al->map->map_ip(al->map, al->addr); 971 } 972 } 973 974 void thread__find_addr_location(struct thread *thread, 975 u8 cpumode, enum map_type type, u64 addr, 976 struct addr_location *al) 977 { 978 thread__find_addr_map(thread, cpumode, type, addr, al); 979 if (al->map != NULL) 980 al->sym = map__find_symbol(al->map, al->addr, 981 thread->mg->machine->symbol_filter); 982 else 983 al->sym = NULL; 984 } 985 986 /* 987 * Callers need to drop the reference to al->thread, obtained in 988 * machine__findnew_thread() 989 */ 990 int perf_event__preprocess_sample(const union perf_event *event, 991 struct machine *machine, 992 struct addr_location *al, 993 struct perf_sample *sample) 994 { 995 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 996 struct thread *thread = machine__findnew_thread(machine, sample->pid, 997 sample->tid); 998 999 if (thread == NULL) 1000 return -1; 1001 1002 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); 1003 /* 1004 * Have we already created the kernel maps for this machine? 1005 * 1006 * This should have happened earlier, when we processed the kernel MMAP 1007 * events, but for older perf.data files there was no such thing, so do 1008 * it now. 1009 */ 1010 if (cpumode == PERF_RECORD_MISC_KERNEL && 1011 machine__kernel_map(machine) == NULL) 1012 machine__create_kernel_maps(machine); 1013 1014 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->ip, al); 1015 dump_printf(" ...... dso: %s\n", 1016 al->map ? al->map->dso->long_name : 1017 al->level == 'H' ? "[hypervisor]" : "<not found>"); 1018 1019 if (thread__is_filtered(thread)) 1020 al->filtered |= (1 << HIST_FILTER__THREAD); 1021 1022 al->sym = NULL; 1023 al->cpu = sample->cpu; 1024 al->socket = -1; 1025 1026 if (al->cpu >= 0) { 1027 struct perf_env *env = machine->env; 1028 1029 if (env && env->cpu) 1030 al->socket = env->cpu[al->cpu].socket_id; 1031 } 1032 1033 if (al->map) { 1034 struct dso *dso = al->map->dso; 1035 1036 if (symbol_conf.dso_list && 1037 (!dso || !(strlist__has_entry(symbol_conf.dso_list, 1038 dso->short_name) || 1039 (dso->short_name != dso->long_name && 1040 strlist__has_entry(symbol_conf.dso_list, 1041 dso->long_name))))) { 1042 al->filtered |= (1 << HIST_FILTER__DSO); 1043 } 1044 1045 al->sym = map__find_symbol(al->map, al->addr, 1046 machine->symbol_filter); 1047 } 1048 1049 if (symbol_conf.sym_list && 1050 (!al->sym || !strlist__has_entry(symbol_conf.sym_list, 1051 al->sym->name))) { 1052 al->filtered |= (1 << HIST_FILTER__SYMBOL); 1053 } 1054 1055 return 0; 1056 } 1057 1058 /* 1059 * The preprocess_sample method will return with reference counts for the 1060 * in it, when done using (and perhaps getting ref counts if needing to 1061 * keep a pointer to one of those entries) it must be paired with 1062 * addr_location__put(), so that the refcounts can be decremented. 1063 */ 1064 void addr_location__put(struct addr_location *al) 1065 { 1066 thread__zput(al->thread); 1067 } 1068 1069 bool is_bts_event(struct perf_event_attr *attr) 1070 { 1071 return attr->type == PERF_TYPE_HARDWARE && 1072 (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) && 1073 attr->sample_period == 1; 1074 } 1075 1076 bool sample_addr_correlates_sym(struct perf_event_attr *attr) 1077 { 1078 if (attr->type == PERF_TYPE_SOFTWARE && 1079 (attr->config == PERF_COUNT_SW_PAGE_FAULTS || 1080 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN || 1081 attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)) 1082 return true; 1083 1084 if (is_bts_event(attr)) 1085 return true; 1086 1087 return false; 1088 } 1089 1090 void perf_event__preprocess_sample_addr(union perf_event *event, 1091 struct perf_sample *sample, 1092 struct thread *thread, 1093 struct addr_location *al) 1094 { 1095 u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 1096 1097 thread__find_addr_map(thread, cpumode, MAP__FUNCTION, sample->addr, al); 1098 if (!al->map) 1099 thread__find_addr_map(thread, cpumode, MAP__VARIABLE, 1100 sample->addr, al); 1101 1102 al->cpu = sample->cpu; 1103 al->sym = NULL; 1104 1105 if (al->map) 1106 al->sym = map__find_symbol(al->map, al->addr, NULL); 1107 } 1108