#include <linux/types.h>
#include <sys/mman.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

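/*
 * For reference, an illustrative /proc/<pid>/status excerpt showing the
 * three fields parsed below (values made up):
 *
 *	Name:	cat
 *	Tgid:	1234
 *	PPid:	1000
 */
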
/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *nl, *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		name += 5;  /* strlen("Name:") */

		while (*name && isspace(*name))
			++name;

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;  /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;  /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}

static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

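/*
 * Worked example of the sizing above (illustrative numbers): for comm "cat",
 * strlen + 1 == 4, which PERF_ALIGN() rounds up to 8 bytes.  With the
 * 16-byte comm field used here, header.size becomes
 * sizeof(event->comm) - (16 - 8) + id_hdr_size, i.e. the fixed part plus
 * one u64-aligned chunk of the name plus the sample id trailer.
 */
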
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file.  For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid = tgid;
	event->fork.tid = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s timed out. "
				   "You may want to increase "
				   "the time limit with --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
			   &event->mmap2.start, &event->mmap2.len, prot,
			   &event->mmap2.pgoff, &event->mmap2.maj,
			   &event->mmap2.min,
			   &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}

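/*
 * Illustrative example (made-up values, not from a real trace): the maps line
 *
 *	7f5c1e4d6000-7f5c1e68c000 r-xp 00000000 08:01 1186511  /lib/x86_64-linux-gnu/libc-2.23.so
 *
 * is synthesized above as an MMAP2 event with start=0x7f5c1e4d6000,
 * len=0x1b6000 (end minus start), pgoff=0, maj=0x08, min=0x01, ino=1186511,
 * prot=PROT_READ|PROT_EXEC, flags=MAP_PRIVATE and
 * filename="/lib/x86_64-linux-gnu/libc-2.23.so".
 */
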
int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len   = pos->end - pos->start;
		event->mmap.pid   = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
								process, machine, mmap_data,
								proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}

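/*
 * Illustrative event stream produced by a full synthesis of a process 1234
 * that has one extra thread 1235 (made-up pids):
 *
 *	FORK(pid=1234, tid=1234, ppid=<PPid: from status>)
 *	COMM(pid=1234, tid=1234)
 *	MMAP2(...)		<- one per line of /proc/1234/maps
 *	FORK(pid=1234, tid=1235, ppid=1234)
 *	COMM(pid=1234, tid=1235)
 *
 * Only the thread group leader gets its maps synthesized, since all
 * threads share the address space.
 */
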
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent *dirent;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct process_symbol_args {
	const char *name;
	u64	    start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until that
	 * is available use this, and once it is, keep this as a fallback for
	 * older kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
				   (sizeof(event->mmap.filename) - size) +
				   machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len   = map->end - event->mmap.start;
	event->mmap.pid   = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}

static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* bit position of the cpu is + 1 */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *	array = size of 'struct cpu_map_entries' +
	 *		number of cpus * sizeof(u16)
	 *
	 *	mask  = size of 'struct cpu_map_mask' +
	 *		maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type  = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type  = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

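/*
 * Illustrative sizing on a 64-bit build (assuming sizeof(u16) == 2 and
 * sizeof(long) == 8, ignoring the small fixed headers): for cpus {0,1}
 * the array form needs 2 entries * 2 bytes, while the mask form needs
 * BITS_TO_LONGS(2) == 1 long (8 bytes), so the array wins.  For cpus
 * {0..255} the array needs 512 bytes of entries but the mask only
 * BITS_TO_LONGS(256) == 4 longs (32 bytes), so the mask is chosen.
 */
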
void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event *cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type   = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id     = id;
	event.cpu    = cpu;
	event.thread = thread;
	event.val    = count->val;
	event.ena    = count->ena;
	event.run    = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

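/*
 * Usage sketch (illustrative, not a verbatim call sequence): a typical
 * consumer such as 'perf stat record' emits one STAT_CONFIG event
 * describing aggregation mode, interval and scaling, then per-counter
 * STAT events, with each round closed by a STAT_ROUND event carrying
 * the timestamp of the round.
 */
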
int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

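/*
 * Illustrative output of perf_event__fprintf_mmap() above and
 * perf_event__fprintf_mmap2() below (made-up values), including the
 * "PERF_RECORD_..." prefix printed by perf_event__fprintf():
 *
 *	PERF_RECORD_MMAP 1234/1234: [0x400000(0xc000) @ 0]: x /bin/cat
 *	PERF_RECORD_MMAP2 1234/1234: [0x400000(0xc000) @ 0 08:01 41038 0]: r-xp /bin/cat
 */
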
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	/*
	 * al->level is: 'k' for kernel on the host, '.' for user on the
	 * host, 'g'/'u' for the guest equivalents and 'H' for everything
	 * else (hypervisor, or a host/guest side we aren't profiling).
	 */
	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr,
					   thread->mg->machine->symbol_filter);
	else
		al->sym = NULL;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	if (symbol_conf.sym_list &&
	    (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
					     al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

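/*
 * Typical usage of the pair above (a minimal sketch; error handling and the
 * surrounding sample processing elided):
 *
 *	struct addr_location al;
 *
 *	if (machine__resolve(machine, &al, sample) < 0)
 *		return;
 *	...use al.map, al.sym, al.thread...
 *	addr_location__put(&al);
 */
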
/*
 * machine__resolve() returns with a reference count held on al->thread;
 * when done using the addr_location (and perhaps after taking references
 * to entries in it that are to be kept around) it must be paired with
 * addr_location__put(), so that the reference counts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr, NULL);
}