#include <linux/types.h>
#include <sys/mman.h>
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"

static const char *perf_event__names[] = {
	[0] = "TOTAL",
	[PERF_RECORD_MMAP] = "MMAP",
	[PERF_RECORD_MMAP2] = "MMAP2",
	[PERF_RECORD_LOST] = "LOST",
	[PERF_RECORD_COMM] = "COMM",
	[PERF_RECORD_EXIT] = "EXIT",
	[PERF_RECORD_THROTTLE] = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK] = "FORK",
	[PERF_RECORD_READ] = "READ",
	[PERF_RECORD_SAMPLE] = "SAMPLE",
	[PERF_RECORD_AUX] = "AUX",
	[PERF_RECORD_ITRACE_START] = "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES] = "LOST_SAMPLES",
	[PERF_RECORD_SWITCH] = "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE] = "SWITCH_CPU_WIDE",
	[PERF_RECORD_HEADER_ATTR] = "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE] = "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA] = "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID] = "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND] = "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX] = "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO] = "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE] = "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR] = "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP] = "THREAD_MAP",
	[PERF_RECORD_CPU_MAP] = "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG] = "STAT_CONFIG",
	[PERF_RECORD_STAT] = "STAT",
	[PERF_RECORD_STAT_ROUND] = "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE] = "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV] = "TIME_CONV",
};

const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

static int perf_tool__process_synth_event(struct perf_tool *tool,
					  union perf_event *event,
					  struct machine *machine,
					  perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}
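/*
 * perf_event__get_comm_ids() below pulls the "Name:", "Tgid:" and "PPid:"
 * lines out of /proc/PID/status. For illustration, the relevant lines of
 * that file look like this (values made up):
 *
 *	Name:	bash
 *	Tgid:	2015
 *	PPid:	1954
 */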
/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid)
{
	char filename[PATH_MAX];
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *nl, *name, *tgids, *ppids;

	*tgid = -1;
	*ppid = -1;

	snprintf(filename, sizeof(filename), "/proc/%d/status", pid);

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   pid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(bf, "Tgid:");
	ppids = strstr(bf, "PPid:");

	if (name) {
		name += 5;	/* strlen("Name:") */

		while (*name && isspace(*name))
			++name;

		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", pid);
	}

	if (tgids) {
		tgids += 5;	/* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", pid);
	}

	if (ppids) {
		ppids += 5;	/* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", pid);
	}

	return 0;
}

static int perf_event__prepare_comm(union perf_event *event, pid_t pid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = pid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;

	if (perf_event__prepare_comm(event, pid, machine, &tgid, &ppid) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}
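/*
 * Worked example for the header.size arithmetic in perf_event__prepare_comm()
 * above, assuming the usual 16 byte comm field: a comm of "bash" occupies 5
 * bytes including the NUL, which PERF_ALIGN() rounds up to 8, so the unused
 * 16 - 8 = 8 trailing bytes are subtracted from sizeof(event->comm) before
 * machine->id_hdr_size is added back on.
 */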
static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file. For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all the threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid = tgid;
	event->fork.tid = pid;
	event->fork.header.type = PERF_RECORD_FORK;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}
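/*
 * Synthesize PERF_RECORD_MMAP2 events for an already running process by
 * parsing its /proc/PID/maps file. If parsing takes longer than
 * proc_map_timeout milliseconds, it is abandoned and the last synthesized
 * event is flagged with PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT.
 */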
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data,
				       unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	FILE *fp;
	unsigned long long t;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/maps",
		 machine->root_dir, pid);

	fp = fopen(filename, "r");
	if (fp == NULL) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", filename);
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (1) {
		char bf[BUFSIZ];
		char prot[5];
		char execname[PATH_MAX];
		char anonstr[] = "//anon";
		unsigned int ino;
		size_t size;
		ssize_t n;

		if (fgets(bf, sizeof(bf), fp) == NULL)
			break;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s timed out. "
				   "You may want to increase "
				   "the time limit with --proc-map-timeout\n",
				   filename);
			truncation = true;
			goto out;
		}

		/* ensure null termination since stack will be reused. */
		strcpy(execname, "");

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
		n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
			   &event->mmap2.start, &event->mmap2.len, prot,
			   &event->mmap2.pgoff, &event->mmap2.maj,
			   &event->mmap2.min,
			   &ino, execname);

		/*
		 * Anon maps don't have the execname.
		 */
		if (n < 7)
			continue;

		event->mmap2.ino = (u64)ino;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		/* map protection and flags bits */
		event->mmap2.prot = 0;
		event->mmap2.flags = 0;
		if (prot[0] == 'r')
			event->mmap2.prot |= PROT_READ;
		if (prot[1] == 'w')
			event->mmap2.prot |= PROT_WRITE;
		if (prot[2] == 'x')
			event->mmap2.prot |= PROT_EXEC;

		if (prot[3] == 's')
			event->mmap2.flags |= MAP_SHARED;
		else
			event->mmap2.flags |= MAP_PRIVATE;

		if (prot[2] != 'x') {
			if (!mmap_data || prot[0] != 'r')
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(execname, ""))
			strcpy(execname, anonstr);

		size = strlen(execname) + 1;
		memcpy(event->mmap2.filename, execname, size);
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	fclose(fp);
	return rc;
}

int perf_event__synthesize_modules(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map *pos;
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps[MAP__FUNCTION];
	union perf_event *event = zalloc((sizeof(event->mmap) +
					  machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	event->header.type = PERF_RECORD_MMAP;

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		size_t size;

		if (__map__is_kernel(pos))
			continue;

		size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
					   (sizeof(event->mmap.filename) - size));
		memset(event->mmap.filename + size, 0, machine->id_hdr_size);
		event->mmap.header.size += machine->id_hdr_size;
		event->mmap.start = pos->start;
		event->mmap.len = pos->end - pos->start;
		event->mmap.pid = machine->pid;

		memcpy(event->mmap.filename, pos->dso->long_name,
		       pos->dso->long_name_len + 1);
		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}
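/*
 * Synthesize the events describing one thread. With full == 0 only a COMM
 * plus the MMAP2 events for the thread named by 'pid' are sent; with
 * full != 0 every task under /proc/PID/task gets a COMM and a FORK event,
 * and the thread group leader additionally gets its maps synthesized.
 */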
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      pid_t pid, int full,
				      perf_event__handler_t process,
				      struct perf_tool *tool,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	char filename[PATH_MAX];
	DIR *tasks;
	struct dirent *dirent;
	pid_t tgid, ppid;
	int rc = 0;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
							  process, machine, mmap_data,
							  proc_map_timeout);
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	tasks = opendir(filename);
	if (tasks == NULL) {
		pr_debug("couldn't open %s\n", filename);
		return 0;
	}

	while ((dirent = readdir(tasks)) != NULL) {
		char *end;
		pid_t _pid;

		_pid = strtol(dirent->d_name, &end, 10);
		if (*end)
			continue;

		rc = -1;
		if (perf_event__prepare_comm(comm_event, _pid, machine,
					     &tgid, &ppid) != 0)
			break;

		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;
		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
								process, machine, mmap_data,
								proc_map_timeout);
			if (rc)
				break;
		}
	}

	closedir(tasks);
	return rc;
}
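/*
 * Synthesize events for every thread in 'threads'. If a thread's group
 * leader is not itself in the map, synthesize events for the leader as
 * well, so that the report side always knows about the enclosing process.
 */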
int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool mmap_data,
				      unsigned int proc_map_timeout)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event,
					       thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       mmap_data, proc_map_timeout)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       mmap_data, proc_map_timeout)) {
				err = -1;
				break;
			}
		}
	}
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool mmap_data,
				   unsigned int proc_map_timeout)
{
	DIR *proc;
	char proc_path[PATH_MAX];
	struct dirent *dirent;
	union perf_event *comm_event, *mmap_event, *fork_event;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	proc = opendir(proc_path);

	if (proc == NULL)
		goto out_free_fork;

	while ((dirent = readdir(proc)) != NULL) {
		char *end;
		pid_t pid = strtol(dirent->d_name, &end, 10);

		if (*end) /* only interested in proper numerical dirents */
			continue;
		/*
		 * We may race with an exiting thread, so don't stop just
		 * because one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
					   1, process, tool, machine, mmap_data,
					   proc_map_timeout);
	}

	err = 0;
	closedir(proc);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}
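/*
 * Helpers for looking up the start address of a named symbol in a kallsyms
 * file: kallsyms__parse() calls find_symbol_cb() for each parsed line and
 * stops as soon as the callback returns non-zero, i.e. on the first match.
 */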
struct process_symbol_args {
	const char *name;
	u64 start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as in PARISC64, where "_text" is
	 * an 'A' to the same address as "_stext".
	 */
	if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

u64 kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return 0;

	return args.start;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	size_t size;
	const char *mmap_name;
	char name_buff[PATH_MAX];
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;
	union perf_event *event;

	if (symbol_conf.kptr_restrict)
		return -1;
	if (map == NULL)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until
	 * that is available, use this, and once it is, keep this as a
	 * fallback for older kernels.
	 */
	event = zalloc((sizeof(event->mmap) + machine->id_hdr_size));
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for the kernel map\n");
		return -1;
	}

	mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff));
	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	kmap = map__kmap(map);
	size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
			"%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	event->mmap.header.type = PERF_RECORD_MMAP;
	event->mmap.header.size = (sizeof(event->mmap) -
			(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
	event->mmap.pgoff = kmap->ref_reloc_sym->addr;
	event->mmap.start = map->start;
	event->mmap.len = map->end - event->mmap.start;
	event->mmap.pid = machine->pid;

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct thread_map_event_entry *entry = &event->thread_map.entries[i];
		char *comm = thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

static void synthesize_cpus(struct cpu_map_entries *cpus,
			    struct cpu_map *map)
{
	int i;

	cpus->nr = map->nr;

	for (i = 0; i < map->nr; i++)
		cpus->cpu[i] = map->map[i];
}
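/*
 * Fill in the bitmask flavour of a cpu map: one bit per cpu present in
 * 'map', with 'max' being the highest cpu number plus one, as computed
 * by mask_size() below.
 */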
static void synthesize_mask(struct cpu_map_mask *mask,
			    struct cpu_map *map, int max)
{
	int i;

	mask->nr = BITS_TO_LONGS(max);
	mask->long_size = sizeof(long);

	for (i = 0; i < map->nr; i++)
		set_bit(map->map[i], mask->mask);
}

static size_t cpus_size(struct cpu_map *map)
{
	return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16);
}

static size_t mask_size(struct cpu_map *map, int *max)
{
	int i;

	*max = 0;

	for (i = 0; i < map->nr; i++) {
		/* the cpu's bit position is its number, so we need cpu + 1 bits */
		int bit = map->map[i] + 1;

		if (bit > *max)
			*max = bit;
	}

	return sizeof(struct cpu_map_mask) + BITS_TO_LONGS(*max) * sizeof(long);
}

void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max)
{
	size_t size_cpus, size_mask;
	bool is_dummy = cpu_map__empty(map);

	/*
	 * Both array and mask data have variable size based
	 * on the number of cpus and their actual values.
	 * The size of the 'struct cpu_map_data' is:
	 *
	 *	array = size of 'struct cpu_map_entries' +
	 *		number of cpus * sizeof(u16)
	 *
	 *	mask  = size of 'struct cpu_map_mask' +
	 *		maximum cpu bit converted to size of longs
	 *
	 * and finally + the size of 'struct cpu_map_data'.
	 */
	size_cpus = cpus_size(map);
	size_mask = mask_size(map, max);

	if (is_dummy || (size_cpus < size_mask)) {
		*size += size_cpus;
		*type = PERF_CPU_MAP__CPUS;
	} else {
		*size += size_mask;
		*type = PERF_CPU_MAP__MASK;
	}

	*size += sizeof(struct cpu_map_data);
	return zalloc(*size);
}

void cpu_map_data__synthesize(struct cpu_map_data *data, struct cpu_map *map,
			      u16 type, int max)
{
	data->type = type;

	switch (type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus((struct cpu_map_entries *) data->data, map);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask((struct cpu_map_mask *) data->data, map, max);
		break;
	default:
		break;
	}
}

static struct cpu_map_event *cpu_map_event__new(struct cpu_map *map)
{
	size_t size = sizeof(struct cpu_map_event);
	struct cpu_map_event *event;
	int max;
	u16 type;

	event = cpu_map_data__alloc(map, &size, &type, &max);
	if (!event)
		return NULL;

	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = size;
	event->data.type = type;

	cpu_map_data__synthesize(&event->data, map, type, max);
	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   struct cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct cpu_map_event *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}
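/*
 * Emit a PERF_RECORD_STAT_CONFIG event carrying one (tag, val) pair per
 * config term, so that e.g. a later 'perf stat report' run can recreate
 * the aggregation mode, interval and scaling used at record time.
 */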
int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct stat_config_event *event;
	int size, i = 0, err;

	size = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE, config->aggr_mode)
	ADD(INTERVAL, config->interval)
	ADD(SCALE, config->scale)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				u32 cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct stat_event event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id = id;
	event.cpu = cpu;
	event.thread = thread;
	event.val = count->val;
	event.ena = count->ena;
	event.run = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct stat_round_event event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct stat_config_event *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE, scale)
		CASE(INTERVAL, interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRIu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}
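/*
 * Like the handlers above, the perf_event__process_*() functions below are
 * thin adapters with the perf_tool callback signature that just forward
 * the event to the corresponding machine__process_*() routine.
 */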
int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
			   " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
		       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
		       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
		       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
		       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
		       event->mmap2.filename);
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}
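/*
 * Example of a line printed by perf_event__fprintf_aux() below
 * (illustrative values only):
 *
 *	 offset: 0x13a000 size: 0x10000 flags: 0x1 [T]
 */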
size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = out ? "OUT" : "IN ";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s %s pid/tid: %5u/%-5u\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

size_t perf_event__fprintf(union perf_event *event, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}
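/*
 * Resolve 'addr' to a map in 'thread'. The cpumode picks both the map
 * groups searched and the level character stored in al->level: 'k' host
 * kernel, '.' host user, 'g' guest kernel, 'u' guest user, and 'H' for
 * anything that cannot be attributed, e.g. hypervisor samples, or
 * guest/host samples when the corresponding side is not being profiled.
 */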
void thread__find_addr_map(struct thread *thread, u8 cpumode,
			   enum map_type type, u64 addr,
			   struct addr_location *al)
{
	struct map_groups *mg = thread->mg;
	struct machine *machine = mg->machine;
	bool load_map = false;

	al->machine = machine;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		mg = &machine->kmaps;
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
		     cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
		    !perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
		     cpumode == PERF_RECORD_MISC_KERNEL) &&
		    !perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return;
	}
try_again:
	al->map = map_groups__find(mg, type, al->addr);
	if (al->map == NULL) {
		/*
		 * If this is outside of all known maps, and is a negative
		 * address, try to look it up in the kernel dso, as it might be
		 * a vsyscall or vdso (which executes in user-mode).
		 *
		 * XXX This is nasty, we should have a symbol list in the
		 * "[vdso]" dso, but for now lets use the old trick of looking
		 * in the whole kernel symbol list.
		 */
		if (cpumode == PERF_RECORD_MISC_USER && machine &&
		    mg != &machine->kmaps &&
		    machine__kernel_ip(machine, al->addr)) {
			mg = &machine->kmaps;
			load_map = true;
			goto try_again;
		}
	} else {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map, machine->symbol_filter);
		al->addr = al->map->map_ip(al->map, al->addr);
	}
}

void thread__find_addr_location(struct thread *thread,
				u8 cpumode, enum map_type type, u64 addr,
				struct addr_location *al)
{
	thread__find_addr_map(thread, cpumode, type, addr, al);
	if (al->map != NULL)
		al->sym = map__find_symbol(al->map, al->addr,
					   thread->mg->machine->symbol_filter);
	else
		al->sym = NULL;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	/*
	 * Have we already created the kernel maps for this machine?
	 *
	 * This should have happened earlier, when we processed the kernel MMAP
	 * events, but for older perf.data files there was no such thing, so do
	 * it now.
	 */
	if (sample->cpumode == PERF_RECORD_MISC_KERNEL &&
	    machine__kernel_map(machine) == NULL)
		machine__create_kernel_maps(machine);

	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
		    al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr,
					   machine->symbol_filter);
	}

	if (symbol_conf.sym_list &&
	    (!al->sym || !strlist__has_entry(symbol_conf.sym_list,
					     al->sym->name))) {
		al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}
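/*
 * Typical usage of the above (sketch):
 *
 *	struct addr_location al;
 *
 *	if (machine__resolve(machine, &al, sample) < 0)
 *		return -1;
 *	... use al.map, al.sym, al.thread ...
 *	addr_location__put(&al);
 */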
/*
 * machine__resolve() returns with reference counts acquired for the entries
 * in the addr_location (the thread, in particular); when the caller is done
 * using them (and perhaps after taking extra references if a pointer to one
 * of those entries is kept) it must pair the call with addr_location__put(),
 * so that the refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

bool is_bts_event(struct perf_event_attr *attr)
{
	/*
	 * Compare with '==': a bitwise AND against
	 * PERF_COUNT_HW_BRANCH_INSTRUCTIONS would also match unrelated
	 * hardware events whose config happens to share that bit.
	 */
	return attr->type == PERF_TYPE_HARDWARE &&
	       attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	       attr->sample_period == 1;
}

bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->addr, al);
	if (!al->map)
		thread__find_addr_map(thread, sample->cpumode, MAP__VARIABLE,
				      sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr, NULL);
}