#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>

#include "session.h"
#include "sort.h"
#include "util.h"

static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format\n");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		pr_err("failed to open file: %s", self->filename);
		if (!strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -errno;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format\n");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

struct perf_session *perf_session__new(const char *filename, int mode, bool force)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	self->stats_by_id = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->unknown_events = 0;
	map_groups__init(&self->kmaps);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
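		 * Here, in O_WRONLY mode (i.e. when recording), no such
		 * event will be read, so the kernel maps have to be
		 * created up front.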
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return true;

	return false;
}

struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				break;
			}
			continue;
		}

		thread__find_addr_location(thread, self, cpumode,
					   MAP__FUNCTION, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
}

static const char *event__name[] = {
	[0]                      = "TOTAL",
	[PERF_RECORD_MMAP]       = "MMAP",
	[PERF_RECORD_LOST]       = "LOST",
	[PERF_RECORD_COMM]       = "COMM",
	[PERF_RECORD_EXIT]       = "EXIT",
	[PERF_RECORD_THROTTLE]   = "THROTTLE",
	[PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
	[PERF_RECORD_FORK]       = "FORK",
	[PERF_RECORD_READ]       = "READ",
	[PERF_RECORD_SAMPLE]     = "SAMPLE",
};

unsigned long event__total[PERF_RECORD_HEADER_MAX];

void event__print_totals(void)
{
	int i;
	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		if (!event__name[i])
			continue;
		pr_info("%10s events: %10lu\n",
			event__name[i], event__total[i]);
	}
}

void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

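/*
 * Byte-swap helpers: when a perf.data file comes from a machine of the
 * opposite endianness (self->header.needs_swap), each event is converted
 * in place, field by field, before being handed to the ops callbacks in
 * perf_session__process_event() below.
 */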
static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP]   = event__mmap_swap,
	[PERF_RECORD_COMM]   = event__comm_swap,
	[PERF_RECORD_FORK]   = event__task_swap,
	[PERF_RECORD_EXIT]   = event__task_swap,
	[PERF_RECORD_LOST]   = event__all64_swap,
	[PERF_RECORD_READ]   = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

static int perf_session__process_event(struct perf_session *self,
				       event_t *event,
				       struct perf_event_ops *ops,
				       u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		++event__total[0];
		++event__total[event->header.type];
	}

	/* bound the lookup: unknown types must not index past the table */
	if (self->header.needs_swap &&
	    event->header.type < PERF_RECORD_HEADER_MAX &&
	    event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return ops->sample(event, self);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	default:
		self->unknown_events++;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

int perf_header__read_build_ids(struct perf_header *self,
				int input, u64 offset, u64 size)
{
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;
	int err = -1;

	while (offset < limit) {
		struct dso *dso;
		ssize_t len;
		struct list_head *head = &dsos__user;

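		/*
		 * On disk each entry is a fixed-size build_id_event
		 * followed by the DSO path; bev.header.size covers both,
		 * which is why the loop advances by that amount below.
		 */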
		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (self->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;

		if (bev.header.misc & PERF_RECORD_MISC_KERNEL)
			head = &dsos__kernel;

		dso = __dsos__findnew(head, filename);
		if (dso != NULL) {
			dso__set_build_id(dso, &bev.build_id);
			if (head == &dsos__kernel && filename[0] == '[')
				dso->kernel = 1;
		}

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	/* header-only events (size == sizeof(header)) carry no payload */
	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
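		 * Aligning head down to a u64 boundary and then stepping
		 * by eight keeps the cursor on the grid that event
		 * headers are laid out on.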
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		/*
		 * Byte-swapping is done in place, so map a private,
		 * writable copy; the file itself is never modified.
		 */
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	/*
	 * Re-read the raw size: a zero-sized header must take the
	 * recovery path below, not the bumped value from above.
	 */
	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
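		 * (Same recovery strategy as the pipe reader above.)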
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
out_err:
	ui_progress__delete(progress);
	return err;
}

int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

int perf_session__set_kallsyms_ref_reloc_sym(struct perf_session *self,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;

	self->ref_reloc_sym.name = strdup(symbol_name);
	if (self->ref_reloc_sym.name == NULL)
		return -ENOMEM;

	bracket = strchr(self->ref_reloc_sym.name, ']');
	if (bracket)
		*bracket = '\0';

	self->ref_reloc_sym.addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(self->vmlinux_maps[i]);
		kmap->ref_reloc_sym = &self->ref_reloc_sym;
	}

	return 0;
}
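
/*
 * Usage sketch (not part of this file): how a builtin tool would
 * typically drive this API. The callback and variable names
 * (process_sample_event, input_name) are hypothetical placeholders
 * for whatever the tool implements:
 *
 *	static struct perf_event_ops ops = {
 *		.sample	= process_sample_event,
 *		.mmap	= event__process_mmap,
 *		.comm	= event__process_comm,
 *	};
 *
 *	struct perf_session *session = perf_session__new(input_name,
 *							 O_RDONLY, false);
 *	if (session == NULL)
 *		return -ENOMEM;
 *
 *	err = perf_session__process_events(session, &ops);
 *	perf_session__delete(session);
 */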