1 #define _FILE_OFFSET_BITS 64 2 3 #include <linux/kernel.h> 4 5 #include <byteswap.h> 6 #include <unistd.h> 7 #include <sys/types.h> 8 #include <sys/mman.h> 9 10 #include "evlist.h" 11 #include "evsel.h" 12 #include "session.h" 13 #include "tool.h" 14 #include "sort.h" 15 #include "util.h" 16 #include "cpumap.h" 17 #include "event-parse.h" 18 #include "perf_regs.h" 19 #include "vdso.h" 20 21 static int perf_session__open(struct perf_session *self, bool force) 22 { 23 struct stat input_stat; 24 25 if (!strcmp(self->filename, "-")) { 26 self->fd_pipe = true; 27 self->fd = STDIN_FILENO; 28 29 if (perf_session__read_header(self, self->fd) < 0) 30 pr_err("incompatible file format (rerun with -v to learn more)"); 31 32 return 0; 33 } 34 35 self->fd = open(self->filename, O_RDONLY); 36 if (self->fd < 0) { 37 int err = errno; 38 39 pr_err("failed to open %s: %s", self->filename, strerror(err)); 40 if (err == ENOENT && !strcmp(self->filename, "perf.data")) 41 pr_err(" (try 'perf record' first)"); 42 pr_err("\n"); 43 return -errno; 44 } 45 46 if (fstat(self->fd, &input_stat) < 0) 47 goto out_close; 48 49 if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) { 50 pr_err("file %s not owned by current user or root\n", 51 self->filename); 52 goto out_close; 53 } 54 55 if (!input_stat.st_size) { 56 pr_info("zero-sized file (%s), nothing to do!\n", 57 self->filename); 58 goto out_close; 59 } 60 61 if (perf_session__read_header(self, self->fd) < 0) { 62 pr_err("incompatible file format (rerun with -v to learn more)"); 63 goto out_close; 64 } 65 66 if (!perf_evlist__valid_sample_type(self->evlist)) { 67 pr_err("non matching sample_type"); 68 goto out_close; 69 } 70 71 if (!perf_evlist__valid_sample_id_all(self->evlist)) { 72 pr_err("non matching sample_id_all"); 73 goto out_close; 74 } 75 76 self->size = input_stat.st_size; 77 return 0; 78 79 out_close: 80 close(self->fd); 81 self->fd = -1; 82 return -1; 83 } 84 85 void perf_session__set_id_hdr_size(struct perf_session *session) 86 { 87 u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); 88 89 machines__set_id_hdr_size(&session->machines, id_hdr_size); 90 } 91 92 int perf_session__create_kernel_maps(struct perf_session *self) 93 { 94 int ret = machine__create_kernel_maps(&self->machines.host); 95 96 if (ret >= 0) 97 ret = machines__create_guest_kernel_maps(&self->machines); 98 return ret; 99 } 100 101 static void perf_session__destroy_kernel_maps(struct perf_session *self) 102 { 103 machines__destroy_kernel_maps(&self->machines); 104 } 105 106 struct perf_session *perf_session__new(const char *filename, int mode, 107 bool force, bool repipe, 108 struct perf_tool *tool) 109 { 110 struct perf_session *self; 111 struct stat st; 112 size_t len; 113 114 if (!filename || !strlen(filename)) { 115 if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode)) 116 filename = "-"; 117 else 118 filename = "perf.data"; 119 } 120 121 len = strlen(filename); 122 self = zalloc(sizeof(*self) + len); 123 124 if (self == NULL) 125 goto out; 126 127 memcpy(self->filename, filename, len); 128 self->repipe = repipe; 129 INIT_LIST_HEAD(&self->ordered_samples.samples); 130 INIT_LIST_HEAD(&self->ordered_samples.sample_cache); 131 INIT_LIST_HEAD(&self->ordered_samples.to_free); 132 machines__init(&self->machines); 133 134 if (mode == O_RDONLY) { 135 if (perf_session__open(self, force) < 0) 136 goto out_delete; 137 perf_session__set_id_hdr_size(self); 138 } else if (mode == O_WRONLY) { 139 /* 140 * In O_RDONLY mode this will be performed when reading the 141 * kernel MMAP event, in perf_event__process_mmap(). 142 */ 143 if (perf_session__create_kernel_maps(self) < 0) 144 goto out_delete; 145 } 146 147 if (tool && tool->ordering_requires_timestamps && 148 tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) { 149 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); 150 tool->ordered_samples = false; 151 } 152 153 out: 154 return self; 155 out_delete: 156 perf_session__delete(self); 157 return NULL; 158 } 159 160 static void perf_session__delete_dead_threads(struct perf_session *session) 161 { 162 machine__delete_dead_threads(&session->machines.host); 163 } 164 165 static void perf_session__delete_threads(struct perf_session *session) 166 { 167 machine__delete_threads(&session->machines.host); 168 } 169 170 static void perf_session_env__delete(struct perf_session_env *env) 171 { 172 free(env->hostname); 173 free(env->os_release); 174 free(env->version); 175 free(env->arch); 176 free(env->cpu_desc); 177 free(env->cpuid); 178 179 free(env->cmdline); 180 free(env->sibling_cores); 181 free(env->sibling_threads); 182 free(env->numa_nodes); 183 free(env->pmu_mappings); 184 } 185 186 void perf_session__delete(struct perf_session *self) 187 { 188 perf_session__destroy_kernel_maps(self); 189 perf_session__delete_dead_threads(self); 190 perf_session__delete_threads(self); 191 perf_session_env__delete(&self->header.env); 192 machines__exit(&self->machines); 193 close(self->fd); 194 free(self); 195 vdso__exit(); 196 } 197 198 static int process_event_synth_tracing_data_stub(union perf_event *event 199 __maybe_unused, 200 struct perf_session *session 201 __maybe_unused) 202 { 203 dump_printf(": unhandled!\n"); 204 return 0; 205 } 206 207 static int process_event_synth_attr_stub(union perf_event *event __maybe_unused, 208 struct perf_evlist **pevlist 209 __maybe_unused) 210 { 211 dump_printf(": unhandled!\n"); 212 return 0; 213 } 214 215 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, 216 union perf_event *event __maybe_unused, 217 struct perf_sample *sample __maybe_unused, 218 struct perf_evsel *evsel __maybe_unused, 219 struct machine *machine __maybe_unused) 220 { 221 dump_printf(": unhandled!\n"); 222 return 0; 223 } 224 225 static int process_event_stub(struct perf_tool *tool __maybe_unused, 226 union perf_event *event __maybe_unused, 227 struct perf_sample *sample __maybe_unused, 228 struct machine *machine __maybe_unused) 229 { 230 dump_printf(": unhandled!\n"); 231 return 0; 232 } 233 234 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, 235 union perf_event *event __maybe_unused, 236 struct perf_session *perf_session 237 __maybe_unused) 238 { 239 dump_printf(": unhandled!\n"); 240 return 0; 241 } 242 243 static int process_event_type_stub(struct perf_tool *tool __maybe_unused, 244 union perf_event *event __maybe_unused) 245 { 246 dump_printf(": unhandled!\n"); 247 return 0; 248 } 249 250 static int process_finished_round(struct perf_tool *tool, 251 union perf_event *event, 252 struct perf_session *session); 253 254 static void perf_tool__fill_defaults(struct perf_tool *tool) 255 { 256 if (tool->sample == NULL) 257 tool->sample = process_event_sample_stub; 258 if (tool->mmap == NULL) 259 tool->mmap = process_event_stub; 260 if (tool->comm == NULL) 261 tool->comm = process_event_stub; 262 if (tool->fork == NULL) 263 tool->fork = process_event_stub; 264 if (tool->exit == NULL) 265 tool->exit = process_event_stub; 266 if (tool->lost == NULL) 267 tool->lost = perf_event__process_lost; 268 if (tool->read == NULL) 269 tool->read = process_event_sample_stub; 270 if (tool->throttle == NULL) 271 tool->throttle = process_event_stub; 272 if (tool->unthrottle == NULL) 273 tool->unthrottle = process_event_stub; 274 if (tool->attr == NULL) 275 tool->attr = process_event_synth_attr_stub; 276 if (tool->event_type == NULL) 277 tool->event_type = process_event_type_stub; 278 if (tool->tracing_data == NULL) 279 tool->tracing_data = process_event_synth_tracing_data_stub; 280 if (tool->build_id == NULL) 281 tool->build_id = process_finished_round_stub; 282 if (tool->finished_round == NULL) { 283 if (tool->ordered_samples) 284 tool->finished_round = process_finished_round; 285 else 286 tool->finished_round = process_finished_round_stub; 287 } 288 } 289 290 void mem_bswap_32(void *src, int byte_size) 291 { 292 u32 *m = src; 293 while (byte_size > 0) { 294 *m = bswap_32(*m); 295 byte_size -= sizeof(u32); 296 ++m; 297 } 298 } 299 300 void mem_bswap_64(void *src, int byte_size) 301 { 302 u64 *m = src; 303 304 while (byte_size > 0) { 305 *m = bswap_64(*m); 306 byte_size -= sizeof(u64); 307 ++m; 308 } 309 } 310 311 static void swap_sample_id_all(union perf_event *event, void *data) 312 { 313 void *end = (void *) event + event->header.size; 314 int size = end - data; 315 316 BUG_ON(size % sizeof(u64)); 317 mem_bswap_64(data, size); 318 } 319 320 static void perf_event__all64_swap(union perf_event *event, 321 bool sample_id_all __maybe_unused) 322 { 323 struct perf_event_header *hdr = &event->header; 324 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 325 } 326 327 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) 328 { 329 event->comm.pid = bswap_32(event->comm.pid); 330 event->comm.tid = bswap_32(event->comm.tid); 331 332 if (sample_id_all) { 333 void *data = &event->comm.comm; 334 335 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 336 swap_sample_id_all(event, data); 337 } 338 } 339 340 static void perf_event__mmap_swap(union perf_event *event, 341 bool sample_id_all) 342 { 343 event->mmap.pid = bswap_32(event->mmap.pid); 344 event->mmap.tid = bswap_32(event->mmap.tid); 345 event->mmap.start = bswap_64(event->mmap.start); 346 event->mmap.len = bswap_64(event->mmap.len); 347 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 348 349 if (sample_id_all) { 350 void *data = &event->mmap.filename; 351 352 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); 353 swap_sample_id_all(event, data); 354 } 355 } 356 357 static void perf_event__task_swap(union perf_event *event, bool sample_id_all) 358 { 359 event->fork.pid = bswap_32(event->fork.pid); 360 event->fork.tid = bswap_32(event->fork.tid); 361 event->fork.ppid = bswap_32(event->fork.ppid); 362 event->fork.ptid = bswap_32(event->fork.ptid); 363 event->fork.time = bswap_64(event->fork.time); 364 365 if (sample_id_all) 366 swap_sample_id_all(event, &event->fork + 1); 367 } 368 369 static void perf_event__read_swap(union perf_event *event, bool sample_id_all) 370 { 371 event->read.pid = bswap_32(event->read.pid); 372 event->read.tid = bswap_32(event->read.tid); 373 event->read.value = bswap_64(event->read.value); 374 event->read.time_enabled = bswap_64(event->read.time_enabled); 375 event->read.time_running = bswap_64(event->read.time_running); 376 event->read.id = bswap_64(event->read.id); 377 378 if (sample_id_all) 379 swap_sample_id_all(event, &event->read + 1); 380 } 381 382 static u8 revbyte(u8 b) 383 { 384 int rev = (b >> 4) | ((b & 0xf) << 4); 385 rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2); 386 rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1); 387 return (u8) rev; 388 } 389 390 /* 391 * XXX this is hack in attempt to carry flags bitfield 392 * throught endian village. ABI says: 393 * 394 * Bit-fields are allocated from right to left (least to most significant) 395 * on little-endian implementations and from left to right (most to least 396 * significant) on big-endian implementations. 397 * 398 * The above seems to be byte specific, so we need to reverse each 399 * byte of the bitfield. 'Internet' also says this might be implementation 400 * specific and we probably need proper fix and carry perf_event_attr 401 * bitfield flags in separate data file FEAT_ section. Thought this seems 402 * to work for now. 403 */ 404 static void swap_bitfield(u8 *p, unsigned len) 405 { 406 unsigned i; 407 408 for (i = 0; i < len; i++) { 409 *p = revbyte(*p); 410 p++; 411 } 412 } 413 414 /* exported for swapping attributes in file header */ 415 void perf_event__attr_swap(struct perf_event_attr *attr) 416 { 417 attr->type = bswap_32(attr->type); 418 attr->size = bswap_32(attr->size); 419 attr->config = bswap_64(attr->config); 420 attr->sample_period = bswap_64(attr->sample_period); 421 attr->sample_type = bswap_64(attr->sample_type); 422 attr->read_format = bswap_64(attr->read_format); 423 attr->wakeup_events = bswap_32(attr->wakeup_events); 424 attr->bp_type = bswap_32(attr->bp_type); 425 attr->bp_addr = bswap_64(attr->bp_addr); 426 attr->bp_len = bswap_64(attr->bp_len); 427 428 swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); 429 } 430 431 static void perf_event__hdr_attr_swap(union perf_event *event, 432 bool sample_id_all __maybe_unused) 433 { 434 size_t size; 435 436 perf_event__attr_swap(&event->attr.attr); 437 438 size = event->header.size; 439 size -= (void *)&event->attr.id - (void *)event; 440 mem_bswap_64(event->attr.id, size); 441 } 442 443 static void perf_event__event_type_swap(union perf_event *event, 444 bool sample_id_all __maybe_unused) 445 { 446 event->event_type.event_type.event_id = 447 bswap_64(event->event_type.event_type.event_id); 448 } 449 450 static void perf_event__tracing_data_swap(union perf_event *event, 451 bool sample_id_all __maybe_unused) 452 { 453 event->tracing_data.size = bswap_32(event->tracing_data.size); 454 } 455 456 typedef void (*perf_event__swap_op)(union perf_event *event, 457 bool sample_id_all); 458 459 static perf_event__swap_op perf_event__swap_ops[] = { 460 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 461 [PERF_RECORD_COMM] = perf_event__comm_swap, 462 [PERF_RECORD_FORK] = perf_event__task_swap, 463 [PERF_RECORD_EXIT] = perf_event__task_swap, 464 [PERF_RECORD_LOST] = perf_event__all64_swap, 465 [PERF_RECORD_READ] = perf_event__read_swap, 466 [PERF_RECORD_SAMPLE] = perf_event__all64_swap, 467 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 468 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 469 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap, 470 [PERF_RECORD_HEADER_BUILD_ID] = NULL, 471 [PERF_RECORD_HEADER_MAX] = NULL, 472 }; 473 474 struct sample_queue { 475 u64 timestamp; 476 u64 file_offset; 477 union perf_event *event; 478 struct list_head list; 479 }; 480 481 static void perf_session_free_sample_buffers(struct perf_session *session) 482 { 483 struct ordered_samples *os = &session->ordered_samples; 484 485 while (!list_empty(&os->to_free)) { 486 struct sample_queue *sq; 487 488 sq = list_entry(os->to_free.next, struct sample_queue, list); 489 list_del(&sq->list); 490 free(sq); 491 } 492 } 493 494 static int perf_session_deliver_event(struct perf_session *session, 495 union perf_event *event, 496 struct perf_sample *sample, 497 struct perf_tool *tool, 498 u64 file_offset); 499 500 static int flush_sample_queue(struct perf_session *s, 501 struct perf_tool *tool) 502 { 503 struct ordered_samples *os = &s->ordered_samples; 504 struct list_head *head = &os->samples; 505 struct sample_queue *tmp, *iter; 506 struct perf_sample sample; 507 u64 limit = os->next_flush; 508 u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL; 509 unsigned idx = 0, progress_next = os->nr_samples / 16; 510 int ret; 511 512 if (!tool->ordered_samples || !limit) 513 return 0; 514 515 list_for_each_entry_safe(iter, tmp, head, list) { 516 if (iter->timestamp > limit) 517 break; 518 519 ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample); 520 if (ret) 521 pr_err("Can't parse sample, err = %d\n", ret); 522 else { 523 ret = perf_session_deliver_event(s, iter->event, &sample, tool, 524 iter->file_offset); 525 if (ret) 526 return ret; 527 } 528 529 os->last_flush = iter->timestamp; 530 list_del(&iter->list); 531 list_add(&iter->list, &os->sample_cache); 532 if (++idx >= progress_next) { 533 progress_next += os->nr_samples / 16; 534 ui_progress__update(idx, os->nr_samples, 535 "Processing time ordered events..."); 536 } 537 } 538 539 if (list_empty(head)) { 540 os->last_sample = NULL; 541 } else if (last_ts <= limit) { 542 os->last_sample = 543 list_entry(head->prev, struct sample_queue, list); 544 } 545 546 os->nr_samples = 0; 547 548 return 0; 549 } 550 551 /* 552 * When perf record finishes a pass on every buffers, it records this pseudo 553 * event. 554 * We record the max timestamp t found in the pass n. 555 * Assuming these timestamps are monotonic across cpus, we know that if 556 * a buffer still has events with timestamps below t, they will be all 557 * available and then read in the pass n + 1. 558 * Hence when we start to read the pass n + 2, we can safely flush every 559 * events with timestamps below t. 560 * 561 * ============ PASS n ================= 562 * CPU 0 | CPU 1 563 * | 564 * cnt1 timestamps | cnt2 timestamps 565 * 1 | 2 566 * 2 | 3 567 * - | 4 <--- max recorded 568 * 569 * ============ PASS n + 1 ============== 570 * CPU 0 | CPU 1 571 * | 572 * cnt1 timestamps | cnt2 timestamps 573 * 3 | 5 574 * 4 | 6 575 * 5 | 7 <---- max recorded 576 * 577 * Flush every events below timestamp 4 578 * 579 * ============ PASS n + 2 ============== 580 * CPU 0 | CPU 1 581 * | 582 * cnt1 timestamps | cnt2 timestamps 583 * 6 | 8 584 * 7 | 9 585 * - | 10 586 * 587 * Flush every events below timestamp 7 588 * etc... 589 */ 590 static int process_finished_round(struct perf_tool *tool, 591 union perf_event *event __maybe_unused, 592 struct perf_session *session) 593 { 594 int ret = flush_sample_queue(session, tool); 595 if (!ret) 596 session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; 597 598 return ret; 599 } 600 601 /* The queue is ordered by time */ 602 static void __queue_event(struct sample_queue *new, struct perf_session *s) 603 { 604 struct ordered_samples *os = &s->ordered_samples; 605 struct sample_queue *sample = os->last_sample; 606 u64 timestamp = new->timestamp; 607 struct list_head *p; 608 609 ++os->nr_samples; 610 os->last_sample = new; 611 612 if (!sample) { 613 list_add(&new->list, &os->samples); 614 os->max_timestamp = timestamp; 615 return; 616 } 617 618 /* 619 * last_sample might point to some random place in the list as it's 620 * the last queued event. We expect that the new event is close to 621 * this. 622 */ 623 if (sample->timestamp <= timestamp) { 624 while (sample->timestamp <= timestamp) { 625 p = sample->list.next; 626 if (p == &os->samples) { 627 list_add_tail(&new->list, &os->samples); 628 os->max_timestamp = timestamp; 629 return; 630 } 631 sample = list_entry(p, struct sample_queue, list); 632 } 633 list_add_tail(&new->list, &sample->list); 634 } else { 635 while (sample->timestamp > timestamp) { 636 p = sample->list.prev; 637 if (p == &os->samples) { 638 list_add(&new->list, &os->samples); 639 return; 640 } 641 sample = list_entry(p, struct sample_queue, list); 642 } 643 list_add(&new->list, &sample->list); 644 } 645 } 646 647 #define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue)) 648 649 static int perf_session_queue_event(struct perf_session *s, union perf_event *event, 650 struct perf_sample *sample, u64 file_offset) 651 { 652 struct ordered_samples *os = &s->ordered_samples; 653 struct list_head *sc = &os->sample_cache; 654 u64 timestamp = sample->time; 655 struct sample_queue *new; 656 657 if (!timestamp || timestamp == ~0ULL) 658 return -ETIME; 659 660 if (timestamp < s->ordered_samples.last_flush) { 661 printf("Warning: Timestamp below last timeslice flush\n"); 662 return -EINVAL; 663 } 664 665 if (!list_empty(sc)) { 666 new = list_entry(sc->next, struct sample_queue, list); 667 list_del(&new->list); 668 } else if (os->sample_buffer) { 669 new = os->sample_buffer + os->sample_buffer_idx; 670 if (++os->sample_buffer_idx == MAX_SAMPLE_BUFFER) 671 os->sample_buffer = NULL; 672 } else { 673 os->sample_buffer = malloc(MAX_SAMPLE_BUFFER * sizeof(*new)); 674 if (!os->sample_buffer) 675 return -ENOMEM; 676 list_add(&os->sample_buffer->list, &os->to_free); 677 os->sample_buffer_idx = 2; 678 new = os->sample_buffer + 1; 679 } 680 681 new->timestamp = timestamp; 682 new->file_offset = file_offset; 683 new->event = event; 684 685 __queue_event(new, s); 686 687 return 0; 688 } 689 690 static void callchain__printf(struct perf_sample *sample) 691 { 692 unsigned int i; 693 694 printf("... chain: nr:%" PRIu64 "\n", sample->callchain->nr); 695 696 for (i = 0; i < sample->callchain->nr; i++) 697 printf("..... %2d: %016" PRIx64 "\n", 698 i, sample->callchain->ips[i]); 699 } 700 701 static void branch_stack__printf(struct perf_sample *sample) 702 { 703 uint64_t i; 704 705 printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr); 706 707 for (i = 0; i < sample->branch_stack->nr; i++) 708 printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 "\n", 709 i, sample->branch_stack->entries[i].from, 710 sample->branch_stack->entries[i].to); 711 } 712 713 static void regs_dump__printf(u64 mask, u64 *regs) 714 { 715 unsigned rid, i = 0; 716 717 for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { 718 u64 val = regs[i++]; 719 720 printf(".... %-5s 0x%" PRIx64 "\n", 721 perf_reg_name(rid), val); 722 } 723 } 724 725 static void regs_user__printf(struct perf_sample *sample, u64 mask) 726 { 727 struct regs_dump *user_regs = &sample->user_regs; 728 729 if (user_regs->regs) { 730 printf("... user regs: mask 0x%" PRIx64 "\n", mask); 731 regs_dump__printf(mask, user_regs->regs); 732 } 733 } 734 735 static void stack_user__printf(struct stack_dump *dump) 736 { 737 printf("... ustack: size %" PRIu64 ", offset 0x%x\n", 738 dump->size, dump->offset); 739 } 740 741 static void perf_session__print_tstamp(struct perf_session *session, 742 union perf_event *event, 743 struct perf_sample *sample) 744 { 745 u64 sample_type = perf_evlist__sample_type(session->evlist); 746 747 if (event->header.type != PERF_RECORD_SAMPLE && 748 !perf_evlist__sample_id_all(session->evlist)) { 749 fputs("-1 -1 ", stdout); 750 return; 751 } 752 753 if ((sample_type & PERF_SAMPLE_CPU)) 754 printf("%u ", sample->cpu); 755 756 if (sample_type & PERF_SAMPLE_TIME) 757 printf("%" PRIu64 " ", sample->time); 758 } 759 760 static void dump_event(struct perf_session *session, union perf_event *event, 761 u64 file_offset, struct perf_sample *sample) 762 { 763 if (!dump_trace) 764 return; 765 766 printf("\n%#" PRIx64 " [%#x]: event: %d\n", 767 file_offset, event->header.size, event->header.type); 768 769 trace_event(event); 770 771 if (sample) 772 perf_session__print_tstamp(session, event, sample); 773 774 printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset, 775 event->header.size, perf_event__name(event->header.type)); 776 } 777 778 static void dump_sample(struct perf_evsel *evsel, union perf_event *event, 779 struct perf_sample *sample) 780 { 781 u64 sample_type; 782 783 if (!dump_trace) 784 return; 785 786 printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n", 787 event->header.misc, sample->pid, sample->tid, sample->ip, 788 sample->period, sample->addr); 789 790 sample_type = evsel->attr.sample_type; 791 792 if (sample_type & PERF_SAMPLE_CALLCHAIN) 793 callchain__printf(sample); 794 795 if (sample_type & PERF_SAMPLE_BRANCH_STACK) 796 branch_stack__printf(sample); 797 798 if (sample_type & PERF_SAMPLE_REGS_USER) 799 regs_user__printf(sample, evsel->attr.sample_regs_user); 800 801 if (sample_type & PERF_SAMPLE_STACK_USER) 802 stack_user__printf(&sample->user_stack); 803 } 804 805 static struct machine * 806 perf_session__find_machine_for_cpumode(struct perf_session *session, 807 union perf_event *event) 808 { 809 const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK; 810 811 if (perf_guest && 812 ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) || 813 (cpumode == PERF_RECORD_MISC_GUEST_USER))) { 814 u32 pid; 815 816 if (event->header.type == PERF_RECORD_MMAP) 817 pid = event->mmap.pid; 818 else 819 pid = event->ip.pid; 820 821 return perf_session__findnew_machine(session, pid); 822 } 823 824 return &session->machines.host; 825 } 826 827 static int perf_session_deliver_event(struct perf_session *session, 828 union perf_event *event, 829 struct perf_sample *sample, 830 struct perf_tool *tool, 831 u64 file_offset) 832 { 833 struct perf_evsel *evsel; 834 struct machine *machine; 835 836 dump_event(session, event, file_offset, sample); 837 838 evsel = perf_evlist__id2evsel(session->evlist, sample->id); 839 if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) { 840 /* 841 * XXX We're leaving PERF_RECORD_SAMPLE unnacounted here 842 * because the tools right now may apply filters, discarding 843 * some of the samples. For consistency, in the future we 844 * should have something like nr_filtered_samples and remove 845 * the sample->period from total_sample_period, etc, KISS for 846 * now tho. 847 * 848 * Also testing against NULL allows us to handle files without 849 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the 850 * future probably it'll be a good idea to restrict event 851 * processing via perf_session to files with both set. 852 */ 853 hists__inc_nr_events(&evsel->hists, event->header.type); 854 } 855 856 machine = perf_session__find_machine_for_cpumode(session, event); 857 858 switch (event->header.type) { 859 case PERF_RECORD_SAMPLE: 860 dump_sample(evsel, event, sample); 861 if (evsel == NULL) { 862 ++session->stats.nr_unknown_id; 863 return 0; 864 } 865 if (machine == NULL) { 866 ++session->stats.nr_unprocessable_samples; 867 return 0; 868 } 869 return tool->sample(tool, event, sample, evsel, machine); 870 case PERF_RECORD_MMAP: 871 return tool->mmap(tool, event, sample, machine); 872 case PERF_RECORD_COMM: 873 return tool->comm(tool, event, sample, machine); 874 case PERF_RECORD_FORK: 875 return tool->fork(tool, event, sample, machine); 876 case PERF_RECORD_EXIT: 877 return tool->exit(tool, event, sample, machine); 878 case PERF_RECORD_LOST: 879 if (tool->lost == perf_event__process_lost) 880 session->stats.total_lost += event->lost.lost; 881 return tool->lost(tool, event, sample, machine); 882 case PERF_RECORD_READ: 883 return tool->read(tool, event, sample, evsel, machine); 884 case PERF_RECORD_THROTTLE: 885 return tool->throttle(tool, event, sample, machine); 886 case PERF_RECORD_UNTHROTTLE: 887 return tool->unthrottle(tool, event, sample, machine); 888 default: 889 ++session->stats.nr_unknown_events; 890 return -1; 891 } 892 } 893 894 static int perf_session__preprocess_sample(struct perf_session *session, 895 union perf_event *event, struct perf_sample *sample) 896 { 897 if (event->header.type != PERF_RECORD_SAMPLE || 898 !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN)) 899 return 0; 900 901 if (!ip_callchain__valid(sample->callchain, event)) { 902 pr_debug("call-chain problem with event, skipping it.\n"); 903 ++session->stats.nr_invalid_chains; 904 session->stats.total_invalid_chains += sample->period; 905 return -EINVAL; 906 } 907 return 0; 908 } 909 910 static int perf_session__process_user_event(struct perf_session *session, union perf_event *event, 911 struct perf_tool *tool, u64 file_offset) 912 { 913 int err; 914 915 dump_event(session, event, file_offset, NULL); 916 917 /* These events are processed right away */ 918 switch (event->header.type) { 919 case PERF_RECORD_HEADER_ATTR: 920 err = tool->attr(event, &session->evlist); 921 if (err == 0) 922 perf_session__set_id_hdr_size(session); 923 return err; 924 case PERF_RECORD_HEADER_EVENT_TYPE: 925 return tool->event_type(tool, event); 926 case PERF_RECORD_HEADER_TRACING_DATA: 927 /* setup for reading amidst mmap */ 928 lseek(session->fd, file_offset, SEEK_SET); 929 return tool->tracing_data(event, session); 930 case PERF_RECORD_HEADER_BUILD_ID: 931 return tool->build_id(tool, event, session); 932 case PERF_RECORD_FINISHED_ROUND: 933 return tool->finished_round(tool, event, session); 934 default: 935 return -EINVAL; 936 } 937 } 938 939 static void event_swap(union perf_event *event, bool sample_id_all) 940 { 941 perf_event__swap_op swap; 942 943 swap = perf_event__swap_ops[event->header.type]; 944 if (swap) 945 swap(event, sample_id_all); 946 } 947 948 static int perf_session__process_event(struct perf_session *session, 949 union perf_event *event, 950 struct perf_tool *tool, 951 u64 file_offset) 952 { 953 struct perf_sample sample; 954 int ret; 955 956 if (session->header.needs_swap) 957 event_swap(event, perf_evlist__sample_id_all(session->evlist)); 958 959 if (event->header.type >= PERF_RECORD_HEADER_MAX) 960 return -EINVAL; 961 962 events_stats__inc(&session->stats, event->header.type); 963 964 if (event->header.type >= PERF_RECORD_USER_TYPE_START) 965 return perf_session__process_user_event(session, event, tool, file_offset); 966 967 /* 968 * For all kernel events we get the sample data 969 */ 970 ret = perf_evlist__parse_sample(session->evlist, event, &sample); 971 if (ret) 972 return ret; 973 974 /* Preprocess sample records - precheck callchains */ 975 if (perf_session__preprocess_sample(session, event, &sample)) 976 return 0; 977 978 if (tool->ordered_samples) { 979 ret = perf_session_queue_event(session, event, &sample, 980 file_offset); 981 if (ret != -ETIME) 982 return ret; 983 } 984 985 return perf_session_deliver_event(session, event, &sample, tool, 986 file_offset); 987 } 988 989 void perf_event_header__bswap(struct perf_event_header *self) 990 { 991 self->type = bswap_32(self->type); 992 self->misc = bswap_16(self->misc); 993 self->size = bswap_16(self->size); 994 } 995 996 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid) 997 { 998 return machine__findnew_thread(&session->machines.host, pid); 999 } 1000 1001 static struct thread *perf_session__register_idle_thread(struct perf_session *self) 1002 { 1003 struct thread *thread = perf_session__findnew(self, 0); 1004 1005 if (thread == NULL || thread__set_comm(thread, "swapper")) { 1006 pr_err("problem inserting idle task.\n"); 1007 thread = NULL; 1008 } 1009 1010 return thread; 1011 } 1012 1013 static void perf_session__warn_about_errors(const struct perf_session *session, 1014 const struct perf_tool *tool) 1015 { 1016 if (tool->lost == perf_event__process_lost && 1017 session->stats.nr_events[PERF_RECORD_LOST] != 0) { 1018 ui__warning("Processed %d events and lost %d chunks!\n\n" 1019 "Check IO/CPU overload!\n\n", 1020 session->stats.nr_events[0], 1021 session->stats.nr_events[PERF_RECORD_LOST]); 1022 } 1023 1024 if (session->stats.nr_unknown_events != 0) { 1025 ui__warning("Found %u unknown events!\n\n" 1026 "Is this an older tool processing a perf.data " 1027 "file generated by a more recent tool?\n\n" 1028 "If that is not the case, consider " 1029 "reporting to linux-kernel@vger.kernel.org.\n\n", 1030 session->stats.nr_unknown_events); 1031 } 1032 1033 if (session->stats.nr_unknown_id != 0) { 1034 ui__warning("%u samples with id not present in the header\n", 1035 session->stats.nr_unknown_id); 1036 } 1037 1038 if (session->stats.nr_invalid_chains != 0) { 1039 ui__warning("Found invalid callchains!\n\n" 1040 "%u out of %u events were discarded for this reason.\n\n" 1041 "Consider reporting to linux-kernel@vger.kernel.org.\n\n", 1042 session->stats.nr_invalid_chains, 1043 session->stats.nr_events[PERF_RECORD_SAMPLE]); 1044 } 1045 1046 if (session->stats.nr_unprocessable_samples != 0) { 1047 ui__warning("%u unprocessable samples recorded.\n" 1048 "Do you have a KVM guest running and not using 'perf kvm'?\n", 1049 session->stats.nr_unprocessable_samples); 1050 } 1051 } 1052 1053 #define session_done() (*(volatile int *)(&session_done)) 1054 volatile int session_done; 1055 1056 static int __perf_session__process_pipe_events(struct perf_session *self, 1057 struct perf_tool *tool) 1058 { 1059 union perf_event *event; 1060 uint32_t size, cur_size = 0; 1061 void *buf = NULL; 1062 int skip = 0; 1063 u64 head; 1064 int err; 1065 void *p; 1066 1067 perf_tool__fill_defaults(tool); 1068 1069 head = 0; 1070 cur_size = sizeof(union perf_event); 1071 1072 buf = malloc(cur_size); 1073 if (!buf) 1074 return -errno; 1075 more: 1076 event = buf; 1077 err = readn(self->fd, event, sizeof(struct perf_event_header)); 1078 if (err <= 0) { 1079 if (err == 0) 1080 goto done; 1081 1082 pr_err("failed to read event header\n"); 1083 goto out_err; 1084 } 1085 1086 if (self->header.needs_swap) 1087 perf_event_header__bswap(&event->header); 1088 1089 size = event->header.size; 1090 if (size == 0) 1091 size = 8; 1092 1093 if (size > cur_size) { 1094 void *new = realloc(buf, size); 1095 if (!new) { 1096 pr_err("failed to allocate memory to read event\n"); 1097 goto out_err; 1098 } 1099 buf = new; 1100 cur_size = size; 1101 event = buf; 1102 } 1103 p = event; 1104 p += sizeof(struct perf_event_header); 1105 1106 if (size - sizeof(struct perf_event_header)) { 1107 err = readn(self->fd, p, size - sizeof(struct perf_event_header)); 1108 if (err <= 0) { 1109 if (err == 0) { 1110 pr_err("unexpected end of event stream\n"); 1111 goto done; 1112 } 1113 1114 pr_err("failed to read event data\n"); 1115 goto out_err; 1116 } 1117 } 1118 1119 if ((skip = perf_session__process_event(self, event, tool, head)) < 0) { 1120 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1121 head, event->header.size, event->header.type); 1122 err = -EINVAL; 1123 goto out_err; 1124 } 1125 1126 head += size; 1127 1128 if (skip > 0) 1129 head += skip; 1130 1131 if (!session_done()) 1132 goto more; 1133 done: 1134 err = 0; 1135 out_err: 1136 free(buf); 1137 perf_session__warn_about_errors(self, tool); 1138 perf_session_free_sample_buffers(self); 1139 return err; 1140 } 1141 1142 static union perf_event * 1143 fetch_mmaped_event(struct perf_session *session, 1144 u64 head, size_t mmap_size, char *buf) 1145 { 1146 union perf_event *event; 1147 1148 /* 1149 * Ensure we have enough space remaining to read 1150 * the size of the event in the headers. 1151 */ 1152 if (head + sizeof(event->header) > mmap_size) 1153 return NULL; 1154 1155 event = (union perf_event *)(buf + head); 1156 1157 if (session->header.needs_swap) 1158 perf_event_header__bswap(&event->header); 1159 1160 if (head + event->header.size > mmap_size) 1161 return NULL; 1162 1163 return event; 1164 } 1165 1166 /* 1167 * On 64bit we can mmap the data file in one go. No need for tiny mmap 1168 * slices. On 32bit we use 32MB. 1169 */ 1170 #if BITS_PER_LONG == 64 1171 #define MMAP_SIZE ULLONG_MAX 1172 #define NUM_MMAPS 1 1173 #else 1174 #define MMAP_SIZE (32 * 1024 * 1024ULL) 1175 #define NUM_MMAPS 128 1176 #endif 1177 1178 int __perf_session__process_events(struct perf_session *session, 1179 u64 data_offset, u64 data_size, 1180 u64 file_size, struct perf_tool *tool) 1181 { 1182 u64 head, page_offset, file_offset, file_pos, progress_next; 1183 int err, mmap_prot, mmap_flags, map_idx = 0; 1184 size_t mmap_size; 1185 char *buf, *mmaps[NUM_MMAPS]; 1186 union perf_event *event; 1187 uint32_t size; 1188 1189 perf_tool__fill_defaults(tool); 1190 1191 page_offset = page_size * (data_offset / page_size); 1192 file_offset = page_offset; 1193 head = data_offset - page_offset; 1194 1195 if (data_offset + data_size < file_size) 1196 file_size = data_offset + data_size; 1197 1198 progress_next = file_size / 16; 1199 1200 mmap_size = MMAP_SIZE; 1201 if (mmap_size > file_size) 1202 mmap_size = file_size; 1203 1204 memset(mmaps, 0, sizeof(mmaps)); 1205 1206 mmap_prot = PROT_READ; 1207 mmap_flags = MAP_SHARED; 1208 1209 if (session->header.needs_swap) { 1210 mmap_prot |= PROT_WRITE; 1211 mmap_flags = MAP_PRIVATE; 1212 } 1213 remap: 1214 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, session->fd, 1215 file_offset); 1216 if (buf == MAP_FAILED) { 1217 pr_err("failed to mmap file\n"); 1218 err = -errno; 1219 goto out_err; 1220 } 1221 mmaps[map_idx] = buf; 1222 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1); 1223 file_pos = file_offset + head; 1224 1225 more: 1226 event = fetch_mmaped_event(session, head, mmap_size, buf); 1227 if (!event) { 1228 if (mmaps[map_idx]) { 1229 munmap(mmaps[map_idx], mmap_size); 1230 mmaps[map_idx] = NULL; 1231 } 1232 1233 page_offset = page_size * (head / page_size); 1234 file_offset += page_offset; 1235 head -= page_offset; 1236 goto remap; 1237 } 1238 1239 size = event->header.size; 1240 1241 if (size == 0 || 1242 perf_session__process_event(session, event, tool, file_pos) < 0) { 1243 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n", 1244 file_offset + head, event->header.size, 1245 event->header.type); 1246 err = -EINVAL; 1247 goto out_err; 1248 } 1249 1250 head += size; 1251 file_pos += size; 1252 1253 if (file_pos >= progress_next) { 1254 progress_next += file_size / 16; 1255 ui_progress__update(file_pos, file_size, 1256 "Processing events..."); 1257 } 1258 1259 if (file_pos < file_size) 1260 goto more; 1261 1262 err = 0; 1263 /* do the final flush for ordered samples */ 1264 session->ordered_samples.next_flush = ULLONG_MAX; 1265 err = flush_sample_queue(session, tool); 1266 out_err: 1267 ui_progress__finish(); 1268 perf_session__warn_about_errors(session, tool); 1269 perf_session_free_sample_buffers(session); 1270 return err; 1271 } 1272 1273 int perf_session__process_events(struct perf_session *self, 1274 struct perf_tool *tool) 1275 { 1276 int err; 1277 1278 if (perf_session__register_idle_thread(self) == NULL) 1279 return -ENOMEM; 1280 1281 if (!self->fd_pipe) 1282 err = __perf_session__process_events(self, 1283 self->header.data_offset, 1284 self->header.data_size, 1285 self->size, tool); 1286 else 1287 err = __perf_session__process_pipe_events(self, tool); 1288 1289 return err; 1290 } 1291 1292 bool perf_session__has_traces(struct perf_session *session, const char *msg) 1293 { 1294 if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) { 1295 pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); 1296 return false; 1297 } 1298 1299 return true; 1300 } 1301 1302 int maps__set_kallsyms_ref_reloc_sym(struct map **maps, 1303 const char *symbol_name, u64 addr) 1304 { 1305 char *bracket; 1306 enum map_type i; 1307 struct ref_reloc_sym *ref; 1308 1309 ref = zalloc(sizeof(struct ref_reloc_sym)); 1310 if (ref == NULL) 1311 return -ENOMEM; 1312 1313 ref->name = strdup(symbol_name); 1314 if (ref->name == NULL) { 1315 free(ref); 1316 return -ENOMEM; 1317 } 1318 1319 bracket = strchr(ref->name, ']'); 1320 if (bracket) 1321 *bracket = '\0'; 1322 1323 ref->addr = addr; 1324 1325 for (i = 0; i < MAP__NR_TYPES; ++i) { 1326 struct kmap *kmap = map__kmap(maps[i]); 1327 kmap->ref_reloc_sym = ref; 1328 } 1329 1330 return 0; 1331 } 1332 1333 size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp) 1334 { 1335 return machines__fprintf_dsos(&self->machines, fp); 1336 } 1337 1338 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp, 1339 bool (skip)(struct dso *dso, int parm), int parm) 1340 { 1341 return machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm); 1342 } 1343 1344 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp) 1345 { 1346 struct perf_evsel *pos; 1347 size_t ret = fprintf(fp, "Aggregated stats:\n"); 1348 1349 ret += events_stats__fprintf(&session->stats, fp); 1350 1351 list_for_each_entry(pos, &session->evlist->entries, node) { 1352 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos)); 1353 ret += events_stats__fprintf(&pos->hists.stats, fp); 1354 } 1355 1356 return ret; 1357 } 1358 1359 size_t perf_session__fprintf(struct perf_session *session, FILE *fp) 1360 { 1361 /* 1362 * FIXME: Here we have to actually print all the machines in this 1363 * session, not just the host... 1364 */ 1365 return machine__fprintf(&session->machines.host, fp); 1366 } 1367 1368 void perf_session__remove_thread(struct perf_session *session, 1369 struct thread *th) 1370 { 1371 /* 1372 * FIXME: This one makes no sense, we need to remove the thread from 1373 * the machine it belongs to, perf_session can have many machines, so 1374 * doing it always on ->machines.host is wrong. Fix when auditing all 1375 * the 'perf kvm' code. 1376 */ 1377 machine__remove_thread(&session->machines.host, th); 1378 } 1379 1380 struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, 1381 unsigned int type) 1382 { 1383 struct perf_evsel *pos; 1384 1385 list_for_each_entry(pos, &session->evlist->entries, node) { 1386 if (pos->attr.type == type) 1387 return pos; 1388 } 1389 return NULL; 1390 } 1391 1392 void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, 1393 struct perf_sample *sample, struct machine *machine, 1394 int print_sym, int print_dso, int print_symoffset) 1395 { 1396 struct addr_location al; 1397 struct callchain_cursor_node *node; 1398 1399 if (perf_event__preprocess_sample(event, machine, &al, sample, 1400 NULL) < 0) { 1401 error("problem processing %d event, skipping it.\n", 1402 event->header.type); 1403 return; 1404 } 1405 1406 if (symbol_conf.use_callchain && sample->callchain) { 1407 1408 1409 if (machine__resolve_callchain(machine, evsel, al.thread, 1410 sample, NULL) != 0) { 1411 if (verbose) 1412 error("Failed to resolve callchain. Skipping\n"); 1413 return; 1414 } 1415 callchain_cursor_commit(&callchain_cursor); 1416 1417 while (1) { 1418 node = callchain_cursor_current(&callchain_cursor); 1419 if (!node) 1420 break; 1421 1422 printf("\t%16" PRIx64, node->ip); 1423 if (print_sym) { 1424 printf(" "); 1425 symbol__fprintf_symname(node->sym, stdout); 1426 } 1427 if (print_dso) { 1428 printf(" ("); 1429 map__fprintf_dsoname(node->map, stdout); 1430 printf(")"); 1431 } 1432 printf("\n"); 1433 1434 callchain_cursor_advance(&callchain_cursor); 1435 } 1436 1437 } else { 1438 printf("%16" PRIx64, sample->ip); 1439 if (print_sym) { 1440 printf(" "); 1441 if (print_symoffset) 1442 symbol__fprintf_symname_offs(al.sym, &al, 1443 stdout); 1444 else 1445 symbol__fprintf_symname(al.sym, stdout); 1446 } 1447 1448 if (print_dso) { 1449 printf(" ("); 1450 map__fprintf_dsoname(al.map, stdout); 1451 printf(")"); 1452 } 1453 } 1454 } 1455 1456 int perf_session__cpu_bitmap(struct perf_session *session, 1457 const char *cpu_list, unsigned long *cpu_bitmap) 1458 { 1459 int i; 1460 struct cpu_map *map; 1461 1462 for (i = 0; i < PERF_TYPE_MAX; ++i) { 1463 struct perf_evsel *evsel; 1464 1465 evsel = perf_session__find_first_evtype(session, i); 1466 if (!evsel) 1467 continue; 1468 1469 if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) { 1470 pr_err("File does not contain CPU events. " 1471 "Remove -c option to proceed.\n"); 1472 return -1; 1473 } 1474 } 1475 1476 map = cpu_map__new(cpu_list); 1477 if (map == NULL) { 1478 pr_err("Invalid cpu_list\n"); 1479 return -1; 1480 } 1481 1482 for (i = 0; i < map->nr; i++) { 1483 int cpu = map->map[i]; 1484 1485 if (cpu >= MAX_NR_CPUS) { 1486 pr_err("Requested CPU %d too large. " 1487 "Consider raising MAX_NR_CPUS\n", cpu); 1488 return -1; 1489 } 1490 1491 set_bit(cpu, cpu_bitmap); 1492 } 1493 1494 return 0; 1495 } 1496 1497 void perf_session__fprintf_info(struct perf_session *session, FILE *fp, 1498 bool full) 1499 { 1500 struct stat st; 1501 int ret; 1502 1503 if (session == NULL || fp == NULL) 1504 return; 1505 1506 ret = fstat(session->fd, &st); 1507 if (ret == -1) 1508 return; 1509 1510 fprintf(fp, "# ========\n"); 1511 fprintf(fp, "# captured on: %s", ctime(&st.st_ctime)); 1512 perf_header__fprintf_info(session, fp, full); 1513 fprintf(fp, "# ========\n#\n"); 1514 } 1515 1516 1517 int __perf_session__set_tracepoints_handlers(struct perf_session *session, 1518 const struct perf_evsel_str_handler *assocs, 1519 size_t nr_assocs) 1520 { 1521 struct perf_evlist *evlist = session->evlist; 1522 struct event_format *format; 1523 struct perf_evsel *evsel; 1524 char *tracepoint, *name; 1525 size_t i; 1526 int err; 1527 1528 for (i = 0; i < nr_assocs; i++) { 1529 err = -ENOMEM; 1530 tracepoint = strdup(assocs[i].name); 1531 if (tracepoint == NULL) 1532 goto out; 1533 1534 err = -ENOENT; 1535 name = strchr(tracepoint, ':'); 1536 if (name == NULL) 1537 goto out_free; 1538 1539 *name++ = '\0'; 1540 format = pevent_find_event_by_name(session->pevent, 1541 tracepoint, name); 1542 if (format == NULL) { 1543 /* 1544 * Adding a handler for an event not in the session, 1545 * just ignore it. 1546 */ 1547 goto next; 1548 } 1549 1550 evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id); 1551 if (evsel == NULL) 1552 goto next; 1553 1554 err = -EEXIST; 1555 if (evsel->handler.func != NULL) 1556 goto out_free; 1557 evsel->handler.func = assocs[i].handler; 1558 next: 1559 free(tracepoint); 1560 } 1561 1562 err = 0; 1563 out: 1564 return err; 1565 1566 out_free: 1567 free(tracepoint); 1568 goto out; 1569 } 1570