1 /* 2 * builtin-timechart.c - make an svg timechart of system activity 3 * 4 * (C) Copyright 2009 Intel Corporation 5 * 6 * Authors: 7 * Arjan van de Ven <arjan@linux.intel.com> 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; version 2 12 * of the License. 13 */ 14 15 #include "builtin.h" 16 17 #include "util/util.h" 18 19 #include "util/color.h" 20 #include <linux/list.h> 21 #include "util/cache.h" 22 #include <linux/rbtree.h> 23 #include "util/symbol.h" 24 #include "util/string.h" 25 #include "util/callchain.h" 26 #include "util/strlist.h" 27 28 #include "perf.h" 29 #include "util/header.h" 30 #include "util/parse-options.h" 31 #include "util/parse-events.h" 32 #include "util/svghelper.h" 33 34 static char const *input_name = "perf.data"; 35 static char const *output_name = "output.svg"; 36 37 38 static unsigned long page_size; 39 static unsigned long mmap_window = 32; 40 static u64 sample_type; 41 42 static unsigned int numcpus; 43 static u64 min_freq; /* Lowest CPU frequency seen */ 44 static u64 max_freq; /* Highest CPU frequency seen */ 45 static u64 turbo_frequency; 46 47 static u64 first_time, last_time; 48 49 50 static struct perf_header *header; 51 52 struct per_pid; 53 struct per_pidcomm; 54 55 struct cpu_sample; 56 struct power_event; 57 struct wake_event; 58 59 struct sample_wrapper; 60 61 /* 62 * Datastructure layout: 63 * We keep an list of "pid"s, matching the kernels notion of a task struct. 64 * Each "pid" entry, has a list of "comm"s. 65 * this is because we want to track different programs different, while 66 * exec will reuse the original pid (by design). 67 * Each comm has a list of samples that will be used to draw 68 * final graph. 69 */ 70 71 struct per_pid { 72 struct per_pid *next; 73 74 int pid; 75 int ppid; 76 77 u64 start_time; 78 u64 end_time; 79 u64 total_time; 80 int display; 81 82 struct per_pidcomm *all; 83 struct per_pidcomm *current; 84 85 int painted; 86 }; 87 88 89 struct per_pidcomm { 90 struct per_pidcomm *next; 91 92 u64 start_time; 93 u64 end_time; 94 u64 total_time; 95 96 int Y; 97 int display; 98 99 long state; 100 u64 state_since; 101 102 char *comm; 103 104 struct cpu_sample *samples; 105 }; 106 107 struct sample_wrapper { 108 struct sample_wrapper *next; 109 110 u64 timestamp; 111 unsigned char data[0]; 112 }; 113 114 #define TYPE_NONE 0 115 #define TYPE_RUNNING 1 116 #define TYPE_WAITING 2 117 #define TYPE_BLOCKED 3 118 119 struct cpu_sample { 120 struct cpu_sample *next; 121 122 u64 start_time; 123 u64 end_time; 124 int type; 125 int cpu; 126 }; 127 128 static struct per_pid *all_data; 129 130 #define CSTATE 1 131 #define PSTATE 2 132 133 struct power_event { 134 struct power_event *next; 135 int type; 136 int state; 137 u64 start_time; 138 u64 end_time; 139 int cpu; 140 }; 141 142 struct wake_event { 143 struct wake_event *next; 144 int waker; 145 int wakee; 146 u64 time; 147 }; 148 149 static struct power_event *power_events; 150 static struct wake_event *wake_events; 151 152 struct sample_wrapper *all_samples; 153 154 static struct per_pid *find_create_pid(int pid) 155 { 156 struct per_pid *cursor = all_data; 157 158 while (cursor) { 159 if (cursor->pid == pid) 160 return cursor; 161 cursor = cursor->next; 162 } 163 cursor = malloc(sizeof(struct per_pid)); 164 assert(cursor != NULL); 165 memset(cursor, 0, sizeof(struct per_pid)); 166 cursor->pid = pid; 167 cursor->next = all_data; 168 all_data = cursor; 169 return cursor; 170 } 171 172 static void pid_set_comm(int pid, char *comm) 173 { 174 struct per_pid *p; 175 struct per_pidcomm *c; 176 p = find_create_pid(pid); 177 c = p->all; 178 while (c) { 179 if (c->comm && strcmp(c->comm, comm) == 0) { 180 p->current = c; 181 return; 182 } 183 if (!c->comm) { 184 c->comm = strdup(comm); 185 p->current = c; 186 return; 187 } 188 c = c->next; 189 } 190 c = malloc(sizeof(struct per_pidcomm)); 191 assert(c != NULL); 192 memset(c, 0, sizeof(struct per_pidcomm)); 193 c->comm = strdup(comm); 194 p->current = c; 195 c->next = p->all; 196 p->all = c; 197 } 198 199 static void pid_fork(int pid, int ppid, u64 timestamp) 200 { 201 struct per_pid *p, *pp; 202 p = find_create_pid(pid); 203 pp = find_create_pid(ppid); 204 p->ppid = ppid; 205 if (pp->current && pp->current->comm && !p->current) 206 pid_set_comm(pid, pp->current->comm); 207 208 p->start_time = timestamp; 209 if (p->current) { 210 p->current->start_time = timestamp; 211 p->current->state_since = timestamp; 212 } 213 } 214 215 static void pid_exit(int pid, u64 timestamp) 216 { 217 struct per_pid *p; 218 p = find_create_pid(pid); 219 p->end_time = timestamp; 220 if (p->current) 221 p->current->end_time = timestamp; 222 } 223 224 static void 225 pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) 226 { 227 struct per_pid *p; 228 struct per_pidcomm *c; 229 struct cpu_sample *sample; 230 231 p = find_create_pid(pid); 232 c = p->current; 233 if (!c) { 234 c = malloc(sizeof(struct per_pidcomm)); 235 assert(c != NULL); 236 memset(c, 0, sizeof(struct per_pidcomm)); 237 p->current = c; 238 c->next = p->all; 239 p->all = c; 240 } 241 242 sample = malloc(sizeof(struct cpu_sample)); 243 assert(sample != NULL); 244 memset(sample, 0, sizeof(struct cpu_sample)); 245 sample->start_time = start; 246 sample->end_time = end; 247 sample->type = type; 248 sample->next = c->samples; 249 sample->cpu = cpu; 250 c->samples = sample; 251 252 if (sample->type == TYPE_RUNNING && end > start && start > 0) { 253 c->total_time += (end-start); 254 p->total_time += (end-start); 255 } 256 257 if (c->start_time == 0 || c->start_time > start) 258 c->start_time = start; 259 if (p->start_time == 0 || p->start_time > start) 260 p->start_time = start; 261 262 if (cpu > numcpus) 263 numcpus = cpu; 264 } 265 266 #define MAX_CPUS 4096 267 268 static u64 cpus_cstate_start_times[MAX_CPUS]; 269 static int cpus_cstate_state[MAX_CPUS]; 270 static u64 cpus_pstate_start_times[MAX_CPUS]; 271 static u64 cpus_pstate_state[MAX_CPUS]; 272 273 static int 274 process_comm_event(event_t *event) 275 { 276 pid_set_comm(event->comm.pid, event->comm.comm); 277 return 0; 278 } 279 static int 280 process_fork_event(event_t *event) 281 { 282 pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); 283 return 0; 284 } 285 286 static int 287 process_exit_event(event_t *event) 288 { 289 pid_exit(event->fork.pid, event->fork.time); 290 return 0; 291 } 292 293 struct trace_entry { 294 u32 size; 295 unsigned short type; 296 unsigned char flags; 297 unsigned char preempt_count; 298 int pid; 299 int tgid; 300 }; 301 302 struct power_entry { 303 struct trace_entry te; 304 s64 type; 305 s64 value; 306 }; 307 308 #define TASK_COMM_LEN 16 309 struct wakeup_entry { 310 struct trace_entry te; 311 char comm[TASK_COMM_LEN]; 312 int pid; 313 int prio; 314 int success; 315 }; 316 317 /* 318 * trace_flag_type is an enumeration that holds different 319 * states when a trace occurs. These are: 320 * IRQS_OFF - interrupts were disabled 321 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags 322 * NEED_RESCED - reschedule is requested 323 * HARDIRQ - inside an interrupt handler 324 * SOFTIRQ - inside a softirq handler 325 */ 326 enum trace_flag_type { 327 TRACE_FLAG_IRQS_OFF = 0x01, 328 TRACE_FLAG_IRQS_NOSUPPORT = 0x02, 329 TRACE_FLAG_NEED_RESCHED = 0x04, 330 TRACE_FLAG_HARDIRQ = 0x08, 331 TRACE_FLAG_SOFTIRQ = 0x10, 332 }; 333 334 335 336 struct sched_switch { 337 struct trace_entry te; 338 char prev_comm[TASK_COMM_LEN]; 339 int prev_pid; 340 int prev_prio; 341 long prev_state; /* Arjan weeps. */ 342 char next_comm[TASK_COMM_LEN]; 343 int next_pid; 344 int next_prio; 345 }; 346 347 static void c_state_start(int cpu, u64 timestamp, int state) 348 { 349 cpus_cstate_start_times[cpu] = timestamp; 350 cpus_cstate_state[cpu] = state; 351 } 352 353 static void c_state_end(int cpu, u64 timestamp) 354 { 355 struct power_event *pwr; 356 pwr = malloc(sizeof(struct power_event)); 357 if (!pwr) 358 return; 359 memset(pwr, 0, sizeof(struct power_event)); 360 361 pwr->state = cpus_cstate_state[cpu]; 362 pwr->start_time = cpus_cstate_start_times[cpu]; 363 pwr->end_time = timestamp; 364 pwr->cpu = cpu; 365 pwr->type = CSTATE; 366 pwr->next = power_events; 367 368 power_events = pwr; 369 } 370 371 static void p_state_change(int cpu, u64 timestamp, u64 new_freq) 372 { 373 struct power_event *pwr; 374 pwr = malloc(sizeof(struct power_event)); 375 376 if (new_freq > 8000000) /* detect invalid data */ 377 return; 378 379 if (!pwr) 380 return; 381 memset(pwr, 0, sizeof(struct power_event)); 382 383 pwr->state = cpus_pstate_state[cpu]; 384 pwr->start_time = cpus_pstate_start_times[cpu]; 385 pwr->end_time = timestamp; 386 pwr->cpu = cpu; 387 pwr->type = PSTATE; 388 pwr->next = power_events; 389 390 if (!pwr->start_time) 391 pwr->start_time = first_time; 392 393 power_events = pwr; 394 395 cpus_pstate_state[cpu] = new_freq; 396 cpus_pstate_start_times[cpu] = timestamp; 397 398 if ((u64)new_freq > max_freq) 399 max_freq = new_freq; 400 401 if (new_freq < min_freq || min_freq == 0) 402 min_freq = new_freq; 403 404 if (new_freq == max_freq - 1000) 405 turbo_frequency = max_freq; 406 } 407 408 static void 409 sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te) 410 { 411 struct wake_event *we; 412 struct per_pid *p; 413 struct wakeup_entry *wake = (void *)te; 414 415 we = malloc(sizeof(struct wake_event)); 416 if (!we) 417 return; 418 419 memset(we, 0, sizeof(struct wake_event)); 420 we->time = timestamp; 421 we->waker = pid; 422 423 if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ)) 424 we->waker = -1; 425 426 we->wakee = wake->pid; 427 we->next = wake_events; 428 wake_events = we; 429 p = find_create_pid(we->wakee); 430 431 if (p && p->current && p->current->state == TYPE_NONE) { 432 p->current->state_since = timestamp; 433 p->current->state = TYPE_WAITING; 434 } 435 if (p && p->current && p->current->state == TYPE_BLOCKED) { 436 pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp); 437 p->current->state_since = timestamp; 438 p->current->state = TYPE_WAITING; 439 } 440 } 441 442 static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) 443 { 444 struct per_pid *p = NULL, *prev_p; 445 struct sched_switch *sw = (void *)te; 446 447 448 prev_p = find_create_pid(sw->prev_pid); 449 450 p = find_create_pid(sw->next_pid); 451 452 if (prev_p->current && prev_p->current->state != TYPE_NONE) 453 pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp); 454 if (p && p->current) { 455 if (p->current->state != TYPE_NONE) 456 pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp); 457 458 p->current->state_since = timestamp; 459 p->current->state = TYPE_RUNNING; 460 } 461 462 if (prev_p->current) { 463 prev_p->current->state = TYPE_NONE; 464 prev_p->current->state_since = timestamp; 465 if (sw->prev_state & 2) 466 prev_p->current->state = TYPE_BLOCKED; 467 if (sw->prev_state == 0) 468 prev_p->current->state = TYPE_WAITING; 469 } 470 } 471 472 473 static int 474 process_sample_event(event_t *event) 475 { 476 int cursor = 0; 477 u64 addr = 0; 478 u64 stamp = 0; 479 u32 cpu = 0; 480 u32 pid = 0; 481 struct trace_entry *te; 482 483 if (sample_type & PERF_SAMPLE_IP) 484 cursor++; 485 486 if (sample_type & PERF_SAMPLE_TID) { 487 pid = event->sample.array[cursor]>>32; 488 cursor++; 489 } 490 if (sample_type & PERF_SAMPLE_TIME) { 491 stamp = event->sample.array[cursor++]; 492 493 if (!first_time || first_time > stamp) 494 first_time = stamp; 495 if (last_time < stamp) 496 last_time = stamp; 497 498 } 499 if (sample_type & PERF_SAMPLE_ADDR) 500 addr = event->sample.array[cursor++]; 501 if (sample_type & PERF_SAMPLE_ID) 502 cursor++; 503 if (sample_type & PERF_SAMPLE_STREAM_ID) 504 cursor++; 505 if (sample_type & PERF_SAMPLE_CPU) 506 cpu = event->sample.array[cursor++] & 0xFFFFFFFF; 507 if (sample_type & PERF_SAMPLE_PERIOD) 508 cursor++; 509 510 te = (void *)&event->sample.array[cursor]; 511 512 if (sample_type & PERF_SAMPLE_RAW && te->size > 0) { 513 char *event_str; 514 struct power_entry *pe; 515 516 pe = (void *)te; 517 518 event_str = perf_header__find_event(te->type); 519 520 if (!event_str) 521 return 0; 522 523 if (strcmp(event_str, "power:power_start") == 0) 524 c_state_start(cpu, stamp, pe->value); 525 526 if (strcmp(event_str, "power:power_end") == 0) 527 c_state_end(cpu, stamp); 528 529 if (strcmp(event_str, "power:power_frequency") == 0) 530 p_state_change(cpu, stamp, pe->value); 531 532 if (strcmp(event_str, "sched:sched_wakeup") == 0) 533 sched_wakeup(cpu, stamp, pid, te); 534 535 if (strcmp(event_str, "sched:sched_switch") == 0) 536 sched_switch(cpu, stamp, te); 537 } 538 return 0; 539 } 540 541 /* 542 * After the last sample we need to wrap up the current C/P state 543 * and close out each CPU for these. 544 */ 545 static void end_sample_processing(void) 546 { 547 u64 cpu; 548 struct power_event *pwr; 549 550 for (cpu = 0; cpu < numcpus; cpu++) { 551 pwr = malloc(sizeof(struct power_event)); 552 if (!pwr) 553 return; 554 memset(pwr, 0, sizeof(struct power_event)); 555 556 /* C state */ 557 #if 0 558 pwr->state = cpus_cstate_state[cpu]; 559 pwr->start_time = cpus_cstate_start_times[cpu]; 560 pwr->end_time = last_time; 561 pwr->cpu = cpu; 562 pwr->type = CSTATE; 563 pwr->next = power_events; 564 565 power_events = pwr; 566 #endif 567 /* P state */ 568 569 pwr = malloc(sizeof(struct power_event)); 570 if (!pwr) 571 return; 572 memset(pwr, 0, sizeof(struct power_event)); 573 574 pwr->state = cpus_pstate_state[cpu]; 575 pwr->start_time = cpus_pstate_start_times[cpu]; 576 pwr->end_time = last_time; 577 pwr->cpu = cpu; 578 pwr->type = PSTATE; 579 pwr->next = power_events; 580 581 if (!pwr->start_time) 582 pwr->start_time = first_time; 583 if (!pwr->state) 584 pwr->state = min_freq; 585 power_events = pwr; 586 } 587 } 588 589 static u64 sample_time(event_t *event) 590 { 591 int cursor; 592 593 cursor = 0; 594 if (sample_type & PERF_SAMPLE_IP) 595 cursor++; 596 if (sample_type & PERF_SAMPLE_TID) 597 cursor++; 598 if (sample_type & PERF_SAMPLE_TIME) 599 return event->sample.array[cursor]; 600 return 0; 601 } 602 603 604 /* 605 * We first queue all events, sorted backwards by insertion. 606 * The order will get flipped later. 607 */ 608 static int 609 queue_sample_event(event_t *event) 610 { 611 struct sample_wrapper *copy, *prev; 612 int size; 613 614 size = event->sample.header.size + sizeof(struct sample_wrapper) + 8; 615 616 copy = malloc(size); 617 if (!copy) 618 return 1; 619 620 memset(copy, 0, size); 621 622 copy->next = NULL; 623 copy->timestamp = sample_time(event); 624 625 memcpy(©->data, event, event->sample.header.size); 626 627 /* insert in the right place in the list */ 628 629 if (!all_samples) { 630 /* first sample ever */ 631 all_samples = copy; 632 return 0; 633 } 634 635 if (all_samples->timestamp < copy->timestamp) { 636 /* insert at the head of the list */ 637 copy->next = all_samples; 638 all_samples = copy; 639 return 0; 640 } 641 642 prev = all_samples; 643 while (prev->next) { 644 if (prev->next->timestamp < copy->timestamp) { 645 copy->next = prev->next; 646 prev->next = copy; 647 return 0; 648 } 649 prev = prev->next; 650 } 651 /* insert at the end of the list */ 652 prev->next = copy; 653 654 return 0; 655 } 656 657 static void sort_queued_samples(void) 658 { 659 struct sample_wrapper *cursor, *next; 660 661 cursor = all_samples; 662 all_samples = NULL; 663 664 while (cursor) { 665 next = cursor->next; 666 cursor->next = all_samples; 667 all_samples = cursor; 668 cursor = next; 669 } 670 } 671 672 /* 673 * Sort the pid datastructure 674 */ 675 static void sort_pids(void) 676 { 677 struct per_pid *new_list, *p, *cursor, *prev; 678 /* sort by ppid first, then by pid, lowest to highest */ 679 680 new_list = NULL; 681 682 while (all_data) { 683 p = all_data; 684 all_data = p->next; 685 p->next = NULL; 686 687 if (new_list == NULL) { 688 new_list = p; 689 p->next = NULL; 690 continue; 691 } 692 prev = NULL; 693 cursor = new_list; 694 while (cursor) { 695 if (cursor->ppid > p->ppid || 696 (cursor->ppid == p->ppid && cursor->pid > p->pid)) { 697 /* must insert before */ 698 if (prev) { 699 p->next = prev->next; 700 prev->next = p; 701 cursor = NULL; 702 continue; 703 } else { 704 p->next = new_list; 705 new_list = p; 706 cursor = NULL; 707 continue; 708 } 709 } 710 711 prev = cursor; 712 cursor = cursor->next; 713 if (!cursor) 714 prev->next = p; 715 } 716 } 717 all_data = new_list; 718 } 719 720 721 static void draw_c_p_states(void) 722 { 723 struct power_event *pwr; 724 pwr = power_events; 725 726 /* 727 * two pass drawing so that the P state bars are on top of the C state blocks 728 */ 729 while (pwr) { 730 if (pwr->type == CSTATE) 731 svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state); 732 pwr = pwr->next; 733 } 734 735 pwr = power_events; 736 while (pwr) { 737 if (pwr->type == PSTATE) { 738 if (!pwr->state) 739 pwr->state = min_freq; 740 svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state); 741 } 742 pwr = pwr->next; 743 } 744 } 745 746 static void draw_wakeups(void) 747 { 748 struct wake_event *we; 749 struct per_pid *p; 750 struct per_pidcomm *c; 751 752 we = wake_events; 753 while (we) { 754 int from = 0, to = 0; 755 char *task_from = NULL, *task_to = NULL; 756 757 /* locate the column of the waker and wakee */ 758 p = all_data; 759 while (p) { 760 if (p->pid == we->waker || p->pid == we->wakee) { 761 c = p->all; 762 while (c) { 763 if (c->Y && c->start_time <= we->time && c->end_time >= we->time) { 764 if (p->pid == we->waker) { 765 from = c->Y; 766 task_from = c->comm; 767 } 768 if (p->pid == we->wakee) { 769 to = c->Y; 770 task_to = c->comm; 771 } 772 } 773 c = c->next; 774 } 775 } 776 p = p->next; 777 } 778 779 if (we->waker == -1) 780 svg_interrupt(we->time, to); 781 else if (from && to && abs(from - to) == 1) 782 svg_wakeline(we->time, from, to); 783 else 784 svg_partial_wakeline(we->time, from, task_from, to, task_to); 785 we = we->next; 786 } 787 } 788 789 static void draw_cpu_usage(void) 790 { 791 struct per_pid *p; 792 struct per_pidcomm *c; 793 struct cpu_sample *sample; 794 p = all_data; 795 while (p) { 796 c = p->all; 797 while (c) { 798 sample = c->samples; 799 while (sample) { 800 if (sample->type == TYPE_RUNNING) 801 svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm); 802 803 sample = sample->next; 804 } 805 c = c->next; 806 } 807 p = p->next; 808 } 809 } 810 811 static void draw_process_bars(void) 812 { 813 struct per_pid *p; 814 struct per_pidcomm *c; 815 struct cpu_sample *sample; 816 int Y = 0; 817 818 Y = 2 * numcpus + 2; 819 820 p = all_data; 821 while (p) { 822 c = p->all; 823 while (c) { 824 if (!c->display) { 825 c->Y = 0; 826 c = c->next; 827 continue; 828 } 829 830 svg_box(Y, c->start_time, c->end_time, "process"); 831 sample = c->samples; 832 while (sample) { 833 if (sample->type == TYPE_RUNNING) 834 svg_sample(Y, sample->cpu, sample->start_time, sample->end_time); 835 if (sample->type == TYPE_BLOCKED) 836 svg_box(Y, sample->start_time, sample->end_time, "blocked"); 837 if (sample->type == TYPE_WAITING) 838 svg_waiting(Y, sample->start_time, sample->end_time); 839 sample = sample->next; 840 } 841 842 if (c->comm) { 843 char comm[256]; 844 if (c->total_time > 5000000000) /* 5 seconds */ 845 sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0); 846 else 847 sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0); 848 849 svg_text(Y, c->start_time, comm); 850 } 851 c->Y = Y; 852 Y++; 853 c = c->next; 854 } 855 p = p->next; 856 } 857 } 858 859 static int determine_display_tasks(u64 threshold) 860 { 861 struct per_pid *p; 862 struct per_pidcomm *c; 863 int count = 0; 864 865 p = all_data; 866 while (p) { 867 p->display = 0; 868 if (p->start_time == 1) 869 p->start_time = first_time; 870 871 /* no exit marker, task kept running to the end */ 872 if (p->end_time == 0) 873 p->end_time = last_time; 874 if (p->total_time >= threshold) 875 p->display = 1; 876 877 c = p->all; 878 879 while (c) { 880 c->display = 0; 881 882 if (c->start_time == 1) 883 c->start_time = first_time; 884 885 if (c->total_time >= threshold) { 886 c->display = 1; 887 count++; 888 } 889 890 if (c->end_time == 0) 891 c->end_time = last_time; 892 893 c = c->next; 894 } 895 p = p->next; 896 } 897 return count; 898 } 899 900 901 902 #define TIME_THRESH 10000000 903 904 static void write_svg_file(const char *filename) 905 { 906 u64 i; 907 int count; 908 909 numcpus++; 910 911 912 count = determine_display_tasks(TIME_THRESH); 913 914 /* We'd like to show at least 15 tasks; be less picky if we have fewer */ 915 if (count < 15) 916 count = determine_display_tasks(TIME_THRESH / 10); 917 918 open_svg(filename, numcpus, count, first_time, last_time); 919 920 svg_time_grid(); 921 svg_legenda(); 922 923 for (i = 0; i < numcpus; i++) 924 svg_cpu_box(i, max_freq, turbo_frequency); 925 926 draw_cpu_usage(); 927 draw_process_bars(); 928 draw_c_p_states(); 929 draw_wakeups(); 930 931 svg_close(); 932 } 933 934 static int 935 process_event(event_t *event) 936 { 937 938 switch (event->header.type) { 939 940 case PERF_RECORD_COMM: 941 return process_comm_event(event); 942 case PERF_RECORD_FORK: 943 return process_fork_event(event); 944 case PERF_RECORD_EXIT: 945 return process_exit_event(event); 946 case PERF_RECORD_SAMPLE: 947 return queue_sample_event(event); 948 949 /* 950 * We dont process them right now but they are fine: 951 */ 952 case PERF_RECORD_MMAP: 953 case PERF_RECORD_THROTTLE: 954 case PERF_RECORD_UNTHROTTLE: 955 return 0; 956 957 default: 958 return -1; 959 } 960 961 return 0; 962 } 963 964 static void process_samples(void) 965 { 966 struct sample_wrapper *cursor; 967 event_t *event; 968 969 sort_queued_samples(); 970 971 cursor = all_samples; 972 while (cursor) { 973 event = (void *)&cursor->data; 974 cursor = cursor->next; 975 process_sample_event(event); 976 } 977 } 978 979 980 static int __cmd_timechart(void) 981 { 982 int ret, rc = EXIT_FAILURE; 983 unsigned long offset = 0; 984 unsigned long head, shift; 985 struct stat statbuf; 986 event_t *event; 987 uint32_t size; 988 char *buf; 989 int input; 990 991 input = open(input_name, O_RDONLY); 992 if (input < 0) { 993 fprintf(stderr, " failed to open file: %s", input_name); 994 if (!strcmp(input_name, "perf.data")) 995 fprintf(stderr, " (try 'perf record' first)"); 996 fprintf(stderr, "\n"); 997 exit(-1); 998 } 999 1000 ret = fstat(input, &statbuf); 1001 if (ret < 0) { 1002 perror("failed to stat file"); 1003 exit(-1); 1004 } 1005 1006 if (!statbuf.st_size) { 1007 fprintf(stderr, "zero-sized file, nothing to do!\n"); 1008 exit(0); 1009 } 1010 1011 header = perf_header__read(input); 1012 head = header->data_offset; 1013 1014 sample_type = perf_header__sample_type(header); 1015 1016 shift = page_size * (head / page_size); 1017 offset += shift; 1018 head -= shift; 1019 1020 remap: 1021 buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, 1022 MAP_SHARED, input, offset); 1023 if (buf == MAP_FAILED) { 1024 perror("failed to mmap file"); 1025 exit(-1); 1026 } 1027 1028 more: 1029 event = (event_t *)(buf + head); 1030 1031 size = event->header.size; 1032 if (!size) 1033 size = 8; 1034 1035 if (head + event->header.size >= page_size * mmap_window) { 1036 int ret2; 1037 1038 shift = page_size * (head / page_size); 1039 1040 ret2 = munmap(buf, page_size * mmap_window); 1041 assert(ret2 == 0); 1042 1043 offset += shift; 1044 head -= shift; 1045 goto remap; 1046 } 1047 1048 size = event->header.size; 1049 1050 if (!size || process_event(event) < 0) { 1051 1052 printf("%p [%p]: skipping unknown header type: %d\n", 1053 (void *)(offset + head), 1054 (void *)(long)(event->header.size), 1055 event->header.type); 1056 1057 /* 1058 * assume we lost track of the stream, check alignment, and 1059 * increment a single u64 in the hope to catch on again 'soon'. 1060 */ 1061 1062 if (unlikely(head & 7)) 1063 head &= ~7ULL; 1064 1065 size = 8; 1066 } 1067 1068 head += size; 1069 1070 if (offset + head >= header->data_offset + header->data_size) 1071 goto done; 1072 1073 if (offset + head < (unsigned long)statbuf.st_size) 1074 goto more; 1075 1076 done: 1077 rc = EXIT_SUCCESS; 1078 close(input); 1079 1080 1081 process_samples(); 1082 1083 end_sample_processing(); 1084 1085 sort_pids(); 1086 1087 write_svg_file(output_name); 1088 1089 printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name); 1090 1091 return rc; 1092 } 1093 1094 static const char * const timechart_usage[] = { 1095 "perf timechart [<options>] {record}", 1096 NULL 1097 }; 1098 1099 static const char *record_args[] = { 1100 "record", 1101 "-a", 1102 "-R", 1103 "-M", 1104 "-f", 1105 "-c", "1", 1106 "-e", "power:power_start", 1107 "-e", "power:power_end", 1108 "-e", "power:power_frequency", 1109 "-e", "sched:sched_wakeup", 1110 "-e", "sched:sched_switch", 1111 }; 1112 1113 static int __cmd_record(int argc, const char **argv) 1114 { 1115 unsigned int rec_argc, i, j; 1116 const char **rec_argv; 1117 1118 rec_argc = ARRAY_SIZE(record_args) + argc - 1; 1119 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 1120 1121 for (i = 0; i < ARRAY_SIZE(record_args); i++) 1122 rec_argv[i] = strdup(record_args[i]); 1123 1124 for (j = 1; j < (unsigned int)argc; j++, i++) 1125 rec_argv[i] = argv[j]; 1126 1127 return cmd_record(i, rec_argv, NULL); 1128 } 1129 1130 static const struct option options[] = { 1131 OPT_STRING('i', "input", &input_name, "file", 1132 "input file name"), 1133 OPT_STRING('o', "output", &output_name, "file", 1134 "output file name"), 1135 OPT_INTEGER('w', "width", &svg_page_width, 1136 "page width"), 1137 OPT_END() 1138 }; 1139 1140 1141 int cmd_timechart(int argc, const char **argv, const char *prefix __used) 1142 { 1143 symbol__init(); 1144 1145 page_size = getpagesize(); 1146 1147 argc = parse_options(argc, argv, options, timechart_usage, 1148 PARSE_OPT_STOP_AT_NON_OPTION); 1149 1150 if (argc && !strncmp(argv[0], "rec", 3)) 1151 return __cmd_record(argc, argv); 1152 else if (argc) 1153 usage_with_options(timechart_usage, options); 1154 1155 setup_pager(); 1156 1157 return __cmd_timechart(); 1158 } 1159