/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <traceevent/event-parse.h>

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"
#include "util/data.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1


static unsigned int	numcpus;
static u64		min_freq;	/* Lowest CPU frequency seen */
static u64		max_freq;	/* Highest CPU frequency seen */
static u64		turbo_frequency;

static u64		first_time, last_time;

static bool		power_only;


struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;

/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *	This is because we want to track different programs differently,
 *	while exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw
 * the final graph.
 */

struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event *power_events;
static struct wake_event *wake_events;

struct process_filter;
struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;


static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = zalloc(sizeof(*cursor));
	assert(cursor != NULL);
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}

static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = zalloc(sizeof(*c));
	assert(c != NULL);
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = zalloc(sizeof(*c));
		assert(c != NULL);
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = zalloc(sizeof(*sample));
	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int process_comm_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_set_comm(event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(struct perf_tool *tool __maybe_unused,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}

struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			lock_depth;
};

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
struct power_entry_old {
	struct trace_entry te;
	u64	type;
	u64	value;
	u64	cpu_id;
};
#endif

struct power_processor_entry {
	struct trace_entry te;
	u32	state;
	u32	cpu_id;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int   pid;
	int   prio;
	int   success;
};

struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int  prev_pid;
	int  prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int  next_pid;
	int  next_prio;
};

static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr = zalloc(sizeof(*pwr));

	if (!pwr)
		return;

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}

static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = zalloc(sizeof(*pwr));
	if (!pwr)
		return;

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}

static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;
	struct wake_event *we = zalloc(sizeof(*we));

	if (!we)
		return;

	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;


	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}

typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine __maybe_unused)
{
	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > sample->time)
			first_time = sample->time;
		if (last_time < sample->time)
			last_time = sample->time;
	}

	if (sample->cpu > numcpus)
		numcpus = sample->cpu;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(evsel, sample);
	}

	return 0;
}

static int
process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
			struct perf_sample *sample)
{
	struct power_processor_entry *ppe = sample->raw_data;

	if (ppe->state == (u32) PWR_EVENT_EXIT)
		c_state_end(ppe->cpu_id, sample->time);
	else
		c_state_start(ppe->cpu_id, sample->time, ppe->state);
	return 0;
}

static int
process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
			     struct perf_sample *sample)
{
	struct power_processor_entry *ppe = sample->raw_data;

	p_state_change(ppe->cpu_id, sample->time, ppe->state);
	return 0;
}

static int
process_sample_sched_wakeup(struct perf_evsel *evsel __maybe_unused,
			    struct perf_sample *sample)
{
	struct trace_entry *te = sample->raw_data;

	sched_wakeup(sample->cpu, sample->time, sample->pid, te);
	return 0;
}

static int
process_sample_sched_switch(struct perf_evsel *evsel __maybe_unused,
			    struct perf_sample *sample)
{
	struct trace_entry *te = sample->raw_data;

	sched_switch(sample->cpu, sample->time, te);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int
process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
			   struct perf_sample *sample)
{
	struct power_entry_old *peo = sample->raw_data;

	c_state_start(peo->cpu_id, sample->time, peo->value);
	return 0;
}

static int
process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
			 struct perf_sample *sample)
{
	c_state_end(sample->cpu, sample->time);
	return 0;
}

static int
process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
			       struct perf_sample *sample)
{
	struct power_entry_old *peo = sample->raw_data;

	p_state_change(peo->cpu_id, sample->time, peo->value);
	return 0;
}
#endif /* SUPPORT_OLD_POWER_EVENTS */

/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		/* C state */
#if 0
		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
			    (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}


static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	int pid = strtoull(string, NULL, 10);
	struct process_filter *filt = malloc(sizeof(*filt));

	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid  = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;
		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}



#define TIME_THRESH 10000000

static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;


	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}

static int __cmd_timechart(const char *output_name)
{
	struct perf_tool perf_timechart = {
		.comm		 = process_comm_event,
		.fork		 = process_fork_event,
		.exit		 = process_exit_event,
		.sample		 = process_sample_event,
		.ordered_samples = true,
	};
	const struct perf_evsel_str_handler power_tracepoints[] = {
		{ "power:cpu_idle",		process_sample_cpu_idle },
		{ "power:cpu_frequency",	process_sample_cpu_frequency },
		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
		{ "sched:sched_switch",		process_sample_sched_switch },
#ifdef SUPPORT_OLD_POWER_EVENTS
		{ "power:power_start",		process_sample_power_start },
		{ "power:power_end",		process_sample_power_end },
		{ "power:power_frequency",	process_sample_power_frequency },
#endif
	};
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};

	struct perf_session *session = perf_session__new(&file, false,
							 &perf_timechart);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session,
						   power_tracepoints)) {
		pr_err("Initializing session tracepoint handlers failed\n");
		goto out_delete;
	}

	ret = perf_session__process_events(session, &perf_timechart);
	if (ret)
		goto out_delete;

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(last_time - first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}

static int __cmd_record(int argc, const char **argv)
{
#ifdef SUPPORT_OLD_POWER_EVENTS
	const char * const record_old_args[] = {
		"record", "-a", "-R", "-c", "1",
		"-e", "power:power_start",
		"-e", "power:power_end",
		"-e", "power:power_frequency",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
#endif
	const char * const record_new_args[] = {
		"record", "-a", "-R", "-c", "1",
		"-e", "power:cpu_frequency",
1047 "-e", "power:cpu_idle", 1048 "-e", "sched:sched_wakeup", 1049 "-e", "sched:sched_switch", 1050 }; 1051 unsigned int rec_argc, i, j; 1052 const char **rec_argv; 1053 const char * const *record_args = record_new_args; 1054 unsigned int record_elems = ARRAY_SIZE(record_new_args); 1055 1056 #ifdef SUPPORT_OLD_POWER_EVENTS 1057 if (!is_valid_tracepoint("power:cpu_idle") && 1058 is_valid_tracepoint("power:power_start")) { 1059 use_old_power_events = 1; 1060 record_args = record_old_args; 1061 record_elems = ARRAY_SIZE(record_old_args); 1062 } 1063 #endif 1064 1065 rec_argc = record_elems + argc - 1; 1066 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 1067 1068 if (rec_argv == NULL) 1069 return -ENOMEM; 1070 1071 for (i = 0; i < record_elems; i++) 1072 rec_argv[i] = strdup(record_args[i]); 1073 1074 for (j = 1; j < (unsigned int)argc; j++, i++) 1075 rec_argv[i] = argv[j]; 1076 1077 return cmd_record(i, rec_argv, NULL); 1078 } 1079 1080 static int 1081 parse_process(const struct option *opt __maybe_unused, const char *arg, 1082 int __maybe_unused unset) 1083 { 1084 if (arg) 1085 add_process_filter(arg); 1086 return 0; 1087 } 1088 1089 int cmd_timechart(int argc, const char **argv, 1090 const char *prefix __maybe_unused) 1091 { 1092 const char *output_name = "output.svg"; 1093 const struct option options[] = { 1094 OPT_STRING('i', "input", &input_name, "file", "input file name"), 1095 OPT_STRING('o', "output", &output_name, "file", "output file name"), 1096 OPT_INTEGER('w', "width", &svg_page_width, "page width"), 1097 OPT_BOOLEAN('P', "power-only", &power_only, "output power data only"), 1098 OPT_CALLBACK('p', "process", NULL, "process", 1099 "process selector. Pass a pid or process name.", 1100 parse_process), 1101 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 1102 "Look for files with symbols relative to this directory"), 1103 OPT_END() 1104 }; 1105 const char * const timechart_usage[] = { 1106 "perf timechart [<options>] {record}", 1107 NULL 1108 }; 1109 1110 argc = parse_options(argc, argv, options, timechart_usage, 1111 PARSE_OPT_STOP_AT_NON_OPTION); 1112 1113 symbol__init(); 1114 1115 if (argc && !strncmp(argv[0], "rec", 3)) 1116 return __cmd_record(argc, argv); 1117 else if (argc) 1118 usage_with_options(timechart_usage, options); 1119 1120 setup_pager(); 1121 1122 return __cmd_timechart(output_name); 1123 } 1124