/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1


static char		const *input_name = "perf.data";
static char		const *output_name = "output.svg";

static unsigned int	numcpus;
static u64		min_freq;	/* Lowest CPU frequency seen */
static u64		max_freq;	/* Highest CPU frequency seen */
static u64		turbo_frequency;

static u64		first_time, last_time;

static bool		power_only;


struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;

/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *	This is because we want to track different programs separately, while
 *	exec reuses the original pid (by design).
 * Each comm has a list of samples that will be used to draw
 * the final graph.
 */

struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
};

static struct per_pid *all_data;

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
};

static struct power_event    *power_events;
static struct wake_event     *wake_events;

struct process_filter;
struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;


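/*
 * Look up the per_pid entry for @pid in the global all_data list,
 * allocating and linking a zeroed entry at the head if none exists yet.
 */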
static struct per_pid *find_create_pid(int pid)
{
	struct per_pid *cursor = all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = malloc(sizeof(struct per_pid));
	assert(cursor != NULL);
	memset(cursor, 0, sizeof(struct per_pid));
	cursor->pid = pid;
	cursor->next = all_data;
	all_data = cursor;
	return cursor;
}

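/*
 * Record @comm for @pid: reuse a matching per_pidcomm if this name was seen
 * before, fill in the first unnamed one, or otherwise allocate a new entry
 * and make it the pid's current comm.
 */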
static void pid_set_comm(int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = malloc(sizeof(struct per_pidcomm));
	assert(c != NULL);
	memset(c, 0, sizeof(struct per_pidcomm));
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

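/*
 * Handle a fork: the child inherits the parent's comm (if known), and the
 * pid and its current comm start their lifetime at @timestamp.
 */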
static void pid_fork(int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(pid);
	pp = find_create_pid(ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

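/*
 * Attach a scheduling sample of the given type to the pid's current comm,
 * accounting running time and widening the pid/comm start times as needed.
 */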
static void
pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(pid);
	c = p->current;
	if (!c) {
		c = malloc(sizeof(struct per_pidcomm));
		assert(c != NULL);
		memset(c, 0, sizeof(struct per_pidcomm));
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = malloc(sizeof(struct cpu_sample));
	assert(sample != NULL);
	memset(sample, 0, sizeof(struct cpu_sample));
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int process_comm_event(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_set_comm(event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}

struct trace_entry {
	unsigned short		type;
	unsigned char		flags;
	unsigned char		preempt_count;
	int			pid;
	int			lock_depth;
};

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
struct power_entry_old {
	struct trace_entry te;
	u64	type;
	u64	value;
	u64	cpu_id;
};
#endif

struct power_processor_entry {
	struct trace_entry te;
	u32	state;
	u32	cpu_id;
};

#define TASK_COMM_LEN 16
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int   pid;
	int   prio;
	int   success;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF            - interrupts were disabled
 *  IRQS_NOSUPPORT      - arch does not support irqs_disabled_flags
 *  NEED_RESCHED        - reschedule is requested
 *  HARDIRQ             - inside an interrupt handler
 *  SOFTIRQ             - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};



struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int  prev_pid;
	int  prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int  next_pid;
	int  next_prio;
};

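/* Remember when, and into which C state, the CPU went idle. */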
static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

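/*
 * The CPU left idle: turn the interval recorded by c_state_start() into a
 * CSTATE power_event and add it to the global list.
 */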
static void c_state_end(int cpu, u64 timestamp)
{
	struct power_event *pwr;
	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = power_events;

	power_events = pwr;
}

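/*
 * A frequency change closes the CPU's previous P state interval and starts
 * a new one at @new_freq, while tracking the lowest, highest and turbo
 * frequencies seen.
 */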
static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = malloc(sizeof(struct power_event));
	if (!pwr)
		return;
	memset(pwr, 0, sizeof(struct power_event));

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = power_events;

	if (!pwr->start_time)
		pwr->start_time = first_time;

	power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > max_freq)
		max_freq = new_freq;

	if (new_freq < min_freq || min_freq == 0)
		min_freq = new_freq;

	if (new_freq == max_freq - 1000)
		turbo_frequency = max_freq;
}

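/*
 * Handle sched_wakeup: record who woke whom (wakeups from hard/soft IRQ
 * context are flagged with waker -1) and move the wakee from blocked or
 * unknown state to waiting.
 */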
static void
sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
{
	struct wake_event *we;
	struct per_pid *p;
	struct wakeup_entry *wake = (void *)te;

	we = malloc(sizeof(struct wake_event));
	if (!we)
		return;

	memset(we, 0, sizeof(struct wake_event));
	we->time = timestamp;
	we->waker = pid;

	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wake->pid;
	we->next = wake_events;
	wake_events = we;
	p = find_create_pid(we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

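/*
 * Handle sched_switch: close the current sample of the task being switched
 * out, mark the incoming task as running, and classify the outgoing task as
 * blocked (uninterruptible) or waiting based on its prev_state.
 */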
static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;


	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}


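/*
 * Dispatch one raw trace sample: track the overall time window and the
 * highest CPU number seen, then hand the payload to the matching power or
 * sched handler (with a fallback for the older power:power_* events).
 */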
static int process_sample_event(union perf_event *event __used,
				struct perf_sample *sample,
				struct perf_session *session)
{
	struct trace_entry *te;

	if (session->sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > sample->time)
			first_time = sample->time;
		if (last_time < sample->time)
			last_time = sample->time;
	}

	te = (void *)sample->raw_data;
	if (session->sample_type & PERF_SAMPLE_RAW && sample->raw_size > 0) {
		char *event_str;
#ifdef SUPPORT_OLD_POWER_EVENTS
		struct power_entry_old *peo;
		peo = (void *)te;
#endif
		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		if (sample->cpu > numcpus)
			numcpus = sample->cpu;

		if (strcmp(event_str, "power:cpu_idle") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			if (ppe->state == (u32)PWR_EVENT_EXIT)
				c_state_end(ppe->cpu_id, sample->time);
			else
				c_state_start(ppe->cpu_id, sample->time,
					      ppe->state);
		}
		else if (strcmp(event_str, "power:cpu_frequency") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			p_state_change(ppe->cpu_id, sample->time, ppe->state);
		}

		else if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(sample->cpu, sample->time, sample->pid, te);

		else if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(sample->cpu, sample->time, te);

#ifdef SUPPORT_OLD_POWER_EVENTS
		if (use_old_power_events) {
			if (strcmp(event_str, "power:power_start") == 0)
				c_state_start(peo->cpu_id, sample->time,
					      peo->value);

			else if (strcmp(event_str, "power:power_end") == 0)
				c_state_end(sample->cpu, sample->time);

			else if (strcmp(event_str,
					"power:power_frequency") == 0)
				p_state_change(peo->cpu_id, sample->time,
					       peo->value);
		}
#endif
	}
	return 0;
}

/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(void)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= numcpus; cpu++) {
		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		/* C state */
#if 0
		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = power_events;

		power_events = pwr;
#endif
		/* P state */

		pwr = malloc(sizeof(struct power_event));
		if (!pwr)
			return;
		memset(pwr, 0, sizeof(struct power_event));

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = power_events;

		if (!pwr->start_time)
			pwr->start_time = first_time;
		if (!pwr->state)
			pwr->state = min_freq;
		power_events = pwr;
	}
}

/*
 * Sort the pid data structure
 */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}


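/* Emit the per-CPU C state blocks and the P state (frequency) bars. */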
static void draw_c_p_states(void)
{
	struct power_event *pwr;
	pwr = power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

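/*
 * Draw an arrow for each wakeup, looking up the chart rows (Y) of the waker
 * and wakee; wakeups from interrupt context get the dedicated interrupt
 * marker instead.
 */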
static void draw_wakeups(void)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to);
		else
			svg_partial_wakeline(we->time, from, task_from, to, task_to);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

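/*
 * Below the per-CPU rows, draw one bar per displayed comm showing its
 * running/blocked/waiting samples, labelled with the comm, pid and total
 * run time.
 */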
static void draw_process_bars(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * numcpus + 2;

	p = all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
				if (sample->type == TYPE_BLOCKED)
					svg_box(Y, sample->start_time, sample->end_time, "blocked");
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->start_time, sample->end_time);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	struct process_filter *filt;
	int pid;

	pid = strtoull(string, NULL, 10);
	filt = malloc(sizeof(struct process_filter));
	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid  = pid;
	filt->next = process_filter;

	process_filter = filt;
}

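/*
 * A comm is shown if no filter was given, or if it matches any -p filter by
 * pid or by name.
 */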
static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

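/*
 * Decide which pids/comms make it into the chart: everything that ran for
 * at least @threshold ns (unless --power-only was given), with missing
 * start/end markers clamped to the trace window. Returns the number of
 * comms that will be shown.
 */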
static int determine_display_tasks(u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered();

	p = all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = last_time;
		if (p->total_time >= threshold && !power_only)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = first_time;

			if (c->total_time >= threshold && !power_only) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}



#define TIME_THRESH 10000000

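/*
 * Produce the SVG: pick the set of tasks to show (relaxing the 10ms
 * threshold if fewer than 15 qualify), then draw the CPU boxes, CPU usage,
 * process bars, C/P states and wakeups.
 */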
static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;


	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}

static struct perf_event_ops event_ops = {
	.comm			= process_comm_event,
	.fork			= process_fork_event,
	.exit			= process_exit_event,
	.sample			= process_sample_event,
	.ordered_samples	= true,
};

static int __cmd_timechart(void)
{
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &event_ops);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	ret = perf_session__process_events(session, &event_ops);
	if (ret)
		goto out_delete;

	end_sample_processing();

	sort_pids();

	write_svg_file(output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(last_time - first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}

static const char * const timechart_usage[] = {
	"perf timechart [<options>] {record}",
	NULL
};

#ifdef SUPPORT_OLD_POWER_EVENTS
static const char * const record_old_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "power:power_start",
	"-e", "power:power_end",
	"-e", "power:power_frequency",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};
#endif

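/*
 * Default arguments for "perf timechart record" when the newer power
 * tracepoints are available; roughly equivalent to running:
 *
 *   perf record -a -R -f -c 1 \
 *	-e power:cpu_frequency -e power:cpu_idle \
 *	-e sched:sched_wakeup -e sched:sched_switch
 *
 * with any extra user arguments appended (see __cmd_record() below).
 */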
static const char * const record_new_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "power:cpu_frequency",
	"-e", "power:cpu_idle",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};

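/*
 * Build the argv for the underlying "perf record" call: the fixed argument
 * list above (falling back to the old power:power_* events on kernels that
 * lack power:cpu_idle), followed by whatever the user passed after "record".
 */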
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const *record_args = record_new_args;
	unsigned int record_elems = ARRAY_SIZE(record_new_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		record_args = record_old_args;
		record_elems = ARRAY_SIZE(record_old_args);
	}
#endif

	rec_argc = record_elems + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < record_elems; i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __used, const char *arg, int __used unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width,
		    "page width"),
	OPT_BOOLEAN('P', "power-only", &power_only,
		    "output power data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		    "Look for files with symbols relative to this directory"),
	OPT_END()
};


int cmd_timechart(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, options, timechart_usage,
			PARSE_OPT_STOP_AT_NON_OPTION);

	symbol__init();

	if (argc && !strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);
	else if (argc)
		usage_with_options(timechart_usage, options);

	setup_pager();

	return __cmd_timechart();
}