1 /*
2  * builtin-timechart.c - make an svg timechart of system activity
3  *
4  * (C) Copyright 2009 Intel Corporation
5  *
6  * Authors:
7  *     Arjan van de Ven <arjan@linux.intel.com>
8  *
9  * This program is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * as published by the Free Software Foundation; version 2
12  * of the License.
13  */
14 
15 #include "builtin.h"
16 
17 #include "util/util.h"
18 
19 #include "util/color.h"
20 #include <linux/list.h>
21 #include "util/cache.h"
22 #include <linux/rbtree.h>
23 #include "util/symbol.h"
24 #include "util/callchain.h"
25 #include "util/strlist.h"
26 
27 #include "perf.h"
28 #include "util/header.h"
29 #include "util/parse-options.h"
30 #include "util/parse-events.h"
31 #include "util/event.h"
32 #include "util/session.h"
33 #include "util/svghelper.h"
34 
35 #define SUPPORT_OLD_POWER_EVENTS 1
36 #define PWR_EVENT_EXIT -1
37 
38 
static char		const *input_name = "perf.data";	/* perf data file to read (-i) */
static char		const *output_name = "output.svg";	/* SVG file to write (-o) */

static unsigned int	numcpus;	/* highest CPU number seen in samples */
static u64		min_freq;	/* Lowest CPU frequency seen */
static u64		max_freq;	/* Highest CPU frequency seen */
static u64		turbo_frequency;	/* set when a turbo P state is detected */

static u64		first_time, last_time;	/* timestamp range covered by the trace */

static bool		power_only;	/* -P: draw only power (C/P state) data */


/* Forward declarations; the types are defined below. */
struct per_pid;
struct per_pidcomm;

struct cpu_sample;
struct power_event;
struct wake_event;

struct sample_wrapper;
60 
61 /*
62  * Datastructure layout:
63  * We keep an list of "pid"s, matching the kernels notion of a task struct.
64  * Each "pid" entry, has a list of "comm"s.
65  *	this is because we want to track different programs different, while
66  *	exec will reuse the original pid (by design).
67  * Each comm has a list of samples that will be used to draw
68  * final graph.
69  */
70 
/*
 * One entry per kernel task (pid).  Heads a list of per-comm entries and
 * accumulates timing across all of them.
 */
struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;		/* parent pid, filled in by pid_fork() */

	u64		start_time;	/* earliest timestamp seen for this task */
	u64		end_time;	/* exit timestamp, or 0 if it never exited */
	u64		total_time;	/* accumulated TYPE_RUNNING time */
	int		display;	/* nonzero if this task is drawn in the SVG */

	struct per_pidcomm *all;	/* every comm this pid has had */
	struct per_pidcomm *current;	/* the comm currently in effect */
};
85 
86 
/*
 * One command name ("comm") of a pid, with its own lifetime, scheduler
 * state machine and list of time-interval samples.
 */
struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;	/* earliest sample start for this comm */
	u64		end_time;	/* end timestamp, or 0 while still alive */
	u64		total_time;	/* accumulated TYPE_RUNNING time */

	int		Y;		/* row assigned in the SVG, 0 = not drawn */
	int		display;	/* nonzero if this comm is drawn */

	long		state;		/* current TYPE_* scheduler state */
	u64		state_since;	/* timestamp the current state began */

	char		*comm;		/* command name; NULL for anonymous entries */

	struct cpu_sample *samples;	/* recorded intervals, newest first */
};
104 
105 struct sample_wrapper {
106 	struct sample_wrapper *next;
107 
108 	u64		timestamp;
109 	unsigned char	data[0];
110 };
111 
/* Scheduler states an interval can be in. */
#define TYPE_NONE	0	/* no known state / just switched out */
#define TYPE_RUNNING	1	/* on a CPU */
#define TYPE_WAITING	2	/* runnable, waiting for a CPU */
#define TYPE_BLOCKED	3	/* switched out in a blocked state (see sched_switch()) */

/* One contiguous time interval of a comm, in a given state, on a cpu. */
struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;	/* one of the TYPE_* values above */
	int cpu;
};
125 
/* Head of the list of all tasks seen in the trace. */
static struct per_pid *all_data;

/* power_event types */
#define CSTATE 1	/* idle (C) state interval */
#define PSTATE 2	/* frequency (P) state interval */

/* One C- or P-state interval of a cpu, used for the power chart. */
struct power_event {
	struct power_event *next;
	int type;	/* CSTATE or PSTATE */
	int state;	/* C state number, or the frequency for PSTATE */
	u64 start_time;
	u64 end_time;
	int cpu;
};
139 
/* A wakeup: task "waker" woke up task "wakee" at "time". */
struct wake_event {
	struct wake_event *next;
	int waker;	/* -1 when the wakeup came from (soft)irq context */
	int wakee;
	u64 time;
};

static struct power_event    *power_events;	/* all C/P state intervals, newest first */
static struct wake_event     *wake_events;	/* all wakeups, newest first */
149 
/* -p/--process filters; a task passes when either its pid or comm matches. */
struct process_filter;
struct process_filter {
	char			*name;	/* the literal option argument */
	int			pid;	/* numeric value of the argument (0 if non-numeric) */
	struct process_filter	*next;
};

static struct process_filter *process_filter;
158 
159 
160 static struct per_pid *find_create_pid(int pid)
161 {
162 	struct per_pid *cursor = all_data;
163 
164 	while (cursor) {
165 		if (cursor->pid == pid)
166 			return cursor;
167 		cursor = cursor->next;
168 	}
169 	cursor = malloc(sizeof(struct per_pid));
170 	assert(cursor != NULL);
171 	memset(cursor, 0, sizeof(struct per_pid));
172 	cursor->pid = pid;
173 	cursor->next = all_data;
174 	all_data = cursor;
175 	return cursor;
176 }
177 
178 static void pid_set_comm(int pid, char *comm)
179 {
180 	struct per_pid *p;
181 	struct per_pidcomm *c;
182 	p = find_create_pid(pid);
183 	c = p->all;
184 	while (c) {
185 		if (c->comm && strcmp(c->comm, comm) == 0) {
186 			p->current = c;
187 			return;
188 		}
189 		if (!c->comm) {
190 			c->comm = strdup(comm);
191 			p->current = c;
192 			return;
193 		}
194 		c = c->next;
195 	}
196 	c = malloc(sizeof(struct per_pidcomm));
197 	assert(c != NULL);
198 	memset(c, 0, sizeof(struct per_pidcomm));
199 	c->comm = strdup(comm);
200 	p->current = c;
201 	c->next = p->all;
202 	p->all = c;
203 }
204 
205 static void pid_fork(int pid, int ppid, u64 timestamp)
206 {
207 	struct per_pid *p, *pp;
208 	p = find_create_pid(pid);
209 	pp = find_create_pid(ppid);
210 	p->ppid = ppid;
211 	if (pp->current && pp->current->comm && !p->current)
212 		pid_set_comm(pid, pp->current->comm);
213 
214 	p->start_time = timestamp;
215 	if (p->current) {
216 		p->current->start_time = timestamp;
217 		p->current->state_since = timestamp;
218 	}
219 }
220 
221 static void pid_exit(int pid, u64 timestamp)
222 {
223 	struct per_pid *p;
224 	p = find_create_pid(pid);
225 	p->end_time = timestamp;
226 	if (p->current)
227 		p->current->end_time = timestamp;
228 }
229 
230 static void
231 pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
232 {
233 	struct per_pid *p;
234 	struct per_pidcomm *c;
235 	struct cpu_sample *sample;
236 
237 	p = find_create_pid(pid);
238 	c = p->current;
239 	if (!c) {
240 		c = malloc(sizeof(struct per_pidcomm));
241 		assert(c != NULL);
242 		memset(c, 0, sizeof(struct per_pidcomm));
243 		p->current = c;
244 		c->next = p->all;
245 		p->all = c;
246 	}
247 
248 	sample = malloc(sizeof(struct cpu_sample));
249 	assert(sample != NULL);
250 	memset(sample, 0, sizeof(struct cpu_sample));
251 	sample->start_time = start;
252 	sample->end_time = end;
253 	sample->type = type;
254 	sample->next = c->samples;
255 	sample->cpu = cpu;
256 	c->samples = sample;
257 
258 	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
259 		c->total_time += (end-start);
260 		p->total_time += (end-start);
261 	}
262 
263 	if (c->start_time == 0 || c->start_time > start)
264 		c->start_time = start;
265 	if (p->start_time == 0 || p->start_time > start)
266 		p->start_time = start;
267 }
268 
#define MAX_CPUS 4096

/* Per-cpu bookkeeping for the currently open C/P state interval. */
static u64 cpus_cstate_start_times[MAX_CPUS];	/* when the current C state began */
static int cpus_cstate_state[MAX_CPUS];		/* current C state number */
static u64 cpus_pstate_start_times[MAX_CPUS];	/* when the current frequency began */
static u64 cpus_pstate_state[MAX_CPUS];		/* current frequency */
275 
/* Session callback: record the thread's command name on a comm event. */
static int process_comm_event(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_set_comm(event->comm.tid, event->comm.comm);
	return 0;
}
283 
/* Session callback: record a fork (new task) event. */
static int process_fork_event(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}
291 
/* Session callback: record a task exit (reads the fork-event layout). */
static int process_exit_event(union perf_event *event,
			      struct perf_sample *sample __used,
			      struct perf_session *session __used)
{
	pid_exit(event->fork.pid, event->fork.time);
	return 0;
}
299 
/* Common prefix of a raw tracepoint record, as found in sample->raw_data. */
struct trace_entry {
	unsigned short		type;	/* tracepoint id, resolved via perf_header__find_event() */
	unsigned char		flags;	/* TRACE_FLAG_* bits, see enum below */
	unsigned char		preempt_count;
	int			pid;
	int			lock_depth;
};
307 
#ifdef SUPPORT_OLD_POWER_EVENTS
/* Set when the trace uses the old power:power_* tracepoints (see record_old_args). */
static int use_old_power_events;
/* Payload of the old power:power_start/power_end/power_frequency events. */
struct power_entry_old {
	struct trace_entry te;
	u64	type;
	u64	value;	/* C state or frequency, depending on the event */
	u64	cpu_id;
};
#endif
317 
/* Payload of the power:cpu_idle and power:cpu_frequency tracepoints. */
struct power_processor_entry {
	struct trace_entry te;
	u32	state;	/* C state / new frequency, or PWR_EVENT_EXIT on idle exit */
	u32	cpu_id;
};
323 
#define TASK_COMM_LEN 16
/* Payload of the sched:sched_wakeup tracepoint. */
struct wakeup_entry {
	struct trace_entry te;
	char comm[TASK_COMM_LEN];
	int   pid;	/* pid of the task being woken */
	int   prio;
	int   success;
};
332 
333 /*
334  * trace_flag_type is an enumeration that holds different
335  * states when a trace occurs. These are:
336  *  IRQS_OFF            - interrupts were disabled
337  *  IRQS_NOSUPPORT      - arch does not support irqs_disabled_flags
338  *  NEED_RESCED         - reschedule is requested
339  *  HARDIRQ             - inside an interrupt handler
340  *  SOFTIRQ             - inside a softirq handler
341  */
342 enum trace_flag_type {
343 	TRACE_FLAG_IRQS_OFF		= 0x01,
344 	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
345 	TRACE_FLAG_NEED_RESCHED		= 0x04,
346 	TRACE_FLAG_HARDIRQ		= 0x08,
347 	TRACE_FLAG_SOFTIRQ		= 0x10,
348 };
349 
350 
351 
/* Payload of the sched:sched_switch tracepoint. */
struct sched_switch {
	struct trace_entry te;
	char prev_comm[TASK_COMM_LEN];
	int  prev_pid;
	int  prev_prio;
	long prev_state; /* Arjan weeps. */
	char next_comm[TASK_COMM_LEN];
	int  next_pid;
	int  next_prio;
};
362 
363 static void c_state_start(int cpu, u64 timestamp, int state)
364 {
365 	cpus_cstate_start_times[cpu] = timestamp;
366 	cpus_cstate_state[cpu] = state;
367 }
368 
369 static void c_state_end(int cpu, u64 timestamp)
370 {
371 	struct power_event *pwr;
372 	pwr = malloc(sizeof(struct power_event));
373 	if (!pwr)
374 		return;
375 	memset(pwr, 0, sizeof(struct power_event));
376 
377 	pwr->state = cpus_cstate_state[cpu];
378 	pwr->start_time = cpus_cstate_start_times[cpu];
379 	pwr->end_time = timestamp;
380 	pwr->cpu = cpu;
381 	pwr->type = CSTATE;
382 	pwr->next = power_events;
383 
384 	power_events = pwr;
385 }
386 
387 static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
388 {
389 	struct power_event *pwr;
390 	pwr = malloc(sizeof(struct power_event));
391 
392 	if (new_freq > 8000000) /* detect invalid data */
393 		return;
394 
395 	if (!pwr)
396 		return;
397 	memset(pwr, 0, sizeof(struct power_event));
398 
399 	pwr->state = cpus_pstate_state[cpu];
400 	pwr->start_time = cpus_pstate_start_times[cpu];
401 	pwr->end_time = timestamp;
402 	pwr->cpu = cpu;
403 	pwr->type = PSTATE;
404 	pwr->next = power_events;
405 
406 	if (!pwr->start_time)
407 		pwr->start_time = first_time;
408 
409 	power_events = pwr;
410 
411 	cpus_pstate_state[cpu] = new_freq;
412 	cpus_pstate_start_times[cpu] = timestamp;
413 
414 	if ((u64)new_freq > max_freq)
415 		max_freq = new_freq;
416 
417 	if (new_freq < min_freq || min_freq == 0)
418 		min_freq = new_freq;
419 
420 	if (new_freq == max_freq - 1000)
421 			turbo_frequency = max_freq;
422 }
423 
424 static void
425 sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
426 {
427 	struct wake_event *we;
428 	struct per_pid *p;
429 	struct wakeup_entry *wake = (void *)te;
430 
431 	we = malloc(sizeof(struct wake_event));
432 	if (!we)
433 		return;
434 
435 	memset(we, 0, sizeof(struct wake_event));
436 	we->time = timestamp;
437 	we->waker = pid;
438 
439 	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
440 		we->waker = -1;
441 
442 	we->wakee = wake->pid;
443 	we->next = wake_events;
444 	wake_events = we;
445 	p = find_create_pid(we->wakee);
446 
447 	if (p && p->current && p->current->state == TYPE_NONE) {
448 		p->current->state_since = timestamp;
449 		p->current->state = TYPE_WAITING;
450 	}
451 	if (p && p->current && p->current->state == TYPE_BLOCKED) {
452 		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
453 		p->current->state_since = timestamp;
454 		p->current->state = TYPE_WAITING;
455 	}
456 }
457 
/*
 * Handle a sched:sched_switch event: close the RUNNING interval of the
 * task switched out, close the waiting/blocked interval of the task
 * switched in, then update both tasks' state machines.  The samples
 * are emitted before prev's state is rewritten, so the order of the
 * blocks below matters.
 */
static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
{
	struct per_pid *p = NULL, *prev_p;
	struct sched_switch *sw = (void *)te;


	prev_p = find_create_pid(sw->prev_pid);

	p = find_create_pid(sw->next_pid);

	/* prev was running until now; record that interval. */
	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
	/* next was waiting/blocked until now; record it, then mark it running. */
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		/* bit 1 of prev_state looks like TASK_UNINTERRUPTIBLE and 0
		 * like "still runnable / preempted" -- NOTE(review): verify
		 * against the kernel's task state flags. */
		if (sw->prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (sw->prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}
487 
488 
/*
 * Session callback for every sample: keep first_time/last_time and
 * numcpus up to date, then dispatch the raw tracepoint payload to the
 * matching power/sched handler by event name.
 */
static int process_sample_event(union perf_event *event __used,
				struct perf_sample *sample,
				struct perf_evsel *evsel __used,
				struct perf_session *session)
{
	struct trace_entry *te;

	/* Track the overall time span covered by the trace. */
	if (session->sample_type & PERF_SAMPLE_TIME) {
		if (!first_time || first_time > sample->time)
			first_time = sample->time;
		if (last_time < sample->time)
			last_time = sample->time;
	}

	te = (void *)sample->raw_data;
	if (session->sample_type & PERF_SAMPLE_RAW && sample->raw_size > 0) {
		char *event_str;
#ifdef SUPPORT_OLD_POWER_EVENTS
		struct power_entry_old *peo;
		peo = (void *)te;
#endif
		/*
		 * FIXME: use evsel, its already mapped from id to perf_evsel,
		 * remove perf_header__find_event infrastructure bits.
		 * Mapping all these "power:cpu_idle" strings to the tracepoint
		 * ID and then just comparing against evsel->attr.config.
		 *
		 * e.g.:
		 *
		 * if (evsel->attr.config == power_cpu_idle_id)
		 */
		event_str = perf_header__find_event(te->type);

		if (!event_str)
			return 0;

		/* numcpus tracks the highest cpu number seen so far. */
		if (sample->cpu > numcpus)
			numcpus = sample->cpu;

		if (strcmp(event_str, "power:cpu_idle") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			/* PWR_EVENT_EXIT marks leaving idle; anything else enters a C state. */
			if (ppe->state == (u32)PWR_EVENT_EXIT)
				c_state_end(ppe->cpu_id, sample->time);
			else
				c_state_start(ppe->cpu_id, sample->time,
					      ppe->state);
		}
		else if (strcmp(event_str, "power:cpu_frequency") == 0) {
			struct power_processor_entry *ppe = (void *)te;
			p_state_change(ppe->cpu_id, sample->time, ppe->state);
		}

		else if (strcmp(event_str, "sched:sched_wakeup") == 0)
			sched_wakeup(sample->cpu, sample->time, sample->pid, te);

		else if (strcmp(event_str, "sched:sched_switch") == 0)
			sched_switch(sample->cpu, sample->time, te);

#ifdef SUPPORT_OLD_POWER_EVENTS
		/* Older kernels: same handlers, fed from the old event layout. */
		if (use_old_power_events) {
			if (strcmp(event_str, "power:power_start") == 0)
				c_state_start(peo->cpu_id, sample->time,
					      peo->value);

			else if (strcmp(event_str, "power:power_end") == 0)
				c_state_end(sample->cpu, sample->time);

			else if (strcmp(event_str,
					"power:power_frequency") == 0)
				p_state_change(peo->cpu_id, sample->time,
					       peo->value);
		}
#endif
	}
	return 0;
}
565 
566 /*
567  * After the last sample we need to wrap up the current C/P state
568  * and close out each CPU for these.
569  */
570 static void end_sample_processing(void)
571 {
572 	u64 cpu;
573 	struct power_event *pwr;
574 
575 	for (cpu = 0; cpu <= numcpus; cpu++) {
576 		pwr = malloc(sizeof(struct power_event));
577 		if (!pwr)
578 			return;
579 		memset(pwr, 0, sizeof(struct power_event));
580 
581 		/* C state */
582 #if 0
583 		pwr->state = cpus_cstate_state[cpu];
584 		pwr->start_time = cpus_cstate_start_times[cpu];
585 		pwr->end_time = last_time;
586 		pwr->cpu = cpu;
587 		pwr->type = CSTATE;
588 		pwr->next = power_events;
589 
590 		power_events = pwr;
591 #endif
592 		/* P state */
593 
594 		pwr = malloc(sizeof(struct power_event));
595 		if (!pwr)
596 			return;
597 		memset(pwr, 0, sizeof(struct power_event));
598 
599 		pwr->state = cpus_pstate_state[cpu];
600 		pwr->start_time = cpus_pstate_start_times[cpu];
601 		pwr->end_time = last_time;
602 		pwr->cpu = cpu;
603 		pwr->type = PSTATE;
604 		pwr->next = power_events;
605 
606 		if (!pwr->start_time)
607 			pwr->start_time = first_time;
608 		if (!pwr->state)
609 			pwr->state = min_freq;
610 		power_events = pwr;
611 	}
612 }
613 
614 /*
615  * Sort the pid datastructure
616  */
static void sort_pids(void)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	/* Insertion sort: pop each entry off all_data and splice it into
	 * new_list at the right position. */
	while (all_data) {
		p = all_data;
		all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		/* Walk the sorted list to find the insertion point. */
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;	/* done: break out of the walk */
					continue;
				} else {
					/* Insert at the very head of the sorted list. */
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			/* Ran off the end: p sorts after everything. */
			if (!cursor)
				prev->next = p;
		}
	}
	all_data = new_list;
}
661 
662 
663 static void draw_c_p_states(void)
664 {
665 	struct power_event *pwr;
666 	pwr = power_events;
667 
668 	/*
669 	 * two pass drawing so that the P state bars are on top of the C state blocks
670 	 */
671 	while (pwr) {
672 		if (pwr->type == CSTATE)
673 			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
674 		pwr = pwr->next;
675 	}
676 
677 	pwr = power_events;
678 	while (pwr) {
679 		if (pwr->type == PSTATE) {
680 			if (!pwr->state)
681 				pwr->state = min_freq;
682 			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
683 		}
684 		pwr = pwr->next;
685 	}
686 }
687 
688 static void draw_wakeups(void)
689 {
690 	struct wake_event *we;
691 	struct per_pid *p;
692 	struct per_pidcomm *c;
693 
694 	we = wake_events;
695 	while (we) {
696 		int from = 0, to = 0;
697 		char *task_from = NULL, *task_to = NULL;
698 
699 		/* locate the column of the waker and wakee */
700 		p = all_data;
701 		while (p) {
702 			if (p->pid == we->waker || p->pid == we->wakee) {
703 				c = p->all;
704 				while (c) {
705 					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
706 						if (p->pid == we->waker && !from) {
707 							from = c->Y;
708 							task_from = strdup(c->comm);
709 						}
710 						if (p->pid == we->wakee && !to) {
711 							to = c->Y;
712 							task_to = strdup(c->comm);
713 						}
714 					}
715 					c = c->next;
716 				}
717 				c = p->all;
718 				while (c) {
719 					if (p->pid == we->waker && !from) {
720 						from = c->Y;
721 						task_from = strdup(c->comm);
722 					}
723 					if (p->pid == we->wakee && !to) {
724 						to = c->Y;
725 						task_to = strdup(c->comm);
726 					}
727 					c = c->next;
728 				}
729 			}
730 			p = p->next;
731 		}
732 
733 		if (!task_from) {
734 			task_from = malloc(40);
735 			sprintf(task_from, "[%i]", we->waker);
736 		}
737 		if (!task_to) {
738 			task_to = malloc(40);
739 			sprintf(task_to, "[%i]", we->wakee);
740 		}
741 
742 		if (we->waker == -1)
743 			svg_interrupt(we->time, to);
744 		else if (from && to && abs(from - to) == 1)
745 			svg_wakeline(we->time, from, to);
746 		else
747 			svg_partial_wakeline(we->time, from, task_from, to, task_to);
748 		we = we->next;
749 
750 		free(task_from);
751 		free(task_to);
752 	}
753 }
754 
755 static void draw_cpu_usage(void)
756 {
757 	struct per_pid *p;
758 	struct per_pidcomm *c;
759 	struct cpu_sample *sample;
760 	p = all_data;
761 	while (p) {
762 		c = p->all;
763 		while (c) {
764 			sample = c->samples;
765 			while (sample) {
766 				if (sample->type == TYPE_RUNNING)
767 					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);
768 
769 				sample = sample->next;
770 			}
771 			c = c->next;
772 		}
773 		p = p->next;
774 	}
775 }
776 
777 static void draw_process_bars(void)
778 {
779 	struct per_pid *p;
780 	struct per_pidcomm *c;
781 	struct cpu_sample *sample;
782 	int Y = 0;
783 
784 	Y = 2 * numcpus + 2;
785 
786 	p = all_data;
787 	while (p) {
788 		c = p->all;
789 		while (c) {
790 			if (!c->display) {
791 				c->Y = 0;
792 				c = c->next;
793 				continue;
794 			}
795 
796 			svg_box(Y, c->start_time, c->end_time, "process");
797 			sample = c->samples;
798 			while (sample) {
799 				if (sample->type == TYPE_RUNNING)
800 					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
801 				if (sample->type == TYPE_BLOCKED)
802 					svg_box(Y, sample->start_time, sample->end_time, "blocked");
803 				if (sample->type == TYPE_WAITING)
804 					svg_waiting(Y, sample->start_time, sample->end_time);
805 				sample = sample->next;
806 			}
807 
808 			if (c->comm) {
809 				char comm[256];
810 				if (c->total_time > 5000000000) /* 5 seconds */
811 					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
812 				else
813 					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);
814 
815 				svg_text(Y, c->start_time, comm);
816 			}
817 			c->Y = Y;
818 			Y++;
819 			c = c->next;
820 		}
821 		p = p->next;
822 	}
823 }
824 
825 static void add_process_filter(const char *string)
826 {
827 	struct process_filter *filt;
828 	int pid;
829 
830 	pid = strtoull(string, NULL, 10);
831 	filt = malloc(sizeof(struct process_filter));
832 	if (!filt)
833 		return;
834 
835 	filt->name = strdup(string);
836 	filt->pid  = pid;
837 	filt->next = process_filter;
838 
839 	process_filter = filt;
840 }
841 
842 static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
843 {
844 	struct process_filter *filt;
845 	if (!process_filter)
846 		return 1;
847 
848 	filt = process_filter;
849 	while (filt) {
850 		if (filt->pid && p->pid == filt->pid)
851 			return 1;
852 		if (strcmp(filt->name, c->comm) == 0)
853 			return 1;
854 		filt = filt->next;
855 	}
856 	return 0;
857 }
858 
859 static int determine_display_tasks_filtered(void)
860 {
861 	struct per_pid *p;
862 	struct per_pidcomm *c;
863 	int count = 0;
864 
865 	p = all_data;
866 	while (p) {
867 		p->display = 0;
868 		if (p->start_time == 1)
869 			p->start_time = first_time;
870 
871 		/* no exit marker, task kept running to the end */
872 		if (p->end_time == 0)
873 			p->end_time = last_time;
874 
875 		c = p->all;
876 
877 		while (c) {
878 			c->display = 0;
879 
880 			if (c->start_time == 1)
881 				c->start_time = first_time;
882 
883 			if (passes_filter(p, c)) {
884 				c->display = 1;
885 				p->display = 1;
886 				count++;
887 			}
888 
889 			if (c->end_time == 0)
890 				c->end_time = last_time;
891 
892 			c = c->next;
893 		}
894 		p = p->next;
895 	}
896 	return count;
897 }
898 
899 static int determine_display_tasks(u64 threshold)
900 {
901 	struct per_pid *p;
902 	struct per_pidcomm *c;
903 	int count = 0;
904 
905 	if (process_filter)
906 		return determine_display_tasks_filtered();
907 
908 	p = all_data;
909 	while (p) {
910 		p->display = 0;
911 		if (p->start_time == 1)
912 			p->start_time = first_time;
913 
914 		/* no exit marker, task kept running to the end */
915 		if (p->end_time == 0)
916 			p->end_time = last_time;
917 		if (p->total_time >= threshold && !power_only)
918 			p->display = 1;
919 
920 		c = p->all;
921 
922 		while (c) {
923 			c->display = 0;
924 
925 			if (c->start_time == 1)
926 				c->start_time = first_time;
927 
928 			if (c->total_time >= threshold && !power_only) {
929 				c->display = 1;
930 				count++;
931 			}
932 
933 			if (c->end_time == 0)
934 				c->end_time = last_time;
935 
936 			c = c->next;
937 		}
938 		p = p->next;
939 	}
940 	return count;
941 }
942 
943 
944 
#define TIME_THRESH 10000000	/* minimum total runtime for a task to be shown */

/*
 * Render the whole chart to "filename": CPU boxes, process bars,
 * C/P states and wakeup arrows.
 *
 * NOTE: bumps the global numcpus from "highest cpu number seen" to a
 * cpu count, so this function is not idempotent.
 */
static void write_svg_file(const char *filename)
{
	u64 i;
	int count;

	numcpus++;


	count = determine_display_tasks(TIME_THRESH);

	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
	if (count < 15)
		count = determine_display_tasks(TIME_THRESH / 10);

	open_svg(filename, numcpus, count, first_time, last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < numcpus; i++)
		svg_cpu_box(i, max_freq, turbo_frequency);

	draw_cpu_usage();
	draw_process_bars();
	draw_c_p_states();
	draw_wakeups();

	svg_close();
}
976 
/* Callbacks wired into the perf session event dispatcher. */
static struct perf_event_ops event_ops = {
	.comm			= process_comm_event,
	.fork			= process_fork_event,
	.exit			= process_exit_event,
	.sample			= process_sample_event,
	.ordered_samples	= true,	/* deliver samples in timestamp order */
};
984 
985 static int __cmd_timechart(void)
986 {
987 	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
988 							 0, false, &event_ops);
989 	int ret = -EINVAL;
990 
991 	if (session == NULL)
992 		return -ENOMEM;
993 
994 	if (!perf_session__has_traces(session, "timechart record"))
995 		goto out_delete;
996 
997 	ret = perf_session__process_events(session, &event_ops);
998 	if (ret)
999 		goto out_delete;
1000 
1001 	end_sample_processing();
1002 
1003 	sort_pids();
1004 
1005 	write_svg_file(output_name);
1006 
1007 	pr_info("Written %2.1f seconds of trace to %s.\n",
1008 		(last_time - first_time) / 1000000000.0, output_name);
1009 out_delete:
1010 	perf_session__delete(session);
1011 	return ret;
1012 }
1013 
/* Usage string for parse_options()/usage_with_options(). */
static const char * const timechart_usage[] = {
	"perf timechart [<options>] {record}",
	NULL
};
1018 
#ifdef SUPPORT_OLD_POWER_EVENTS
/* perf record arguments for kernels that only provide the old
 * power:power_* tracepoints (see __cmd_record()). */
static const char * const record_old_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "power:power_start",
	"-e", "power:power_end",
	"-e", "power:power_frequency",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};
#endif

/* perf record arguments for kernels with power:cpu_idle/cpu_frequency. */
static const char * const record_new_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "power:cpu_frequency",
	"-e", "power:cpu_idle",
	"-e", "sched:sched_wakeup",
	"-e", "sched:sched_switch",
};
1045 
1046 static int __cmd_record(int argc, const char **argv)
1047 {
1048 	unsigned int rec_argc, i, j;
1049 	const char **rec_argv;
1050 	const char * const *record_args = record_new_args;
1051 	unsigned int record_elems = ARRAY_SIZE(record_new_args);
1052 
1053 #ifdef SUPPORT_OLD_POWER_EVENTS
1054 	if (!is_valid_tracepoint("power:cpu_idle") &&
1055 	    is_valid_tracepoint("power:power_start")) {
1056 		use_old_power_events = 1;
1057 		record_args = record_old_args;
1058 		record_elems = ARRAY_SIZE(record_old_args);
1059 	}
1060 #endif
1061 
1062 	rec_argc = record_elems + argc - 1;
1063 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1064 
1065 	if (rec_argv == NULL)
1066 		return -ENOMEM;
1067 
1068 	for (i = 0; i < record_elems; i++)
1069 		rec_argv[i] = strdup(record_args[i]);
1070 
1071 	for (j = 1; j < (unsigned int)argc; j++, i++)
1072 		rec_argv[i] = argv[j];
1073 
1074 	return cmd_record(i, rec_argv, NULL);
1075 }
1076 
/* Option callback for -p/--process: each argument adds one filter. */
static int
parse_process(const struct option *opt __used, const char *arg, int __used unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}
1084 
/* Command line options for "perf timechart" (non-record mode). */
static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('o', "output", &output_name, "file",
		    "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width,
		    "page width"),
	OPT_BOOLEAN('P', "power-only", &power_only,
		    "output power data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		    "Look for files with symbols relative to this directory"),
	OPT_END()
};
1101 
1102 
1103 int cmd_timechart(int argc, const char **argv, const char *prefix __used)
1104 {
1105 	argc = parse_options(argc, argv, options, timechart_usage,
1106 			PARSE_OPT_STOP_AT_NON_OPTION);
1107 
1108 	symbol__init();
1109 
1110 	if (argc && !strncmp(argv[0], "rec", 3))
1111 		return __cmd_record(argc, argv);
1112 	else if (argc)
1113 		usage_with_options(timechart_usage, options);
1114 
1115 	setup_pager();
1116 
1117 	return __cmd_timechart();
1118 }
1119