/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <traceevent/event-parse.h>

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"
#include "util/data.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1

struct per_pid;
struct power_event;
struct wake_event;

struct timechart {
	struct perf_tool	tool;
	struct per_pid		*all_data;
	struct power_event	*power_events;
	struct wake_event	*wake_events;
	int			proc_num;
	unsigned int		numcpus;
	u64			min_freq,	/* Lowest CPU frequency seen */
				max_freq,	/* Highest CPU frequency seen */
				turbo_frequency,
				first_time, last_time;
	bool			power_only,
				tasks_only,
				with_backtrace;
};

struct per_pidcomm;
struct cpu_sample;

/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s.
 *	This is because we want to track different programs separately,
 *	while exec reuses the original pid (by design).
 * Each comm has a list of samples that will be used to draw
 * the final graph.
 */
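
/*
 * In other words, the resulting in-memory layout is roughly:
 *
 *	per_pid (one node per task)
 *	  -> per_pidcomm (one node per comm seen for that pid)
 *	       -> cpu_sample (one node per recorded interval)
 *
 * with every level chained through its singly linked "next" pointer.
 */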

struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};


struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
	const char *backtrace;
};

#define CSTATE 1
#define PSTATE 2

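/*
 * One closed interval [start_time, end_time] of power state on a single
 * CPU: either a C-state residency (type == CSTATE, state is the C-state
 * number) or a P-state interval (type == PSTATE, state is the frequency
 * reported by the cpufreq tracepoint, in kHz).
 */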
struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
	const char *backtrace;
};

struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;


static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
{
	struct per_pid *cursor = tchart->all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = zalloc(sizeof(*cursor));
	assert(cursor != NULL);
	cursor->pid = pid;
	cursor->next = tchart->all_data;
	tchart->all_data = cursor;
	return cursor;
}

static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(tchart, pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = zalloc(sizeof(*c));
	assert(c != NULL);
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(tchart, pid);
	pp = find_create_pid(tchart, ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(tchart, pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(tchart, pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

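/*
 * Record one interval for a pid's current comm.  Only TYPE_RUNNING
 * samples are added to the per-comm and per-pid total_time, which is
 * what determine_display_tasks() later compares against the display
 * threshold; WAITING/BLOCKED samples are kept for drawing only.
 */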
static void pid_put_sample(struct timechart *tchart, int pid, int type,
			   unsigned int cpu, u64 start, u64 end,
			   const char *backtrace)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(tchart, pid);
	c = p->current;
	if (!c) {
		c = zalloc(sizeof(*c));
		assert(c != NULL);
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = zalloc(sizeof(*sample));
	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	sample->backtrace = backtrace;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

#define MAX_CPUS 4096

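/*
 * Per-CPU scratch state: a C-state or P-state "start" is latched in
 * these arrays and only turned into a power_event once the matching
 * end (or the end of the trace) is seen.
 */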
static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int process_comm_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_exit(tchart, event->fork.pid, event->fork.time);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
#endif

static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
{
	struct power_event *pwr = zalloc(sizeof(*pwr));

	if (!pwr)
		return;

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = tchart->power_events;

	tchart->power_events = pwr;
}

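/*
 * Close out the CPU's previous P-state interval and latch the new
 * frequency.  Frequencies above 8,000,000 kHz (8 GHz) are treated as
 * bogus data and dropped.  The "max_freq - 1000" check is a turbo
 * heuristic: cpufreq conventionally advertises turbo mode as the
 * highest non-turbo frequency plus 1000 kHz, so once a value exactly
 * 1000 kHz below the observed maximum shows up, that maximum is taken
 * to be the turbo frequency.
 */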
static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = zalloc(sizeof(*pwr));
	if (!pwr)
		return;

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = tchart->power_events;

	if (!pwr->start_time)
		pwr->start_time = tchart->first_time;

	tchart->power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > tchart->max_freq)
		tchart->max_freq = new_freq;

	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
		tchart->min_freq = new_freq;

	if (new_freq == tchart->max_freq - 1000)
		tchart->turbo_frequency = tchart->max_freq;
}

static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
			 int waker, int wakee, u8 flags, const char *backtrace)
{
	struct per_pid *p;
	struct wake_event *we = zalloc(sizeof(*we));

	if (!we)
		return;

	we->time = timestamp;
	we->waker = waker;
	we->backtrace = backtrace;

	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wakee;
	we->next = tchart->wake_events;
	tchart->wake_events = we;
	p = find_create_pid(tchart, we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(tchart, p->pid, p->current->state, cpu,
			       p->current->state_since, timestamp, NULL);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

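/*
 * On a context switch, close the outgoing task's RUNNING sample and the
 * incoming task's WAITING/BLOCKED sample, then reclassify the outgoing
 * task from the tracepoint's prev_state: 0 (TASK_RUNNING) means it was
 * preempted and is still runnable, so it becomes TYPE_WAITING;
 * TASK_UNINTERRUPTIBLE (bit value 2) becomes TYPE_BLOCKED; anything
 * else is left as TYPE_NONE until the next event for that task.
 */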
static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
			 int prev_pid, int next_pid, u64 prev_state,
			 const char *backtrace)
{
	struct per_pid *p = NULL, *prev_p;

	prev_p = find_create_pid(tchart, prev_pid);

	p = find_create_pid(tchart, next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
			       prev_p->current->state_since, timestamp,
			       backtrace);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(tchart, next_pid, p->current->state, cpu,
				       p->current->state_since, timestamp,
				       backtrace);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}

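/*
 * Render the sample's callchain into a heap-allocated, newline-separated
 * string via open_memstream().  The result is stored in the cpu_sample
 * or wake_event and kept around for the whole run so the SVG writer can
 * attach it; it is only freed here when the chain turns out to be
 * corrupted.
 */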
static const char *cat_backtrace(union perf_event *event,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	struct addr_location al;
	unsigned int i;
	char *p = NULL;
	size_t p_len;
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location tal;
	struct ip_callchain *chain = sample->callchain;
	FILE *f = open_memstream(&p, &p_len);

	if (!f) {
		perror("open_memstream error");
		return NULL;
	}

	if (!chain)
		goto exit;

	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		goto exit;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);

				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				free(p);
				p = NULL;
				goto exit;
			}
			continue;
		}

		tal.filtered = false;
		thread__find_addr_location(al.thread, machine, cpumode,
					   MAP__FUNCTION, ip, &tal);

		if (tal.sym)
			fprintf(f, "..... %016" PRIx64 " %s\n", ip,
				tal.sym->name);
		else
			fprintf(f, "..... %016" PRIx64 "\n", ip);
	}

exit:
	fclose(f);

	return p;
}

typedef int (*tracepoint_handler)(struct timechart *tchart,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  const char *backtrace);

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);

	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
		if (!tchart->first_time || tchart->first_time > sample->time)
			tchart->first_time = sample->time;
		if (tchart->last_time < sample->time)
			tchart->last_time = sample->time;
	}

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(tchart, evsel, sample,
			 cat_backtrace(event, sample, machine));
	}

	return 0;
}

static int
process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
			struct perf_evsel *evsel,
			struct perf_sample *sample,
			const char *backtrace __maybe_unused)
{
	u32 state = perf_evsel__intval(evsel, sample, "state");
	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");

	if (state == (u32)PWR_EVENT_EXIT)
		c_state_end(tchart, cpu_id, sample->time);
	else
		c_state_start(cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_cpu_frequency(struct timechart *tchart,
			     struct perf_evsel *evsel,
			     struct perf_sample *sample,
			     const char *backtrace __maybe_unused)
{
	u32 state = perf_evsel__intval(evsel, sample, "state");
	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");

	p_state_change(tchart, cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_sched_wakeup(struct timechart *tchart,
			    struct perf_evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
	int waker = perf_evsel__intval(evsel, sample, "common_pid");
	int wakee = perf_evsel__intval(evsel, sample, "pid");

	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
	return 0;
}

static int
process_sample_sched_switch(struct timechart *tchart,
			    struct perf_evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");

	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
		     prev_state, backtrace);
	return 0;
}

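/*
 * Older kernels exposed idle and frequency changes through the legacy
 * power:power_start, power:power_end and power:power_frequency
 * tracepoints, which were later replaced by power:cpu_idle and
 * power:cpu_frequency.  Both sets of handlers are kept so a timechart
 * can be rendered from data recorded on either kind of kernel;
 * timechart__record() enables only the set that is actually available
 * (see use_old_power_events).
 */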
#ifdef SUPPORT_OLD_POWER_EVENTS
static int
process_sample_power_start(struct timechart *tchart __maybe_unused,
			   struct perf_evsel *evsel,
			   struct perf_sample *sample,
			   const char *backtrace __maybe_unused)
{
	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
	u64 value = perf_evsel__intval(evsel, sample, "value");

	c_state_start(cpu_id, sample->time, value);
	return 0;
}

static int
process_sample_power_end(struct timechart *tchart,
			 struct perf_evsel *evsel __maybe_unused,
			 struct perf_sample *sample,
			 const char *backtrace __maybe_unused)
{
	c_state_end(tchart, sample->cpu, sample->time);
	return 0;
}

static int
process_sample_power_frequency(struct timechart *tchart,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       const char *backtrace __maybe_unused)
{
	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
	u64 value = perf_evsel__intval(evsel, sample, "value");

	p_state_change(tchart, cpu_id, sample->time, value);
	return 0;
}
#endif /* SUPPORT_OLD_POWER_EVENTS */

/*
 * After the last sample we need to wrap up the current C/P state
 * and close it out for each CPU.
 */
static void end_sample_processing(struct timechart *tchart)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
		/* C state */
#if 0
		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = tchart->last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = tchart->power_events;

		tchart->power_events = pwr;
#endif
		/* P state */

		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = tchart->last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = tchart->power_events;

		if (!pwr->start_time)
			pwr->start_time = tchart->first_time;
		if (!pwr->state)
			pwr->state = tchart->min_freq;
		tchart->power_events = pwr;
	}
}

/*
 * Sort the pid data structure
 */
static void sort_pids(struct timechart *tchart)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */
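	/*
	 * (This is a simple insertion sort on the singly linked list, so
	 * it is O(n^2) in the number of tasks; fine for the task counts
	 * a timechart typically shows.)
	 */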

	new_list = NULL;

	while (tchart->all_data) {
		p = tchart->all_data;
		tchart->all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	tchart->all_data = new_list;
}


static void draw_c_p_states(struct timechart *tchart)
{
	struct power_event *pwr;
	pwr = tchart->power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = tchart->power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = tchart->min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

static void draw_wakeups(struct timechart *tchart)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = tchart->wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = tchart->all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to, we->backtrace);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to, we->backtrace);
		else
			svg_partial_wakeline(we->time, from, task_from, to,
					     task_to, we->backtrace);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING) {
					svg_process(sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    p->pid,
						    "sample",
						    c->comm,
						    sample->backtrace);
				}

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

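/*
 * Draw one horizontal bar per displayed comm.  The process rows start
 * below the per-CPU area at Y = 2 * numcpus + 2, and each displayed
 * comm then gets a row of its own; the row index is remembered in c->Y
 * so draw_wakeups() can connect waker and wakee columns.
 */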
static void draw_process_bars(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * tchart->numcpus + 2;

	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_running(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_BLOCKED)
					svg_blocked(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	int pid = strtoull(string, NULL, 10);
	struct process_filter *filt = malloc(sizeof(*filt));

	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid  = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = tchart->all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = tchart->first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = tchart->last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = tchart->first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = tchart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

static int determine_display_tasks(struct timechart *tchart, u64 threshold)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	if (process_filter)
		return determine_display_tasks_filtered(tchart);

	p = tchart->all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = tchart->first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = tchart->last_time;
		if (p->total_time >= threshold)
			p->display = 1;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = tchart->first_time;

			if (c->total_time >= threshold) {
				c->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = tchart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}



#define TIME_THRESH 10000000

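/*
 * write_svg_file() starts with a 10ms on-CPU threshold (TIME_THRESH is
 * in nanoseconds) and keeps dividing it by 10 until at least proc_num
 * tasks qualify for display, so that short traces still produce a
 * useful chart.  When an explicit process filter is given, the
 * threshold is not relaxed.
 */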
static void write_svg_file(struct timechart *tchart, const char *filename)
{
	u64 i;
	int count;
	int thresh = TIME_THRESH;

	if (tchart->power_only)
		tchart->proc_num = 0;

	/* We'd like to show at least proc_num tasks;
	 * be less picky if we have fewer */
	do {
		count = determine_display_tasks(tchart, thresh);
		thresh /= 10;
	} while (!process_filter && thresh && count < tchart->proc_num);

	open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);

	svg_time_grid();
	svg_legenda();

	for (i = 0; i < tchart->numcpus; i++)
		svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);

	draw_cpu_usage(tchart);
	if (tchart->proc_num)
		draw_process_bars(tchart);
	if (!tchart->tasks_only)
		draw_c_p_states(tchart);
	if (tchart->proc_num)
		draw_wakeups(tchart);

	svg_close();
}

static int process_header(struct perf_file_section *section __maybe_unused,
			  struct perf_header *ph,
			  int feat,
			  int fd __maybe_unused,
			  void *data)
{
	struct timechart *tchart = data;

	switch (feat) {
	case HEADER_NRCPUS:
		tchart->numcpus = ph->env.nr_cpus_avail;
		break;
	default:
		break;
	}

	return 0;
}

static int __cmd_timechart(struct timechart *tchart, const char *output_name)
{
	const struct perf_evsel_str_handler power_tracepoints[] = {
		{ "power:cpu_idle",		process_sample_cpu_idle },
		{ "power:cpu_frequency",	process_sample_cpu_frequency },
		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
		{ "sched:sched_switch",		process_sample_sched_switch },
#ifdef SUPPORT_OLD_POWER_EVENTS
		{ "power:power_start",		process_sample_power_start },
		{ "power:power_end",		process_sample_power_end },
		{ "power:power_frequency",	process_sample_power_frequency },
#endif
	};
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};

	struct perf_session *session = perf_session__new(&file, false,
							 &tchart->tool);
	int ret = -EINVAL;

	if (session == NULL)
		return -ENOMEM;

	(void)perf_header__process_sections(&session->header,
					    perf_data_file__fd(session->file),
					    tchart,
					    process_header);

	if (!perf_session__has_traces(session, "timechart record"))
		goto out_delete;

	if (perf_session__set_tracepoints_handlers(session,
						   power_tracepoints)) {
		pr_err("Initializing session tracepoint handlers failed\n");
		goto out_delete;
	}

	ret = perf_session__process_events(session, &tchart->tool);
	if (ret)
		goto out_delete;

	end_sample_processing(tchart);

	sort_pids(tchart);

	write_svg_file(tchart, output_name);

	pr_info("Written %2.1f seconds of trace to %s.\n",
		(tchart->last_time - tchart->first_time) / 1000000000.0, output_name);
out_delete:
	perf_session__delete(session);
	return ret;
}

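/*
 * "perf timechart record" just assembles an argv for "perf record" and
 * hands it to cmd_record().  With the default options the constructed
 * command line is roughly:
 *
 *	perf record -a -R -c 1 \
 *		-e sched:sched_wakeup -e sched:sched_switch \
 *		-e power:cpu_frequency -e power:cpu_idle [user args]
 *
 * The legacy power:power_* tracepoints are substituted on kernels that
 * do not provide power:cpu_idle, -g is added when --callchain is given,
 * and -P/-T drop the task or power events respectively.
 */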
static int timechart__record(struct timechart *tchart, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char **p;
	unsigned int record_elems;

	const char * const common_args[] = {
		"record", "-a", "-R", "-c", "1",
	};
	unsigned int common_args_nr = ARRAY_SIZE(common_args);

	const char * const backtrace_args[] = {
		"-g",
	};
	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);

	const char * const power_args[] = {
		"-e", "power:cpu_frequency",
		"-e", "power:cpu_idle",
	};
	unsigned int power_args_nr = ARRAY_SIZE(power_args);

	const char * const old_power_args[] = {
#ifdef SUPPORT_OLD_POWER_EVENTS
		"-e", "power:power_start",
		"-e", "power:power_end",
		"-e", "power:power_frequency",
#endif
	};
	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);

	const char * const tasks_args[] = {
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_switch",
	};
	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);

#ifdef SUPPORT_OLD_POWER_EVENTS
	if (!is_valid_tracepoint("power:cpu_idle") &&
	    is_valid_tracepoint("power:power_start")) {
		use_old_power_events = 1;
		power_args_nr = 0;
	} else {
		old_power_args_nr = 0;
	}
#endif

	if (tchart->power_only)
		tasks_args_nr = 0;

	if (tchart->tasks_only) {
		power_args_nr = 0;
		old_power_args_nr = 0;
	}

	if (!tchart->with_backtrace)
		backtrace_args_no = 0;

	record_elems = common_args_nr + tasks_args_nr +
		power_args_nr + old_power_args_nr + backtrace_args_no;

	rec_argc = record_elems + argc;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	p = rec_argv;
	for (i = 0; i < common_args_nr; i++)
		*p++ = strdup(common_args[i]);

	for (i = 0; i < backtrace_args_no; i++)
		*p++ = strdup(backtrace_args[i]);

	for (i = 0; i < tasks_args_nr; i++)
		*p++ = strdup(tasks_args[i]);

	for (i = 0; i < power_args_nr; i++)
		*p++ = strdup(power_args[i]);

	for (i = 0; i < old_power_args_nr; i++)
		*p++ = strdup(old_power_args[i]);

	for (j = 1; j < (unsigned int)argc; j++)
		*p++ = argv[j];

	return cmd_record(rec_argc, rec_argv, NULL);
}

static int
parse_process(const struct option *opt __maybe_unused, const char *arg,
	      int __maybe_unused unset)
{
	if (arg)
		add_process_filter(arg);
	return 0;
}

int cmd_timechart(int argc, const char **argv,
		  const char *prefix __maybe_unused)
{
	struct timechart tchart = {
		.tool = {
			.comm		 = process_comm_event,
			.fork		 = process_fork_event,
			.exit		 = process_exit_event,
			.sample		 = process_sample_event,
			.ordered_samples = true,
		},
		.proc_num = 15,
	};
	const char *output_name = "output.svg";
	const struct option timechart_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
		    "output processes data only"),
	OPT_CALLBACK('p', "process", NULL, "process",
		      "process selector. Pass a pid or process name.",
		       parse_process),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		    "Look for files with symbols relative to this directory"),
	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
		    "min. number of tasks to print"),
	OPT_END()
	};
	const char * const timechart_usage[] = {
		"perf timechart [<options>] {record}",
		NULL
	};

	const struct option record_options[] = {
	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
		    "output processes data only"),
	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
	OPT_END()
	};
	const char * const record_usage[] = {
		"perf timechart record [<options>]",
		NULL
	};
	argc = parse_options(argc, argv, timechart_options, timechart_usage,
			PARSE_OPT_STOP_AT_NON_OPTION);

	if (tchart.power_only && tchart.tasks_only) {
		pr_err("-P and -T options cannot be used at the same time.\n");
		return -1;
	}

	symbol__init();

	if (argc && !strncmp(argv[0], "rec", 3)) {
		argc = parse_options(argc, argv, record_options, record_usage,
				     PARSE_OPT_STOP_AT_NON_OPTION);

		if (tchart.power_only && tchart.tasks_only) {
			pr_err("-P and -T options cannot be used at the same time.\n");
			return -1;
		}

		return timechart__record(&tchart, argc, argv);
	} else if (argc)
		usage_with_options(timechart_usage, timechart_options);

	setup_pager();

	return __cmd_timechart(&tchart, output_name);
}