xref: /openbmc/linux/tools/perf/builtin-sched.c (revision b08918fb)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "builtin.h"
3 #include "perf.h"
4 #include "perf-sys.h"
5 
6 #include "util/evlist.h"
7 #include "util/evsel.h"
8 #include "util/symbol.h"
9 #include "util/thread.h"
10 #include "util/header.h"
11 #include "util/session.h"
12 #include "util/tool.h"
13 #include "util/cloexec.h"
14 #include "util/thread_map.h"
15 #include "util/color.h"
16 #include "util/stat.h"
17 #include "util/string2.h"
18 #include "util/callchain.h"
19 #include "util/time-utils.h"
20 
21 #include <subcmd/pager.h>
22 #include <subcmd/parse-options.h>
23 #include "util/trace-event.h"
24 
25 #include "util/debug.h"
26 
27 #include <linux/kernel.h>
28 #include <linux/log2.h>
29 #include <linux/zalloc.h>
30 #include <sys/prctl.h>
31 #include <sys/resource.h>
32 #include <inttypes.h>
33 
34 #include <errno.h>
35 #include <semaphore.h>
36 #include <pthread.h>
37 #include <math.h>
38 #include <api/fs/fs.h>
39 #include <linux/time64.h>
40 
41 #include <linux/ctype.h>
42 
43 #define PR_SET_NAME		15               /* Set process name */
44 #define MAX_CPUS		4096
45 #define COMM_LEN		20
46 #define SYM_LEN			129
47 #define MAX_PID			1024000
48 
49 struct sched_atom;
50 
51 struct task_desc {
52 	unsigned long		nr;
53 	unsigned long		pid;
54 	char			comm[COMM_LEN];
55 
56 	unsigned long		nr_events;
57 	unsigned long		curr_event;
58 	struct sched_atom	**atoms;
59 
60 	pthread_t		thread;
61 	sem_t			sleep_sem;
62 
63 	sem_t			ready_for_work;
64 	sem_t			work_done_sem;
65 
66 	u64			cpu_usage;
67 };
68 
69 enum sched_event_type {
70 	SCHED_EVENT_RUN,
71 	SCHED_EVENT_SLEEP,
72 	SCHED_EVENT_WAKEUP,
73 	SCHED_EVENT_MIGRATION,
74 };
75 
76 struct sched_atom {
77 	enum sched_event_type	type;
78 	int			specific_wait;
79 	u64			timestamp;
80 	u64			duration;
81 	unsigned long		nr;
82 	sem_t			*wait_sem;
83 	struct task_desc	*wakee;
84 };
85 
86 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
87 
88 /* task state bitmask, copied from include/linux/sched.h */
89 #define TASK_RUNNING		0
90 #define TASK_INTERRUPTIBLE	1
91 #define TASK_UNINTERRUPTIBLE	2
92 #define __TASK_STOPPED		4
93 #define __TASK_TRACED		8
94 /* in tsk->exit_state */
95 #define EXIT_DEAD		16
96 #define EXIT_ZOMBIE		32
97 #define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
98 /* in tsk->state again */
99 #define TASK_DEAD		64
100 #define TASK_WAKEKILL		128
101 #define TASK_WAKING		256
102 #define TASK_PARKED		512
103 
104 enum thread_state {
105 	THREAD_SLEEPING = 0,
106 	THREAD_WAIT_CPU,
107 	THREAD_SCHED_IN,
108 	THREAD_IGNORE
109 };
110 
111 struct work_atom {
112 	struct list_head	list;
113 	enum thread_state	state;
114 	u64			sched_out_time;
115 	u64			wake_up_time;
116 	u64			sched_in_time;
117 	u64			runtime;
118 };
119 
120 struct work_atoms {
121 	struct list_head	work_list;
122 	struct thread		*thread;
123 	struct rb_node		node;
124 	u64			max_lat;
125 	u64			max_lat_at;
126 	u64			total_lat;
127 	u64			nb_atoms;
128 	u64			total_runtime;
129 	int			num_merged;
130 };
131 
132 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
133 
134 struct perf_sched;
135 
136 struct trace_sched_handler {
137 	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
138 			    struct perf_sample *sample, struct machine *machine);
139 
140 	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
141 			     struct perf_sample *sample, struct machine *machine);
142 
143 	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
144 			    struct perf_sample *sample, struct machine *machine);
145 
146 	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
147 	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
148 			  struct machine *machine);
149 
150 	int (*migrate_task_event)(struct perf_sched *sched,
151 				  struct evsel *evsel,
152 				  struct perf_sample *sample,
153 				  struct machine *machine);
154 };
155 
156 #define COLOR_PIDS PERF_COLOR_BLUE
157 #define COLOR_CPUS PERF_COLOR_BG_RED
158 
159 struct perf_sched_map {
160 	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
161 	int			*comp_cpus;
162 	bool			 comp;
163 	struct perf_thread_map *color_pids;
164 	const char		*color_pids_str;
165 	struct perf_cpu_map	*color_cpus;
166 	const char		*color_cpus_str;
167 	struct perf_cpu_map	*cpus;
168 	const char		*cpus_str;
169 };
170 
171 struct perf_sched {
172 	struct perf_tool tool;
173 	const char	 *sort_order;
174 	unsigned long	 nr_tasks;
175 	struct task_desc **pid_to_task;
176 	struct task_desc **tasks;
177 	const struct trace_sched_handler *tp_handler;
178 	pthread_mutex_t	 start_work_mutex;
179 	pthread_mutex_t	 work_done_wait_mutex;
180 	int		 profile_cpu;
181 /*
182  * Track the current task - that way we can know whether there are any
183  * weird events, such as a task being switched away that is not current.
184  */
185 	int		 max_cpu;
186 	u32		 curr_pid[MAX_CPUS];
187 	struct thread	 *curr_thread[MAX_CPUS];
188 	char		 next_shortname1;
189 	char		 next_shortname2;
190 	unsigned int	 replay_repeat;
191 	unsigned long	 nr_run_events;
192 	unsigned long	 nr_sleep_events;
193 	unsigned long	 nr_wakeup_events;
194 	unsigned long	 nr_sleep_corrections;
195 	unsigned long	 nr_run_events_optimized;
196 	unsigned long	 targetless_wakeups;
197 	unsigned long	 multitarget_wakeups;
198 	unsigned long	 nr_runs;
199 	unsigned long	 nr_timestamps;
200 	unsigned long	 nr_unordered_timestamps;
201 	unsigned long	 nr_context_switch_bugs;
202 	unsigned long	 nr_events;
203 	unsigned long	 nr_lost_chunks;
204 	unsigned long	 nr_lost_events;
205 	u64		 run_measurement_overhead;
206 	u64		 sleep_measurement_overhead;
207 	u64		 start_time;
208 	u64		 cpu_usage;
209 	u64		 runavg_cpu_usage;
210 	u64		 parent_cpu_usage;
211 	u64		 runavg_parent_cpu_usage;
212 	u64		 sum_runtime;
213 	u64		 sum_fluct;
214 	u64		 run_avg;
215 	u64		 all_runtime;
216 	u64		 all_count;
217 	u64		 cpu_last_switched[MAX_CPUS];
218 	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
219 	struct list_head sort_list, cmp_pid;
220 	bool force;
221 	bool skip_merge;
222 	struct perf_sched_map map;
223 
224 	/* options for timehist command */
225 	bool		summary;
226 	bool		summary_only;
227 	bool		idle_hist;
228 	bool		show_callchain;
229 	unsigned int	max_stack;
230 	bool		show_cpu_visual;
231 	bool		show_wakeups;
232 	bool		show_next;
233 	bool		show_migrations;
234 	bool		show_state;
235 	u64		skipped_samples;
236 	const char	*time_str;
237 	struct perf_time_interval ptime;
238 	struct perf_time_interval hist_time;
239 };
240 
241 /* per thread run time data */
242 struct thread_runtime {
243 	u64 last_time;      /* time of previous sched in/out event */
244 	u64 dt_run;         /* run time */
245 	u64 dt_sleep;       /* time spent sleeping between CPU accesses (off cpu) */
246 	u64 dt_iowait;      /* time spent in iowait between CPU accesses (off cpu) */
247 	u64 dt_preempt;     /* time spent preempted between CPU accesses (off cpu) */
248 	u64 dt_delay;       /* time between wakeup and sched-in */
249 	u64 ready_to_run;   /* time of wakeup */
250 
251 	struct stats run_stats;
252 	u64 total_run_time;
253 	u64 total_sleep_time;
254 	u64 total_iowait_time;
255 	u64 total_preempt_time;
256 	u64 total_delay_time;
257 
258 	int last_state;
259 
260 	char shortname[3];
261 	bool comm_changed;
262 
263 	u64 migrations;
264 };
265 
266 /* per event run time data */
267 struct evsel_runtime {
268 	u64 *last_time; /* time this event was last seen per cpu */
269 	u32 ncpu;       /* highest cpu slot allocated */
270 };
271 
272 /* per cpu idle time data */
273 struct idle_thread_runtime {
274 	struct thread_runtime	tr;
275 	struct thread		*last_thread;
276 	struct rb_root_cached	sorted_root;
277 	struct callchain_root	callchain;
278 	struct callchain_cursor	cursor;
279 };
280 
281 /* track idle times per cpu */
282 static struct thread **idle_threads;
283 static int idle_max_cpu;
284 static char idle_comm[] = "<idle>";
285 
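/*
 * CLOCK_MONOTONIC keeps the measurements below immune to wall-clock
 * adjustments. The timespec is folded into a single u64 nanosecond
 * count, e.g. { .tv_sec = 2, .tv_nsec = 500000000 } yields
 * 2500000000 nsecs.
 */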
286 static u64 get_nsecs(void)
287 {
288 	struct timespec ts;
289 
290 	clock_gettime(CLOCK_MONOTONIC, &ts);
291 
292 	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
293 }
294 
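/*
 * Busy-loop until the requested time has elapsed. The exit condition
 * is equivalent to (T1 - T0) >= (nsecs - run_measurement_overhead),
 * i.e. the calibrated cost of get_nsecs() itself is credited towards
 * the burn time: with nsecs = 1000 and an overhead of 50, the loop
 * exits after roughly 950 nsecs.
 */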
295 static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
296 {
297 	u64 T0 = get_nsecs(), T1;
298 
299 	do {
300 		T1 = get_nsecs();
301 	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
302 }
303 
304 static void sleep_nsecs(u64 nsecs)
305 {
306 	struct timespec ts;
307 
308 	ts.tv_nsec = nsecs % NSEC_PER_SEC;
309 	ts.tv_sec = nsecs / NSEC_PER_SEC;
310 
311 	nanosleep(&ts, NULL);
312 }
313 
314 static void calibrate_run_measurement_overhead(struct perf_sched *sched)
315 {
316 	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
317 	int i;
318 
319 	for (i = 0; i < 10; i++) {
320 		T0 = get_nsecs();
321 		burn_nsecs(sched, 0);
322 		T1 = get_nsecs();
323 		delta = T1-T0;
324 		min_delta = min(min_delta, delta);
325 	}
326 	sched->run_measurement_overhead = min_delta;
327 
328 	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
329 }
330 
331 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
332 {
333 	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
334 	int i;
335 
336 	for (i = 0; i < 10; i++) {
337 		T0 = get_nsecs();
338 		sleep_nsecs(10000);
339 		T1 = get_nsecs();
340 		delta = T1-T0;
341 		min_delta = min(min_delta, delta);
342 	}
343 	min_delta -= 10000;
344 	sched->sleep_measurement_overhead = min_delta;
345 
346 	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
347 }
348 
349 static struct sched_atom *
350 get_new_event(struct task_desc *task, u64 timestamp)
351 {
352 	struct sched_atom *event = zalloc(sizeof(*event));
353 	unsigned long idx = task->nr_events;
354 	size_t size;
355 
356 	event->timestamp = timestamp;
357 	event->nr = idx;
358 
359 	task->nr_events++;
360 	size = sizeof(struct sched_atom *) * task->nr_events;
361 	task->atoms = realloc(task->atoms, size);
362 	BUG_ON(!task->atoms);
363 
364 	task->atoms[idx] = event;
365 
366 	return event;
367 }
368 
369 static struct sched_atom *last_event(struct task_desc *task)
370 {
371 	if (!task->nr_events)
372 		return NULL;
373 
374 	return task->atoms[task->nr_events - 1];
375 }
376 
377 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
378 				u64 timestamp, u64 duration)
379 {
380 	struct sched_atom *event, *curr_event = last_event(task);
381 
382 	/*
383 	 * optimize an existing RUN event by merging this one
384 	 * into it:
385 	 */
386 	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
387 		sched->nr_run_events_optimized++;
388 		curr_event->duration += duration;
389 		return;
390 	}
391 
392 	event = get_new_event(task, timestamp);
393 
394 	event->type = SCHED_EVENT_RUN;
395 	event->duration = duration;
396 
397 	sched->nr_run_events++;
398 }
399 
400 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
401 				   u64 timestamp, struct task_desc *wakee)
402 {
403 	struct sched_atom *event, *wakee_event;
404 
405 	event = get_new_event(task, timestamp);
406 	event->type = SCHED_EVENT_WAKEUP;
407 	event->wakee = wakee;
408 
409 	wakee_event = last_event(wakee);
410 	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
411 		sched->targetless_wakeups++;
412 		return;
413 	}
414 	if (wakee_event->wait_sem) {
415 		sched->multitarget_wakeups++;
416 		return;
417 	}
418 
419 	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
420 	sem_init(wakee_event->wait_sem, 0, 0);
421 	wakee_event->specific_wait = 1;
422 	event->wait_sem = wakee_event->wait_sem;
423 
424 	sched->nr_wakeup_events++;
425 }
426 
427 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
428 				  u64 timestamp, u64 task_state __maybe_unused)
429 {
430 	struct sched_atom *event = get_new_event(task, timestamp);
431 
432 	event->type = SCHED_EVENT_SLEEP;
433 
434 	sched->nr_sleep_events++;
435 }
436 
437 static struct task_desc *register_pid(struct perf_sched *sched,
438 				      unsigned long pid, const char *comm)
439 {
440 	struct task_desc *task;
441 	static int pid_max;
442 
443 	if (sched->pid_to_task == NULL) {
444 		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
445 			pid_max = MAX_PID;
446 		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
447 	}
448 	if (pid >= (unsigned long)pid_max) {
449 		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
450 			sizeof(struct task_desc *))) == NULL);
451 		while (pid >= (unsigned long)pid_max)
452 			sched->pid_to_task[pid_max++] = NULL;
453 	}
454 
455 	task = sched->pid_to_task[pid];
456 
457 	if (task)
458 		return task;
459 
460 	task = zalloc(sizeof(*task));
461 	task->pid = pid;
462 	task->nr = sched->nr_tasks;
463 	strcpy(task->comm, comm);
464 	/*
465 	 * every task starts in sleeping state - this gets ignored
466 	 * if there's no wakeup pointing to this sleep state:
467 	 */
468 	add_sched_event_sleep(sched, task, 0, 0);
469 
470 	sched->pid_to_task[pid] = task;
471 	sched->nr_tasks++;
472 	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
473 	BUG_ON(!sched->tasks);
474 	sched->tasks[task->nr] = task;
475 
476 	if (verbose > 0)
477 		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
478 
479 	return task;
480 }
481 
482 
483 static void print_task_traces(struct perf_sched *sched)
484 {
485 	struct task_desc *task;
486 	unsigned long i;
487 
488 	for (i = 0; i < sched->nr_tasks; i++) {
489 		task = sched->tasks[i];
490 		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
491 			task->nr, task->comm, task->pid, task->nr_events);
492 	}
493 }
494 
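/*
 * Link the tasks into a wakeup ring: task i wakes task i + 1, and the
 * last task wraps around to wake task 0, so every replayed task ends
 * up with at least one waker.
 */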
495 static void add_cross_task_wakeups(struct perf_sched *sched)
496 {
497 	struct task_desc *task1, *task2;
498 	unsigned long i, j;
499 
500 	for (i = 0; i < sched->nr_tasks; i++) {
501 		task1 = sched->tasks[i];
502 		j = i + 1;
503 		if (j == sched->nr_tasks)
504 			j = 0;
505 		task2 = sched->tasks[j];
506 		add_sched_event_wakeup(sched, task1, 0, task2);
507 	}
508 }
509 
510 static void perf_sched__process_event(struct perf_sched *sched,
511 				      struct sched_atom *atom)
512 {
513 	int ret = 0;
514 
515 	switch (atom->type) {
516 		case SCHED_EVENT_RUN:
517 			burn_nsecs(sched, atom->duration);
518 			break;
519 		case SCHED_EVENT_SLEEP:
520 			if (atom->wait_sem)
521 				ret = sem_wait(atom->wait_sem);
522 			BUG_ON(ret);
523 			break;
524 		case SCHED_EVENT_WAKEUP:
525 			if (atom->wait_sem)
526 				ret = sem_post(atom->wait_sem);
527 			BUG_ON(ret);
528 			break;
529 		case SCHED_EVENT_MIGRATION:
530 			break;
531 		default:
532 			BUG_ON(1);
533 	}
534 }
535 
536 static u64 get_cpu_usage_nsec_parent(void)
537 {
538 	struct rusage ru;
539 	u64 sum;
540 	int err;
541 
542 	err = getrusage(RUSAGE_SELF, &ru);
543 	BUG_ON(err);
544 
545 	sum =  ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
546 	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
547 
548 	return sum;
549 }
550 
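/*
 * Replay opens one task-clock counter per replayed task, so large
 * traces can exhaust the file-descriptor limit. With -f, the EMFILE
 * path below raises RLIMIT_NOFILE by the number of counters still to
 * be opened and retries; raising the hard limit needs privilege.
 */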
551 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
552 {
553 	struct perf_event_attr attr;
554 	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
555 	int fd;
556 	struct rlimit limit;
557 	bool need_privilege = false;
558 
559 	memset(&attr, 0, sizeof(attr));
560 
561 	attr.type = PERF_TYPE_SOFTWARE;
562 	attr.config = PERF_COUNT_SW_TASK_CLOCK;
563 
564 force_again:
565 	fd = sys_perf_event_open(&attr, 0, -1, -1,
566 				 perf_event_open_cloexec_flag());
567 
568 	if (fd < 0) {
569 		if (errno == EMFILE) {
570 			if (sched->force) {
571 				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
572 				limit.rlim_cur += sched->nr_tasks - cur_task;
573 				if (limit.rlim_cur > limit.rlim_max) {
574 					limit.rlim_max = limit.rlim_cur;
575 					need_privilege = true;
576 				}
577 				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
578 					if (need_privilege && errno == EPERM)
579 						strcpy(info, "Need privilege\n");
580 				} else
581 					goto force_again;
582 			} else
583 				strcpy(info, "Try again with the -f option\n");
584 		}
585 		pr_err("Error: sys_perf_event_open() syscall returned "
586 		       "with %d (%s)\n%s", fd,
587 		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
588 		exit(EXIT_FAILURE);
589 	}
590 	return fd;
591 }
592 
593 static u64 get_cpu_usage_nsec_self(int fd)
594 {
595 	u64 runtime;
596 	int ret;
597 
598 	ret = read(fd, &runtime, sizeof(runtime));
599 	BUG_ON(ret != sizeof(runtime));
600 
601 	return runtime;
602 }
603 
604 struct sched_thread_parms {
605 	struct task_desc  *task;
606 	struct perf_sched *sched;
607 	int fd;
608 };
609 
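/*
 * The worker threads and wait_for_tasks() synchronize through a pair
 * of mutexes used as gates plus two semaphores per task: each worker
 * posts ready_for_work, then lock/unlocks start_work_mutex (held by
 * the parent until every worker has checked in), replays its atoms,
 * posts work_done_sem, and finally lock/unlocks work_done_wait_mutex,
 * which the parent holds until it has collected the results and is
 * ready for the next iteration.
 */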
610 static void *thread_func(void *ctx)
611 {
612 	struct sched_thread_parms *parms = ctx;
613 	struct task_desc *this_task = parms->task;
614 	struct perf_sched *sched = parms->sched;
615 	u64 cpu_usage_0, cpu_usage_1;
616 	unsigned long i, ret;
617 	char comm2[22];
618 	int fd = parms->fd;
619 
620 	zfree(&parms);
621 
622 	sprintf(comm2, ":%s", this_task->comm);
623 	prctl(PR_SET_NAME, comm2);
624 	if (fd < 0)
625 		return NULL;
626 again:
627 	ret = sem_post(&this_task->ready_for_work);
628 	BUG_ON(ret);
629 	ret = pthread_mutex_lock(&sched->start_work_mutex);
630 	BUG_ON(ret);
631 	ret = pthread_mutex_unlock(&sched->start_work_mutex);
632 	BUG_ON(ret);
633 
634 	cpu_usage_0 = get_cpu_usage_nsec_self(fd);
635 
636 	for (i = 0; i < this_task->nr_events; i++) {
637 		this_task->curr_event = i;
638 		perf_sched__process_event(sched, this_task->atoms[i]);
639 	}
640 
641 	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
642 	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
643 	ret = sem_post(&this_task->work_done_sem);
644 	BUG_ON(ret);
645 
646 	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
647 	BUG_ON(ret);
648 	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
649 	BUG_ON(ret);
650 
651 	goto again;
652 }
653 
654 static void create_tasks(struct perf_sched *sched)
655 {
656 	struct task_desc *task;
657 	pthread_attr_t attr;
658 	unsigned long i;
659 	int err;
660 
661 	err = pthread_attr_init(&attr);
662 	BUG_ON(err);
663 	err = pthread_attr_setstacksize(&attr,
664 			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
665 	BUG_ON(err);
666 	err = pthread_mutex_lock(&sched->start_work_mutex);
667 	BUG_ON(err);
668 	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
669 	BUG_ON(err);
670 	for (i = 0; i < sched->nr_tasks; i++) {
671 		struct sched_thread_parms *parms = malloc(sizeof(*parms));
672 		BUG_ON(parms == NULL);
673 		parms->task = task = sched->tasks[i];
674 		parms->sched = sched;
675 		parms->fd = self_open_counters(sched, i);
676 		sem_init(&task->sleep_sem, 0, 0);
677 		sem_init(&task->ready_for_work, 0, 0);
678 		sem_init(&task->work_done_sem, 0, 0);
679 		task->curr_event = 0;
680 		err = pthread_create(&task->thread, &attr, thread_func, parms);
681 		BUG_ON(err);
682 	}
683 }
684 
685 static void wait_for_tasks(struct perf_sched *sched)
686 {
687 	u64 cpu_usage_0, cpu_usage_1;
688 	struct task_desc *task;
689 	unsigned long i, ret;
690 
691 	sched->start_time = get_nsecs();
692 	sched->cpu_usage = 0;
693 	pthread_mutex_unlock(&sched->work_done_wait_mutex);
694 
695 	for (i = 0; i < sched->nr_tasks; i++) {
696 		task = sched->tasks[i];
697 		ret = sem_wait(&task->ready_for_work);
698 		BUG_ON(ret);
699 		sem_init(&task->ready_for_work, 0, 0);
700 	}
701 	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
702 	BUG_ON(ret);
703 
704 	cpu_usage_0 = get_cpu_usage_nsec_parent();
705 
706 	pthread_mutex_unlock(&sched->start_work_mutex);
707 
708 	for (i = 0; i < sched->nr_tasks; i++) {
709 		task = sched->tasks[i];
710 		ret = sem_wait(&task->work_done_sem);
711 		BUG_ON(ret);
712 		sem_init(&task->work_done_sem, 0, 0);
713 		sched->cpu_usage += task->cpu_usage;
714 		task->cpu_usage = 0;
715 	}
716 
717 	cpu_usage_1 = get_cpu_usage_nsec_parent();
718 	if (!sched->runavg_cpu_usage)
719 		sched->runavg_cpu_usage = sched->cpu_usage;
720 	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
721 
722 	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
723 	if (!sched->runavg_parent_cpu_usage)
724 		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
725 	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
726 					 sched->parent_cpu_usage) / sched->replay_repeat;
727 
728 	ret = pthread_mutex_lock(&sched->start_work_mutex);
729 	BUG_ON(ret);
730 
731 	for (i = 0; i < sched->nr_tasks; i++) {
732 		task = sched->tasks[i];
733 		sem_init(&task->sleep_sem, 0, 0);
734 		task->curr_event = 0;
735 	}
736 }
737 
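/*
 * The running averages here and in wait_for_tasks() use an
 * exponentially weighted form:
 *
 *	runavg = (runavg * (replay_repeat - 1) + sample) / replay_repeat
 *
 * With the default replay_repeat of 10, each new sample contributes
 * one tenth of its value, e.g. runavg = 2000 and sample = 3000 give
 * (2000 * 9 + 3000) / 10 = 2100.
 */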
738 static void run_one_test(struct perf_sched *sched)
739 {
740 	u64 T0, T1, delta, avg_delta, fluct;
741 
742 	T0 = get_nsecs();
743 	wait_for_tasks(sched);
744 	T1 = get_nsecs();
745 
746 	delta = T1 - T0;
747 	sched->sum_runtime += delta;
748 	sched->nr_runs++;
749 
750 	avg_delta = sched->sum_runtime / sched->nr_runs;
751 	if (delta < avg_delta)
752 		fluct = avg_delta - delta;
753 	else
754 		fluct = delta - avg_delta;
755 	sched->sum_fluct += fluct;
756 	if (!sched->run_avg)
757 		sched->run_avg = delta;
758 	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
759 
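	/*
	 * One summary line is printed per iteration; illustrative
	 * (not measured) output:
	 *
	 *	#1  : 54.821, ravg: 54.82, cpu: 1233.10 / 1233.10
	 */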
760 	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
761 
762 	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
763 
764 	printf("cpu: %0.2f / %0.2f",
765 		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
766 
767 #if 0
768 	/*
769 	 * rusage statistics done by the parent, these are less
770 	 * accurate than the sched->sum_exec_runtime based statistics:
771 	 */
772 	printf(" [%0.2f / %0.2f]",
773 		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
774 		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
775 #endif
776 
777 	printf("\n");
778 
779 	if (sched->nr_sleep_corrections)
780 		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
781 	sched->nr_sleep_corrections = 0;
782 }
783 
784 static void test_calibrations(struct perf_sched *sched)
785 {
786 	u64 T0, T1;
787 
788 	T0 = get_nsecs();
789 	burn_nsecs(sched, NSEC_PER_MSEC);
790 	T1 = get_nsecs();
791 
792 	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
793 
794 	T0 = get_nsecs();
795 	sleep_nsecs(NSEC_PER_MSEC);
796 	T1 = get_nsecs();
797 
798 	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
799 }
800 
801 static int
802 replay_wakeup_event(struct perf_sched *sched,
803 		    struct evsel *evsel, struct perf_sample *sample,
804 		    struct machine *machine __maybe_unused)
805 {
806 	const char *comm = perf_evsel__strval(evsel, sample, "comm");
807 	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
808 	struct task_desc *waker, *wakee;
809 
810 	if (verbose > 0) {
811 		printf("sched_wakeup event %p\n", evsel);
812 
813 		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
814 	}
815 
816 	waker = register_pid(sched, sample->tid, "<unknown>");
817 	wakee = register_pid(sched, pid, comm);
818 
819 	add_sched_event_wakeup(sched, waker, sample->time, wakee);
820 	return 0;
821 }
822 
823 static int replay_switch_event(struct perf_sched *sched,
824 			       struct evsel *evsel,
825 			       struct perf_sample *sample,
826 			       struct machine *machine __maybe_unused)
827 {
828 	const char *prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
829 		   *next_comm  = perf_evsel__strval(evsel, sample, "next_comm");
830 	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
831 		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
832 	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
833 	struct task_desc *prev, __maybe_unused *next;
834 	u64 timestamp0, timestamp = sample->time;
835 	int cpu = sample->cpu;
836 	s64 delta;
837 
838 	if (verbose > 0)
839 		printf("sched_switch event %p\n", evsel);
840 
841 	if (cpu >= MAX_CPUS || cpu < 0)
842 		return 0;
843 
844 	timestamp0 = sched->cpu_last_switched[cpu];
845 	if (timestamp0)
846 		delta = timestamp - timestamp0;
847 	else
848 		delta = 0;
849 
850 	if (delta < 0) {
851 		pr_err("hm, delta: %" PRIi64 " < 0 ?\n", delta);
852 		return -1;
853 	}
854 
855 	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
856 		 prev_comm, prev_pid, next_comm, next_pid, delta);
857 
858 	prev = register_pid(sched, prev_pid, prev_comm);
859 	next = register_pid(sched, next_pid, next_comm);
860 
861 	sched->cpu_last_switched[cpu] = timestamp;
862 
863 	add_sched_event_run(sched, prev, timestamp, delta);
864 	add_sched_event_sleep(sched, prev, timestamp, prev_state);
865 
866 	return 0;
867 }
868 
869 static int replay_fork_event(struct perf_sched *sched,
870 			     union perf_event *event,
871 			     struct machine *machine)
872 {
873 	struct thread *child, *parent;
874 
875 	child = machine__findnew_thread(machine, event->fork.pid,
876 					event->fork.tid);
877 	parent = machine__findnew_thread(machine, event->fork.ppid,
878 					 event->fork.ptid);
879 
880 	if (child == NULL || parent == NULL) {
881 		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
882 				 child, parent);
883 		goto out_put;
884 	}
885 
886 	if (verbose > 0) {
887 		printf("fork event\n");
888 		printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
889 		printf("...  child: %s/%d\n", thread__comm_str(child), child->tid);
890 	}
891 
892 	register_pid(sched, parent->tid, thread__comm_str(parent));
893 	register_pid(sched, child->tid, thread__comm_str(child));
894 out_put:
895 	thread__put(child);
896 	thread__put(parent);
897 	return 0;
898 }
899 
900 struct sort_dimension {
901 	const char		*name;
902 	sort_fn_t		cmp;
903 	struct list_head	list;
904 };
905 
906 /*
907  * handle runtime stats saved per thread
908  */
909 static struct thread_runtime *thread__init_runtime(struct thread *thread)
910 {
911 	struct thread_runtime *r;
912 
913 	r = zalloc(sizeof(struct thread_runtime));
914 	if (!r)
915 		return NULL;
916 
917 	init_stats(&r->run_stats);
918 	thread__set_priv(thread, r);
919 
920 	return r;
921 }
922 
923 static struct thread_runtime *thread__get_runtime(struct thread *thread)
924 {
925 	struct thread_runtime *tr;
926 
927 	tr = thread__priv(thread);
928 	if (tr == NULL) {
929 		tr = thread__init_runtime(thread);
930 		if (tr == NULL)
931 			pr_debug("Failed to allocate memory for runtime data.\n");
932 	}
933 
934 	return tr;
935 }
936 
937 static int
938 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
939 {
940 	struct sort_dimension *sort;
941 	int ret = 0;
942 
943 	BUG_ON(list_empty(list));
944 
945 	list_for_each_entry(sort, list, list) {
946 		ret = sort->cmp(l, r);
947 		if (ret)
948 			return ret;
949 	}
950 
951 	return ret;
952 }
953 
954 static struct work_atoms *
955 thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
956 			 struct list_head *sort_list)
957 {
958 	struct rb_node *node = root->rb_root.rb_node;
959 	struct work_atoms key = { .thread = thread };
960 
961 	while (node) {
962 		struct work_atoms *atoms;
963 		int cmp;
964 
965 		atoms = container_of(node, struct work_atoms, node);
966 
967 		cmp = thread_lat_cmp(sort_list, &key, atoms);
968 		if (cmp > 0)
969 			node = node->rb_left;
970 		else if (cmp < 0)
971 			node = node->rb_right;
972 		else {
973 			BUG_ON(thread != atoms->thread);
974 			return atoms;
975 		}
976 	}
977 	return NULL;
978 }
979 
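/*
 * The cached rbtree keeps a pointer to its leftmost node, so the
 * lowest-sorting entry stays available in O(1); "leftmost" may only
 * be passed as true if the new node never descended to the right.
 */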
980 static void
981 __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
982 			 struct list_head *sort_list)
983 {
984 	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
985 	bool leftmost = true;
986 
987 	while (*new) {
988 		struct work_atoms *this;
989 		int cmp;
990 
991 		this = container_of(*new, struct work_atoms, node);
992 		parent = *new;
993 
994 		cmp = thread_lat_cmp(sort_list, data, this);
995 
996 		if (cmp > 0)
997 			new = &((*new)->rb_left);
998 		else {
999 			new = &((*new)->rb_right);
1000 			leftmost = false;
1001 		}
1002 	}
1003 
1004 	rb_link_node(&data->node, parent, new);
1005 	rb_insert_color_cached(&data->node, root, leftmost);
1006 }
1007 
1008 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
1009 {
1010 	struct work_atoms *atoms = zalloc(sizeof(*atoms));
1011 	if (!atoms) {
1012 		pr_err("No memory at %s\n", __func__);
1013 		return -1;
1014 	}
1015 
1016 	atoms->thread = thread__get(thread);
1017 	INIT_LIST_HEAD(&atoms->work_list);
1018 	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
1019 	return 0;
1020 }
1021 
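/*
 * prev_state is used as a direct index into TASK_STATE_TO_CHAR_STR:
 * 0 -> 'R', 1 -> 'S', 2 -> 'D'. The larger state values defined above
 * are bit flags (4, 8, 16, ...), so they do not map one-to-one onto
 * the character table here.
 */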
1022 static char sched_out_state(u64 prev_state)
1023 {
1024 	const char *str = TASK_STATE_TO_CHAR_STR;
1025 
1026 	return str[prev_state];
1027 }
1028 
1029 static int
1030 add_sched_out_event(struct work_atoms *atoms,
1031 		    char run_state,
1032 		    u64 timestamp)
1033 {
1034 	struct work_atom *atom = zalloc(sizeof(*atom));
1035 	if (!atom) {
1036 		pr_err("No memory at %s\n", __func__);
1037 		return -1;
1038 	}
1039 
1040 	atom->sched_out_time = timestamp;
1041 
1042 	if (run_state == 'R') {
1043 		atom->state = THREAD_WAIT_CPU;
1044 		atom->wake_up_time = atom->sched_out_time;
1045 	}
1046 
1047 	list_add_tail(&atom->list, &atoms->work_list);
1048 	return 0;
1049 }
1050 
1051 static void
1052 add_runtime_event(struct work_atoms *atoms, u64 delta,
1053 		  u64 timestamp __maybe_unused)
1054 {
1055 	struct work_atom *atom;
1056 
1057 	BUG_ON(list_empty(&atoms->work_list));
1058 
1059 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1060 
1061 	atom->runtime += delta;
1062 	atoms->total_runtime += delta;
1063 }
1064 
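/*
 * A completed atom contributes sched_in_time - wake_up_time to the
 * thread's latency stats, e.g. a wakeup at t = 1000 and a sched-in at
 * t = 1450 adds 450 nsecs to total_lat and becomes the new max_lat if
 * it exceeds the previous maximum.
 */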
1065 static void
1066 add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
1067 {
1068 	struct work_atom *atom;
1069 	u64 delta;
1070 
1071 	if (list_empty(&atoms->work_list))
1072 		return;
1073 
1074 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1075 
1076 	if (atom->state != THREAD_WAIT_CPU)
1077 		return;
1078 
1079 	if (timestamp < atom->wake_up_time) {
1080 		atom->state = THREAD_IGNORE;
1081 		return;
1082 	}
1083 
1084 	atom->state = THREAD_SCHED_IN;
1085 	atom->sched_in_time = timestamp;
1086 
1087 	delta = atom->sched_in_time - atom->wake_up_time;
1088 	atoms->total_lat += delta;
1089 	if (delta > atoms->max_lat) {
1090 		atoms->max_lat = delta;
1091 		atoms->max_lat_at = timestamp;
1092 	}
1093 	atoms->nb_atoms++;
1094 }
1095 
1096 static int latency_switch_event(struct perf_sched *sched,
1097 				struct evsel *evsel,
1098 				struct perf_sample *sample,
1099 				struct machine *machine)
1100 {
1101 	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1102 		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1103 	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
1104 	struct work_atoms *out_events, *in_events;
1105 	struct thread *sched_out, *sched_in;
1106 	u64 timestamp0, timestamp = sample->time;
1107 	int cpu = sample->cpu, err = -1;
1108 	s64 delta;
1109 
1110 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1111 
1112 	timestamp0 = sched->cpu_last_switched[cpu];
1113 	sched->cpu_last_switched[cpu] = timestamp;
1114 	if (timestamp0)
1115 		delta = timestamp - timestamp0;
1116 	else
1117 		delta = 0;
1118 
1119 	if (delta < 0) {
1120 		pr_err("hm, delta: %" PRIi64 " < 0 ?\n", delta);
1121 		return -1;
1122 	}
1123 
1124 	sched_out = machine__findnew_thread(machine, -1, prev_pid);
1125 	sched_in = machine__findnew_thread(machine, -1, next_pid);
1126 	if (sched_out == NULL || sched_in == NULL)
1127 		goto out_put;
1128 
1129 	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1130 	if (!out_events) {
1131 		if (thread_atoms_insert(sched, sched_out))
1132 			goto out_put;
1133 		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
1134 		if (!out_events) {
1135 			pr_err("out-event: Internal tree error\n");
1136 			goto out_put;
1137 		}
1138 	}
1139 	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
1140 		return -1;
1141 
1142 	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1143 	if (!in_events) {
1144 		if (thread_atoms_insert(sched, sched_in))
1145 			goto out_put;
1146 		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
1147 		if (!in_events) {
1148 			pr_err("in-event: Internal tree error\n");
1149 			goto out_put;
1150 		}
1151 		/*
1152 		 * Task came in that we have not heard about yet,
1153 		 * add an initial atom in runnable state:
1154 		 */
1155 		if (add_sched_out_event(in_events, 'R', timestamp))
1156 			goto out_put;
1157 	}
1158 	add_sched_in_event(in_events, timestamp);
1159 	err = 0;
1160 out_put:
1161 	thread__put(sched_out);
1162 	thread__put(sched_in);
1163 	return err;
1164 }
1165 
1166 static int latency_runtime_event(struct perf_sched *sched,
1167 				 struct evsel *evsel,
1168 				 struct perf_sample *sample,
1169 				 struct machine *machine)
1170 {
1171 	const u32 pid	   = perf_evsel__intval(evsel, sample, "pid");
1172 	const u64 runtime  = perf_evsel__intval(evsel, sample, "runtime");
1173 	struct thread *thread = machine__findnew_thread(machine, -1, pid);
1174 	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1175 	u64 timestamp = sample->time;
1176 	int cpu = sample->cpu, err = -1;
1177 
1178 	if (thread == NULL)
1179 		return -1;
1180 
1181 	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1182 	if (!atoms) {
1183 		if (thread_atoms_insert(sched, thread))
1184 			goto out_put;
1185 		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
1186 		if (!atoms) {
1187 			pr_err("in-event: Internal tree error\n");
1188 			goto out_put;
1189 		}
1190 		if (add_sched_out_event(atoms, 'R', timestamp))
1191 			goto out_put;
1192 	}
1193 
1194 	add_runtime_event(atoms, runtime, timestamp);
1195 	err = 0;
1196 out_put:
1197 	thread__put(thread);
1198 	return err;
1199 }
1200 
1201 static int latency_wakeup_event(struct perf_sched *sched,
1202 				struct evsel *evsel,
1203 				struct perf_sample *sample,
1204 				struct machine *machine)
1205 {
1206 	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid");
1207 	struct work_atoms *atoms;
1208 	struct work_atom *atom;
1209 	struct thread *wakee;
1210 	u64 timestamp = sample->time;
1211 	int err = -1;
1212 
1213 	wakee = machine__findnew_thread(machine, -1, pid);
1214 	if (wakee == NULL)
1215 		return -1;
1216 	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1217 	if (!atoms) {
1218 		if (thread_atoms_insert(sched, wakee))
1219 			goto out_put;
1220 		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
1221 		if (!atoms) {
1222 			pr_err("wakeup-event: Internal tree error\n");
1223 			goto out_put;
1224 		}
1225 		if (add_sched_out_event(atoms, 'S', timestamp))
1226 			goto out_put;
1227 	}
1228 
1229 	BUG_ON(list_empty(&atoms->work_list));
1230 
1231 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1232 
1233 	/*
1234 	 * A wakeup event is not guaranteed to arrive while the task is
1235 	 * off the run queue; it may also fire while the task is still
1236 	 * on the run queue and merely change ->state to TASK_RUNNING.
1237 	 * In that case ->wake_up_time must not be set for a task that
1238 	 * is already runnable.
1239 	 *
1240 	 * You WILL be missing events if you've recorded only
1241 	 * one CPU, or are looking at only one, so don't skip
1242 	 * in this case.
1243 	 */
1244 	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
1245 		goto out_ok;
1246 
1247 	sched->nr_timestamps++;
1248 	if (atom->sched_out_time > timestamp) {
1249 		sched->nr_unordered_timestamps++;
1250 		goto out_ok;
1251 	}
1252 
1253 	atom->state = THREAD_WAIT_CPU;
1254 	atom->wake_up_time = timestamp;
1255 out_ok:
1256 	err = 0;
1257 out_put:
1258 	thread__put(wakee);
1259 	return err;
1260 }
1261 
1262 static int latency_migrate_task_event(struct perf_sched *sched,
1263 				      struct evsel *evsel,
1264 				      struct perf_sample *sample,
1265 				      struct machine *machine)
1266 {
1267 	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
1268 	u64 timestamp = sample->time;
1269 	struct work_atoms *atoms;
1270 	struct work_atom *atom;
1271 	struct thread *migrant;
1272 	int err = -1;
1273 
1274 	/*
1275 	 * Only need to worry about migration when profiling one CPU.
1276 	 */
1277 	if (sched->profile_cpu == -1)
1278 		return 0;
1279 
1280 	migrant = machine__findnew_thread(machine, -1, pid);
1281 	if (migrant == NULL)
1282 		return -1;
1283 	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1284 	if (!atoms) {
1285 		if (thread_atoms_insert(sched, migrant))
1286 			goto out_put;
1287 		register_pid(sched, migrant->tid, thread__comm_str(migrant));
1288 		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
1289 		if (!atoms) {
1290 			pr_err("migration-event: Internal tree error\n");
1291 			goto out_put;
1292 		}
1293 		if (add_sched_out_event(atoms, 'R', timestamp))
1294 			goto out_put;
1295 	}
1296 
1297 	BUG_ON(list_empty(&atoms->work_list));
1298 
1299 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1300 	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
1301 
1302 	sched->nr_timestamps++;
1303 
1304 	if (atom->sched_out_time > timestamp)
1305 		sched->nr_unordered_timestamps++;
1306 	err = 0;
1307 out_put:
1308 	thread__put(migrant);
1309 	return err;
1310 }
1311 
1312 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
1313 {
1314 	int i;
1315 	int ret;
1316 	u64 avg;
1317 	char max_lat_at[32];
1318 
1319 	if (!work_list->nb_atoms)
1320 		return;
1321 	/*
1322 	 * Ignore idle threads:
1323 	 */
1324 	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
1325 		return;
1326 
1327 	sched->all_runtime += work_list->total_runtime;
1328 	sched->all_count   += work_list->nb_atoms;
1329 
1330 	if (work_list->num_merged > 1)
1331 		ret = printf("  %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
1332 	else
1333 		ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
1334 
1335 	for (i = 0; i < 24 - ret; i++)
1336 		printf(" ");
1337 
1338 	avg = work_list->total_lat / work_list->nb_atoms;
1339 	timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));
1340 
1341 	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
1342 	       (double)work_list->total_runtime / NSEC_PER_MSEC,
1343 	       work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
1344 	       (double)work_list->max_lat / NSEC_PER_MSEC,
1345 	       max_lat_at);
1346 }
1347 
1348 static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1349 {
1350 	if (l->thread == r->thread)
1351 		return 0;
1352 	if (l->thread->tid < r->thread->tid)
1353 		return -1;
1354 	if (l->thread->tid > r->thread->tid)
1355 		return 1;
1356 	return (int)(l->thread - r->thread);
1357 }
1358 
1359 static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1360 {
1361 	u64 avgl, avgr;
1362 
1363 	if (!l->nb_atoms)
1364 		return -1;
1365 
1366 	if (!r->nb_atoms)
1367 		return 1;
1368 
1369 	avgl = l->total_lat / l->nb_atoms;
1370 	avgr = r->total_lat / r->nb_atoms;
1371 
1372 	if (avgl < avgr)
1373 		return -1;
1374 	if (avgl > avgr)
1375 		return 1;
1376 
1377 	return 0;
1378 }
1379 
1380 static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1381 {
1382 	if (l->max_lat < r->max_lat)
1383 		return -1;
1384 	if (l->max_lat > r->max_lat)
1385 		return 1;
1386 
1387 	return 0;
1388 }
1389 
1390 static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1391 {
1392 	if (l->nb_atoms < r->nb_atoms)
1393 		return -1;
1394 	if (l->nb_atoms > r->nb_atoms)
1395 		return 1;
1396 
1397 	return 0;
1398 }
1399 
1400 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1401 {
1402 	if (l->total_runtime < r->total_runtime)
1403 		return -1;
1404 	if (l->total_runtime > r->total_runtime)
1405 		return 1;
1406 
1407 	return 0;
1408 }
1409 
1410 static int sort_dimension__add(const char *tok, struct list_head *list)
1411 {
1412 	size_t i;
1413 	static struct sort_dimension avg_sort_dimension = {
1414 		.name = "avg",
1415 		.cmp  = avg_cmp,
1416 	};
1417 	static struct sort_dimension max_sort_dimension = {
1418 		.name = "max",
1419 		.cmp  = max_cmp,
1420 	};
1421 	static struct sort_dimension pid_sort_dimension = {
1422 		.name = "pid",
1423 		.cmp  = pid_cmp,
1424 	};
1425 	static struct sort_dimension runtime_sort_dimension = {
1426 		.name = "runtime",
1427 		.cmp  = runtime_cmp,
1428 	};
1429 	static struct sort_dimension switch_sort_dimension = {
1430 		.name = "switch",
1431 		.cmp  = switch_cmp,
1432 	};
1433 	struct sort_dimension *available_sorts[] = {
1434 		&pid_sort_dimension,
1435 		&avg_sort_dimension,
1436 		&max_sort_dimension,
1437 		&switch_sort_dimension,
1438 		&runtime_sort_dimension,
1439 	};
1440 
1441 	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
1442 		if (!strcmp(available_sorts[i]->name, tok)) {
1443 			list_add_tail(&available_sorts[i]->list, list);
1444 
1445 			return 0;
1446 		}
1447 	}
1448 
1449 	return -1;
1450 }
1451 
1452 static void perf_sched__sort_lat(struct perf_sched *sched)
1453 {
1454 	struct rb_node *node;
1455 	struct rb_root_cached *root = &sched->atom_root;
1456 again:
1457 	for (;;) {
1458 		struct work_atoms *data;
1459 		node = rb_first_cached(root);
1460 		if (!node)
1461 			break;
1462 
1463 		rb_erase_cached(node, root);
1464 		data = rb_entry(node, struct work_atoms, node);
1465 		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
1466 	}
1467 	if (root == &sched->atom_root) {
1468 		root = &sched->merged_atom_root;
1469 		goto again;
1470 	}
1471 }
1472 
1473 static int process_sched_wakeup_event(struct perf_tool *tool,
1474 				      struct evsel *evsel,
1475 				      struct perf_sample *sample,
1476 				      struct machine *machine)
1477 {
1478 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1479 
1480 	if (sched->tp_handler->wakeup_event)
1481 		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
1482 
1483 	return 0;
1484 }
1485 
1486 union map_priv {
1487 	void	*ptr;
1488 	bool	 color;
1489 };
1490 
1491 static bool thread__has_color(struct thread *thread)
1492 {
1493 	union map_priv priv = {
1494 		.ptr = thread__priv(thread),
1495 	};
1496 
1497 	return priv.color;
1498 }
1499 
1500 static struct thread*
1501 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
1502 {
1503 	struct thread *thread = machine__findnew_thread(machine, pid, tid);
1504 	union map_priv priv = {
1505 		.color = false,
1506 	};
1507 
1508 	if (!sched->map.color_pids || !thread || thread__priv(thread))
1509 		return thread;
1510 
1511 	if (thread_map__has(sched->map.color_pids, tid))
1512 		priv.color = true;
1513 
1514 	thread__set_priv(thread, priv.ptr);
1515 	return thread;
1516 }
1517 
1518 static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
1519 			    struct perf_sample *sample, struct machine *machine)
1520 {
1521 	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1522 	struct thread *sched_in;
1523 	struct thread_runtime *tr;
1524 	int new_shortname;
1525 	u64 timestamp0, timestamp = sample->time;
1526 	s64 delta;
1527 	int i, this_cpu = sample->cpu;
1528 	int cpus_nr;
1529 	bool new_cpu = false;
1530 	const char *color = PERF_COLOR_NORMAL;
1531 	char stimestamp[32];
1532 
1533 	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1534 
1535 	if (this_cpu > sched->max_cpu)
1536 		sched->max_cpu = this_cpu;
1537 
1538 	if (sched->map.comp) {
1539 		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
1540 		if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
1541 			sched->map.comp_cpus[cpus_nr++] = this_cpu;
1542 			new_cpu = true;
1543 		}
1544 	} else
1545 		cpus_nr = sched->max_cpu;
1546 
1547 	timestamp0 = sched->cpu_last_switched[this_cpu];
1548 	sched->cpu_last_switched[this_cpu] = timestamp;
1549 	if (timestamp0)
1550 		delta = timestamp - timestamp0;
1551 	else
1552 		delta = 0;
1553 
1554 	if (delta < 0) {
1555 		pr_err("hm, delta: %" PRIi64 " < 0 ?\n", delta);
1556 		return -1;
1557 	}
1558 
1559 	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
1560 	if (sched_in == NULL)
1561 		return -1;
1562 
1563 	tr = thread__get_runtime(sched_in);
1564 	if (tr == NULL) {
1565 		thread__put(sched_in);
1566 		return -1;
1567 	}
1568 
1569 	sched->curr_thread[this_cpu] = thread__get(sched_in);
1570 
1571 	printf("  ");
1572 
1573 	new_shortname = 0;
1574 	if (!tr->shortname[0]) {
1575 		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
1576 			/*
1577 			 * Don't allocate a letter-number for swapper:0
1578 			 * as a shortname. Instead, we use '.' for it.
1579 			 */
1580 			tr->shortname[0] = '.';
1581 			tr->shortname[1] = ' ';
1582 		} else {
1583 			tr->shortname[0] = sched->next_shortname1;
1584 			tr->shortname[1] = sched->next_shortname2;
1585 
1586 			if (sched->next_shortname1 < 'Z') {
1587 				sched->next_shortname1++;
1588 			} else {
1589 				sched->next_shortname1 = 'A';
1590 				if (sched->next_shortname2 < '9')
1591 					sched->next_shortname2++;
1592 				else
1593 					sched->next_shortname2 = '0';
1594 			}
1595 		}
1596 		new_shortname = 1;
1597 	}
1598 
1599 	for (i = 0; i < cpus_nr; i++) {
1600 		int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
1601 		struct thread *curr_thread = sched->curr_thread[cpu];
1602 		struct thread_runtime *curr_tr;
1603 		const char *pid_color = color;
1604 		const char *cpu_color = color;
1605 
1606 		if (curr_thread && thread__has_color(curr_thread))
1607 			pid_color = COLOR_PIDS;
1608 
1609 		if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
1610 			continue;
1611 
1612 		if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
1613 			cpu_color = COLOR_CPUS;
1614 
1615 		if (cpu != this_cpu)
1616 			color_fprintf(stdout, color, " ");
1617 		else
1618 			color_fprintf(stdout, cpu_color, "*");
1619 
1620 		if (sched->curr_thread[cpu]) {
1621 			curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
1622 			if (curr_tr == NULL) {
1623 				thread__put(sched_in);
1624 				return -1;
1625 			}
1626 			color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
1627 		} else
1628 			color_fprintf(stdout, color, "   ");
1629 	}
1630 
1631 	if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
1632 		goto out;
1633 
1634 	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
1635 	color_fprintf(stdout, color, "  %12s secs ", stimestamp);
1636 	if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) {
1637 		const char *pid_color = color;
1638 
1639 		if (thread__has_color(sched_in))
1640 			pid_color = COLOR_PIDS;
1641 
1642 		color_fprintf(stdout, pid_color, "%s => %s:%d",
1643 		       tr->shortname, thread__comm_str(sched_in), sched_in->tid);
1644 		tr->comm_changed = false;
1645 	}
1646 
1647 	if (sched->map.comp && new_cpu)
1648 		color_fprintf(stdout, color, " (CPU %d)", this_cpu);
1649 
1650 out:
1651 	color_fprintf(stdout, color, "\n");
1652 
1653 	thread__put(sched_in);
1654 
1655 	return 0;
1656 }
1657 
1658 static int process_sched_switch_event(struct perf_tool *tool,
1659 				      struct evsel *evsel,
1660 				      struct perf_sample *sample,
1661 				      struct machine *machine)
1662 {
1663 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1664 	int this_cpu = sample->cpu, err = 0;
1665 	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
1666 	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");
1667 
1668 	if (sched->curr_pid[this_cpu] != (u32)-1) {
1669 		/*
1670 		 * Are we trying to switch away a PID that is
1671 		 * not current?
1672 		 */
1673 		if (sched->curr_pid[this_cpu] != prev_pid)
1674 			sched->nr_context_switch_bugs++;
1675 	}
1676 
1677 	if (sched->tp_handler->switch_event)
1678 		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1679 
1680 	sched->curr_pid[this_cpu] = next_pid;
1681 	return err;
1682 }
1683 
1684 static int process_sched_runtime_event(struct perf_tool *tool,
1685 				       struct evsel *evsel,
1686 				       struct perf_sample *sample,
1687 				       struct machine *machine)
1688 {
1689 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1690 
1691 	if (sched->tp_handler->runtime_event)
1692 		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1693 
1694 	return 0;
1695 }
1696 
1697 static int perf_sched__process_fork_event(struct perf_tool *tool,
1698 					  union perf_event *event,
1699 					  struct perf_sample *sample,
1700 					  struct machine *machine)
1701 {
1702 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1703 
1704 	/* run the fork event through the perf machinery */
1705 	perf_event__process_fork(tool, event, sample, machine);
1706 
1707 	/* and then run additional processing needed for this command */
1708 	if (sched->tp_handler->fork_event)
1709 		return sched->tp_handler->fork_event(sched, event, machine);
1710 
1711 	return 0;
1712 }
1713 
1714 static int process_sched_migrate_task_event(struct perf_tool *tool,
1715 					    struct evsel *evsel,
1716 					    struct perf_sample *sample,
1717 					    struct machine *machine)
1718 {
1719 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1720 
1721 	if (sched->tp_handler->migrate_task_event)
1722 		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1723 
1724 	return 0;
1725 }
1726 
1727 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1728 				  struct evsel *evsel,
1729 				  struct perf_sample *sample,
1730 				  struct machine *machine);
1731 
1732 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
1733 						 union perf_event *event __maybe_unused,
1734 						 struct perf_sample *sample,
1735 						 struct evsel *evsel,
1736 						 struct machine *machine)
1737 {
1738 	int err = 0;
1739 
1740 	if (evsel->handler != NULL) {
1741 		tracepoint_handler f = evsel->handler;
1742 		err = f(tool, evsel, sample, machine);
1743 	}
1744 
1745 	return err;
1746 }
1747 
1748 static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
1749 				    union perf_event *event,
1750 				    struct perf_sample *sample,
1751 				    struct machine *machine)
1752 {
1753 	struct thread *thread;
1754 	struct thread_runtime *tr;
1755 	int err;
1756 
1757 	err = perf_event__process_comm(tool, event, sample, machine);
1758 	if (err)
1759 		return err;
1760 
1761 	thread = machine__find_thread(machine, sample->pid, sample->tid);
1762 	if (!thread) {
1763 		pr_err("Internal error: can't find thread\n");
1764 		return -1;
1765 	}
1766 
1767 	tr = thread__get_runtime(thread);
1768 	if (tr == NULL) {
1769 		thread__put(thread);
1770 		return -1;
1771 	}
1772 
1773 	tr->comm_changed = true;
1774 	thread__put(thread);
1775 
1776 	return 0;
1777 }
1778 
1779 static int perf_sched__read_events(struct perf_sched *sched)
1780 {
1781 	const struct evsel_str_handler handlers[] = {
1782 		{ "sched:sched_switch",	      process_sched_switch_event, },
1783 		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
1784 		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
1785 		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
1786 		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
1787 	};
1788 	struct perf_session *session;
1789 	struct perf_data data = {
1790 		.path  = input_name,
1791 		.mode  = PERF_DATA_MODE_READ,
1792 		.force = sched->force,
1793 	};
1794 	int rc = -1;
1795 
1796 	session = perf_session__new(&data, false, &sched->tool);
1797 	if (session == NULL) {
1798 		pr_debug("No memory for session\n");
1799 		return -1;
1800 	}
1801 
1802 	symbol__init(&session->header.env);
1803 
1804 	if (perf_session__set_tracepoints_handlers(session, handlers))
1805 		goto out_delete;
1806 
1807 	if (perf_session__has_traces(session, "record -R")) {
1808 		int err = perf_session__process_events(session);
1809 		if (err) {
1810 			pr_err("Failed to process events, error %d\n", err);
1811 			goto out_delete;
1812 		}
1813 
1814 		sched->nr_events      = session->evlist->stats.nr_events[0];
1815 		sched->nr_lost_events = session->evlist->stats.total_lost;
1816 		sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1817 	}
1818 
1819 	rc = 0;
1820 out_delete:
1821 	perf_session__delete(session);
1822 	return rc;
1823 }
1824 
1825 /*
1826  * scheduling times are printed as msec.usec
1827  */
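/*
 * For example, nsecs = 1234567 prints as "1.234": 1 msec, 234 usec,
 * with the sub-microsecond remainder truncated.
 */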
1828 static inline void print_sched_time(unsigned long long nsecs, int width)
1829 {
1830 	unsigned long msecs;
1831 	unsigned long usecs;
1832 
1833 	msecs  = nsecs / NSEC_PER_MSEC;
1834 	nsecs -= msecs * NSEC_PER_MSEC;
1835 	usecs  = nsecs / NSEC_PER_USEC;
1836 	printf("%*lu.%03lu ", width, msecs, usecs);
1837 }
1838 
1839 /*
1840  * returns runtime data for event, allocating memory for it the
1841  * first time it is used.
1842  */
1843 static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel)
1844 {
1845 	struct evsel_runtime *r = evsel->priv;
1846 
1847 	if (r == NULL) {
1848 		r = zalloc(sizeof(struct evsel_runtime));
1849 		evsel->priv = r;
1850 	}
1851 
1852 	return r;
1853 }
1854 
1855 /*
1856  * save last time event was seen per cpu
1857  */
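/*
 * The per-cpu array grows in powers of two: the first save for cpu 5
 * allocates __roundup_pow_of_two(6) = 8 slots, and new slots are
 * zeroed so perf_evsel__get_time() returns 0 for cpus not seen yet.
 */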
1858 static void perf_evsel__save_time(struct evsel *evsel,
1859 				  u64 timestamp, u32 cpu)
1860 {
1861 	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
1862 
1863 	if (r == NULL)
1864 		return;
1865 
1866 	if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1867 		int i, n = __roundup_pow_of_two(cpu+1);
1868 		void *p = r->last_time;
1869 
1870 		p = realloc(r->last_time, n * sizeof(u64));
1871 		if (!p)
1872 			return;
1873 
1874 		r->last_time = p;
1875 		for (i = r->ncpu; i < n; ++i)
1876 			r->last_time[i] = (u64) 0;
1877 
1878 		r->ncpu = n;
1879 	}
1880 
1881 	r->last_time[cpu] = timestamp;
1882 }
1883 
1884 /* returns last time this event was seen on the given cpu */
1885 static u64 perf_evsel__get_time(struct evsel *evsel, u32 cpu)
1886 {
1887 	struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
1888 
1889 	if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
1890 		return 0;
1891 
1892 	return r->last_time[cpu];
1893 }
1894 
1895 static int comm_width = 30;
1896 
1897 static char *timehist_get_commstr(struct thread *thread)
1898 {
1899 	static char str[32];
1900 	const char *comm = thread__comm_str(thread);
1901 	pid_t tid = thread->tid;
1902 	pid_t pid = thread->pid_;
1903 	int n;
1904 
1905 	if (pid == 0)
1906 		n = scnprintf(str, sizeof(str), "%s", comm);
1907 
1908 	else if (tid != pid)
1909 		n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
1910 
1911 	else
1912 		n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
1913 
1914 	if (n > comm_width)
1915 		comm_width = n;
1916 
1917 	return str;
1918 }
1919 
1920 static void timehist_header(struct perf_sched *sched)
1921 {
1922 	u32 ncpus = sched->max_cpu + 1;
1923 	u32 i, j;
1924 
1925 	printf("%15s %6s ", "time", "cpu");
1926 
1927 	if (sched->show_cpu_visual) {
1928 		printf(" ");
1929 		for (i = 0, j = 0; i < ncpus; ++i) {
1930 			printf("%x", j++);
1931 			if (j > 15)
1932 				j = 0;
1933 		}
1934 		printf(" ");
1935 	}
1936 
1937 	printf(" %-*s  %9s  %9s  %9s", comm_width,
1938 		"task name", "wait time", "sch delay", "run time");
1939 
1940 	if (sched->show_state)
1941 		printf("  %s", "state");
1942 
1943 	printf("\n");
1944 
1945 	/*
1946 	 * units row
1947 	 */
1948 	printf("%15s %-6s ", "", "");
1949 
1950 	if (sched->show_cpu_visual)
1951 		printf(" %*s ", ncpus, "");
1952 
1953 	printf(" %-*s  %9s  %9s  %9s", comm_width,
1954 	       "[tid/pid]", "(msec)", "(msec)", "(msec)");
1955 
1956 	if (sched->show_state)
1957 		printf("  %5s", "");
1958 
1959 	printf("\n");
1960 
1961 	/*
1962 	 * separator
1963 	 */
1964 	printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
1965 
1966 	if (sched->show_cpu_visual)
1967 		printf(" %.*s ", ncpus, graph_dotted_line);
1968 
1969 	printf(" %.*s  %.9s  %.9s  %.9s", comm_width,
1970 		graph_dotted_line, graph_dotted_line, graph_dotted_line,
1971 		graph_dotted_line);
1972 
1973 	if (sched->show_state)
1974 		printf("  %.5s", graph_dotted_line);
1975 
1976 	printf("\n");
1977 }
1978 
1979 static char task_state_char(struct thread *thread, int state)
1980 {
1981 	static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1982 	unsigned bit = state ? ffs(state) : 0;
1983 
1984 	/* 'I' for idle */
1985 	if (thread->tid == 0)
1986 		return 'I';
1987 
1988 	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
1989 }
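
/*
 * Worked example: prev_state TASK_INTERRUPTIBLE (1) gives ffs(1) == 1 and
 * state_to_char[1] == 'S'; TASK_UNINTERRUPTIBLE (2) gives 'D';
 * TASK_RUNNING (0) keeps bit 0 and maps to 'R'.  Anything past the known
 * states falls through to '?', and the idle task is always 'I'.
 */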
1990 
1991 static void timehist_print_sample(struct perf_sched *sched,
1992 				  struct evsel *evsel,
1993 				  struct perf_sample *sample,
1994 				  struct addr_location *al,
1995 				  struct thread *thread,
1996 				  u64 t, int state)
1997 {
1998 	struct thread_runtime *tr = thread__priv(thread);
1999 	const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
2000 	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
2001 	u32 max_cpus = sched->max_cpu + 1;
2002 	char tstr[64];
2003 	char nstr[30];
2004 	u64 wait_time;
2005 
2006 	timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2007 	printf("%15s [%04d] ", tstr, sample->cpu);
2008 
2009 	if (sched->show_cpu_visual) {
2010 		u32 i;
2011 		char c;
2012 
2013 		printf(" ");
2014 		for (i = 0; i < max_cpus; ++i) {
2015 			/* flag idle times with 'i'; others are sched events */
2016 			if (i == sample->cpu)
2017 				c = (thread->tid == 0) ? 'i' : 's';
2018 			else
2019 				c = ' ';
2020 			printf("%c", c);
2021 		}
2022 		printf(" ");
2023 	}
2024 
2025 	printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2026 
2027 	wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2028 	print_sched_time(wait_time, 6);
2029 
2030 	print_sched_time(tr->dt_delay, 6);
2031 	print_sched_time(tr->dt_run, 6);
2032 
2033 	if (sched->show_state)
2034 		printf(" %5c ", task_state_char(thread, state));
2035 
2036 	if (sched->show_next) {
2037 		snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2038 		printf(" %-*s", comm_width, nstr);
2039 	}
2040 
2041 	if (sched->show_wakeups && !sched->show_next)
2042 		printf("  %-*s", comm_width, "");
2043 
2044 	if (thread->tid == 0)
2045 		goto out;
2046 
2047 	if (sched->show_callchain)
2048 		printf("  ");
2049 
2050 	sample__fprintf_sym(sample, al, 0,
2051 			    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2052 			    EVSEL__PRINT_CALLCHAIN_ARROW |
2053 			    EVSEL__PRINT_SKIP_IGNORED,
2054 			    &callchain_cursor, stdout);
2055 
2056 out:
2057 	printf("\n");
2058 }
2059 
2060 /*
2061  * Explanation of delta-time stats:
2062  *
2063  *            t = time of current schedule out event
2064  *        tprev = time of previous sched out event
2065  *                also time of schedule-in event for current task
2066  *    last_time = time of last sched change event for current task
2067  *                (i.e., time the process was last scheduled out)
2068  * ready_to_run = time of wakeup for current task
2069  *
2070  * -----|------------|------------|------------|------
2071  *    last         ready        tprev          t
2072  *    time         to run
2073  *
2074  *      |-------- dt_wait --------|
2075  *                   |- dt_delay -|-- dt_run --|
2076  *
2077  *   dt_run = run time of current task
2078  *  dt_wait = time between last schedule out event for task and tprev
2079  *            represents time spent off the cpu
2080  * dt_delay = time between wakeup and schedule-in of task
2081  */
2082 
2083 static void timehist_update_runtime_stats(struct thread_runtime *r,
2084 					 u64 t, u64 tprev)
2085 {
2086 	r->dt_delay   = 0;
2087 	r->dt_sleep   = 0;
2088 	r->dt_iowait  = 0;
2089 	r->dt_preempt = 0;
2090 	r->dt_run     = 0;
2091 
2092 	if (tprev) {
2093 		r->dt_run = t - tprev;
2094 		if (r->ready_to_run) {
2095 			if (r->ready_to_run > tprev)
2096 				pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2097 			else
2098 				r->dt_delay = tprev - r->ready_to_run;
2099 		}
2100 
2101 		if (r->last_time > tprev)
2102 			pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2103 		else if (r->last_time) {
2104 			u64 dt_wait = tprev - r->last_time;
2105 
2106 			if (r->last_state == TASK_RUNNING)
2107 				r->dt_preempt = dt_wait;
2108 			else if (r->last_state == TASK_UNINTERRUPTIBLE)
2109 				r->dt_iowait = dt_wait;
2110 			else
2111 				r->dt_sleep = dt_wait;
2112 		}
2113 	}
2114 
2115 	update_stats(&r->run_stats, r->dt_run);
2116 
2117 	r->total_run_time     += r->dt_run;
2118 	r->total_delay_time   += r->dt_delay;
2119 	r->total_sleep_time   += r->dt_sleep;
2120 	r->total_iowait_time  += r->dt_iowait;
2121 	r->total_preempt_time += r->dt_preempt;
2122 }
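
/*
 * Worked example (editorial): with last_time = 100, ready_to_run = 120,
 * tprev = 150 and t = 170, the above computes dt_run = 170 - 150 = 20 and
 * dt_delay = 150 - 120 = 30, and the wait of 150 - 100 = 50 is accounted
 * to dt_preempt, dt_iowait or dt_sleep depending on whether last_state
 * was TASK_RUNNING, TASK_UNINTERRUPTIBLE or anything else.
 */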
2123 
2124 static bool is_idle_sample(struct perf_sample *sample,
2125 			   struct evsel *evsel)
2126 {
2127 	/* pid 0 == swapper == idle task */
2128 	if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0)
2129 		return perf_evsel__intval(evsel, sample, "prev_pid") == 0;
2130 
2131 	return sample->pid == 0;
2132 }
2133 
2134 static void save_task_callchain(struct perf_sched *sched,
2135 				struct perf_sample *sample,
2136 				struct evsel *evsel,
2137 				struct machine *machine)
2138 {
2139 	struct callchain_cursor *cursor = &callchain_cursor;
2140 	struct thread *thread;
2141 
2142 	/* want main thread for process - has maps */
2143 	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2144 	if (thread == NULL) {
2145 		pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2146 		return;
2147 	}
2148 
2149 	if (!sched->show_callchain || sample->callchain == NULL)
2150 		return;
2151 
2152 	if (thread__resolve_callchain(thread, cursor, evsel, sample,
2153 				      NULL, NULL, sched->max_stack + 2) != 0) {
2154 		if (verbose > 0)
2155 			pr_err("Failed to resolve callchain. Skipping\n");
2156 
2157 		return;
2158 	}
2159 
2160 	callchain_cursor_commit(cursor);
2161 
2162 	while (true) {
2163 		struct callchain_cursor_node *node;
2164 		struct symbol *sym;
2165 
2166 		node = callchain_cursor_current(cursor);
2167 		if (node == NULL)
2168 			break;
2169 
2170 		sym = node->sym;
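		/*
		 * Scheduler-internal frames are noise in the printed stack;
		 * marking them ignored lets EVSEL__PRINT_SKIP_IGNORED and
		 * the folded-callchain printer below elide them.
		 */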
2171 		if (sym) {
2172 			if (!strcmp(sym->name, "schedule") ||
2173 			    !strcmp(sym->name, "__schedule") ||
2174 			    !strcmp(sym->name, "preempt_schedule"))
2175 				sym->ignore = 1;
2176 		}
2177 
2178 		callchain_cursor_advance(cursor);
2179 	}
2180 }
2181 
2182 static int init_idle_thread(struct thread *thread)
2183 {
2184 	struct idle_thread_runtime *itr;
2185 
2186 	thread__set_comm(thread, idle_comm, 0);
2187 
2188 	itr = zalloc(sizeof(*itr));
2189 	if (itr == NULL)
2190 		return -ENOMEM;
2191 
2192 	init_stats(&itr->tr.run_stats);
2193 	callchain_init(&itr->callchain);
2194 	callchain_cursor_reset(&itr->cursor);
2195 	thread__set_priv(thread, itr);
2196 
2197 	return 0;
2198 }
2199 
2200 /*
2201  * Track idle stats per cpu by maintaining a local thread
2202  * struct for the idle task on each cpu.
2203  */
2204 static int init_idle_threads(int ncpu)
2205 {
2206 	int i, ret;
2207 
2208 	idle_threads = zalloc(ncpu * sizeof(struct thread *));
2209 	if (!idle_threads)
2210 		return -ENOMEM;
2211 
2212 	idle_max_cpu = ncpu;
2213 
2214 	/* allocate and initialize a thread struct for each cpu */
2215 	for (i = 0; i < ncpu; ++i) {
2216 		idle_threads[i] = thread__new(0, 0);
2217 		if (idle_threads[i] == NULL)
2218 			return -ENOMEM;
2219 
2220 		ret = init_idle_thread(idle_threads[i]);
2221 		if (ret < 0)
2222 			return ret;
2223 	}
2224 
2225 	return 0;
2226 }
2227 
2228 static void free_idle_threads(void)
2229 {
2230 	int i;
2231 
2232 	if (idle_threads == NULL)
2233 		return;
2234 
2235 	for (i = 0; i < idle_max_cpu; ++i) {
2236 		if (idle_threads[i])
2237 			thread__delete(idle_threads[i]);
2238 	}
2239 
2240 	free(idle_threads);
2241 }
2242 
2243 static struct thread *get_idle_thread(int cpu)
2244 {
2245 	/*
2246 	 * expand/allocate array of pointers to local thread
2247 	 * structs if needed
2248 	 */
2249 	if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2250 		int i, j = __roundup_pow_of_two(cpu+1);
2251 		void *p;
2252 
2253 		p = realloc(idle_threads, j * sizeof(struct thread *));
2254 		if (!p)
2255 			return NULL;
2256 
2257 		idle_threads = (struct thread **) p;
2258 		for (i = idle_max_cpu; i < j; ++i)
2259 			idle_threads[i] = NULL;
2260 
2261 		idle_max_cpu = j;
2262 	}
2263 
2264 	/* allocate a new thread struct if needed */
2265 	if (idle_threads[cpu] == NULL) {
2266 		idle_threads[cpu] = thread__new(0, 0);
2267 
2268 		if (idle_threads[cpu]) {
2269 			if (init_idle_thread(idle_threads[cpu]) < 0)
2270 				return NULL;
2271 		}
2272 	}
2273 
2274 	return idle_threads[cpu];
2275 }
2276 
2277 static void save_idle_callchain(struct perf_sched *sched,
2278 				struct idle_thread_runtime *itr,
2279 				struct perf_sample *sample)
2280 {
2281 	if (!sched->show_callchain || sample->callchain == NULL)
2282 		return;
2283 
2284 	callchain_cursor__copy(&itr->cursor, &callchain_cursor);
2285 }
2286 
2287 static struct thread *timehist_get_thread(struct perf_sched *sched,
2288 					  struct perf_sample *sample,
2289 					  struct machine *machine,
2290 					  struct evsel *evsel)
2291 {
2292 	struct thread *thread;
2293 
2294 	if (is_idle_sample(sample, evsel)) {
2295 		thread = get_idle_thread(sample->cpu);
2296 		if (thread == NULL)
2297 			pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2298 
2299 	} else {
2300 		/* there were samples with tid 0 but non-zero pid */
2301 		thread = machine__findnew_thread(machine, sample->pid,
2302 						 sample->tid ?: sample->pid);
2303 		if (thread == NULL) {
2304 			pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2305 				 sample->tid);
2306 		}
2307 
2308 		save_task_callchain(sched, sample, evsel, machine);
2309 		if (sched->idle_hist) {
2310 			struct thread *idle;
2311 			struct idle_thread_runtime *itr;
2312 
2313 			idle = get_idle_thread(sample->cpu);
2314 			if (idle == NULL) {
2315 				pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2316 				return NULL;
2317 			}
2318 
2319 			itr = thread__priv(idle);
2320 			if (itr == NULL)
2321 				return NULL;
2322 
2323 			itr->last_thread = thread;
2324 
2325 			/* copy task callchain when entering to idle */
2326 			if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
2327 				save_idle_callchain(sched, itr, sample);
2328 		}
2329 	}
2330 
2331 	return thread;
2332 }
2333 
2334 static bool timehist_skip_sample(struct perf_sched *sched,
2335 				 struct thread *thread,
2336 				 struct evsel *evsel,
2337 				 struct perf_sample *sample)
2338 {
2339 	bool rc = false;
2340 
2341 	if (thread__is_filtered(thread)) {
2342 		rc = true;
2343 		sched->skipped_samples++;
2344 	}
2345 
2346 	if (sched->idle_hist) {
2347 		if (strcmp(perf_evsel__name(evsel), "sched:sched_switch"))
2348 			rc = true;
2349 		else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 &&
2350 			 perf_evsel__intval(evsel, sample, "next_pid") != 0)
2351 			rc = true;
2352 	}
2353 
2354 	return rc;
2355 }
2356 
2357 static void timehist_print_wakeup_event(struct perf_sched *sched,
2358 					struct evsel *evsel,
2359 					struct perf_sample *sample,
2360 					struct machine *machine,
2361 					struct thread *awakened)
2362 {
2363 	struct thread *thread;
2364 	char tstr[64];
2365 
2366 	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2367 	if (thread == NULL)
2368 		return;
2369 
2370 	/* show the wakeup unless both the woken task and the waker are filtered */
2371 	if (timehist_skip_sample(sched, thread, evsel, sample) &&
2372 	    timehist_skip_sample(sched, awakened, evsel, sample)) {
2373 		return;
2374 	}
2375 
2376 	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2377 	printf("%15s [%04d] ", tstr, sample->cpu);
2378 	if (sched->show_cpu_visual)
2379 		printf(" %*s ", sched->max_cpu + 1, "");
2380 
2381 	printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2382 
2383 	/* dt spacer */
2384 	printf("  %9s  %9s  %9s ", "", "", "");
2385 
2386 	printf("awakened: %s", timehist_get_commstr(awakened));
2387 
2388 	printf("\n");
2389 }
2390 
2391 static int timehist_sched_wakeup_event(struct perf_tool *tool,
2392 				       union perf_event *event __maybe_unused,
2393 				       struct evsel *evsel,
2394 				       struct perf_sample *sample,
2395 				       struct machine *machine)
2396 {
2397 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2398 	struct thread *thread;
2399 	struct thread_runtime *tr = NULL;
2400 	/* we want the pid of the awakened task, not the pid in the sample */
2401 	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
2402 
2403 	thread = machine__findnew_thread(machine, 0, pid);
2404 	if (thread == NULL)
2405 		return -1;
2406 
2407 	tr = thread__get_runtime(thread);
2408 	if (tr == NULL)
2409 		return -1;
2410 
2411 	if (tr->ready_to_run == 0)
2412 		tr->ready_to_run = sample->time;
2413 
2414 	/* show wakeups if requested */
2415 	if (sched->show_wakeups &&
2416 	    !perf_time__skip_sample(&sched->ptime, sample->time))
2417 		timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2418 
2419 	return 0;
2420 }
2421 
2422 static void timehist_print_migration_event(struct perf_sched *sched,
2423 					struct evsel *evsel,
2424 					struct perf_sample *sample,
2425 					struct machine *machine,
2426 					struct thread *migrated)
2427 {
2428 	struct thread *thread;
2429 	char tstr[64];
2430 	u32 max_cpus;
2431 	u32 ocpu, dcpu;
2432 
2433 	if (sched->summary_only)
2434 		return;
2435 
2436 	max_cpus = sched->max_cpu + 1;
2437 	ocpu = perf_evsel__intval(evsel, sample, "orig_cpu");
2438 	dcpu = perf_evsel__intval(evsel, sample, "dest_cpu");
2439 
2440 	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2441 	if (thread == NULL)
2442 		return;
2443 
2444 	if (timehist_skip_sample(sched, thread, evsel, sample) &&
2445 	    timehist_skip_sample(sched, migrated, evsel, sample)) {
2446 		return;
2447 	}
2448 
2449 	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2450 	printf("%15s [%04d] ", tstr, sample->cpu);
2451 
2452 	if (sched->show_cpu_visual) {
2453 		u32 i;
2454 		char c;
2455 
2456 		printf("  ");
2457 		for (i = 0; i < max_cpus; ++i) {
2458 			c = (i == sample->cpu) ? 'm' : ' ';
2459 			printf("%c", c);
2460 		}
2461 		printf("  ");
2462 	}
2463 
2464 	printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2465 
2466 	/* dt spacer */
2467 	printf("  %9s  %9s  %9s ", "", "", "");
2468 
2469 	printf("migrated: %s", timehist_get_commstr(migrated));
2470 	printf(" cpu %d => %d", ocpu, dcpu);
2471 
2472 	printf("\n");
2473 }
2474 
2475 static int timehist_migrate_task_event(struct perf_tool *tool,
2476 				       union perf_event *event __maybe_unused,
2477 				       struct evsel *evsel,
2478 				       struct perf_sample *sample,
2479 				       struct machine *machine)
2480 {
2481 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2482 	struct thread *thread;
2483 	struct thread_runtime *tr = NULL;
2484 	/* we want the pid of the migrated task, not the pid in the sample */
2485 	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
2486 
2487 	thread = machine__findnew_thread(machine, 0, pid);
2488 	if (thread == NULL)
2489 		return -1;
2490 
2491 	tr = thread__get_runtime(thread);
2492 	if (tr == NULL)
2493 		return -1;
2494 
2495 	tr->migrations++;
2496 
2497 	/* show migrations if requested */
2498 	timehist_print_migration_event(sched, evsel, sample, machine, thread);
2499 
2500 	return 0;
2501 }
2502 
2503 static int timehist_sched_change_event(struct perf_tool *tool,
2504 				       union perf_event *event,
2505 				       struct evsel *evsel,
2506 				       struct perf_sample *sample,
2507 				       struct machine *machine)
2508 {
2509 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2510 	struct perf_time_interval *ptime = &sched->ptime;
2511 	struct addr_location al;
2512 	struct thread *thread;
2513 	struct thread_runtime *tr = NULL;
2514 	u64 tprev, t = sample->time;
2515 	int rc = 0;
2516 	int state = perf_evsel__intval(evsel, sample, "prev_state");
2517 
2519 	if (machine__resolve(machine, &al, sample) < 0) {
2520 		pr_err("problem processing %d event. skipping it\n",
2521 		       event->header.type);
2522 		rc = -1;
2523 		goto out;
2524 	}
2525 
2526 	thread = timehist_get_thread(sched, sample, machine, evsel);
2527 	if (thread == NULL) {
2528 		rc = -1;
2529 		goto out;
2530 	}
2531 
2532 	if (timehist_skip_sample(sched, thread, evsel, sample))
2533 		goto out;
2534 
2535 	tr = thread__get_runtime(thread);
2536 	if (tr == NULL) {
2537 		rc = -1;
2538 		goto out;
2539 	}
2540 
2541 	tprev = perf_evsel__get_time(evsel, sample->cpu);
2542 
2543 	/*
2544 	 * If a start time is given:
2545 	 * - sample time is before the window of interest: skip the sample
2546 	 * - tprev is before the window of interest: clamp it to the window start
2547 	 */
2548 	if (ptime->start && ptime->start > t)
2549 		goto out;
2550 
2551 	if (tprev && ptime->start > tprev)
2552 		tprev = ptime->start;
2553 
2554 	/*
2555 	 * If an end time is given:
2556 	 * - the previous sched event is already past the window: we are done
2557 	 * - sample time is beyond the window of interest: clamp it to the
2558 	 *   window end so stats for the window are closed out correctly
2559 	 */
2560 	if (ptime->end) {
2561 		if (tprev > ptime->end)
2562 			goto out;
2563 
2564 		if (t > ptime->end)
2565 			t = ptime->end;
2566 	}
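
	/*
	 * Example: with a window of [1000, 2000], a slice with tprev = 900
	 * and t = 2100 is clamped to [1000, 2000], so only the portion
	 * inside the window of interest is credited below.
	 */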
2567 
2568 	if (!sched->idle_hist || thread->tid == 0) {
2569 		timehist_update_runtime_stats(tr, t, tprev);
2570 
2571 		if (sched->idle_hist) {
2572 			struct idle_thread_runtime *itr = (void *)tr;
2573 			struct thread_runtime *last_tr;
2574 
2575 			BUG_ON(thread->tid != 0);
2576 
2577 			if (itr->last_thread == NULL)
2578 				goto out;
2579 
2580 			/* add current idle time as last thread's runtime */
2581 			last_tr = thread__get_runtime(itr->last_thread);
2582 			if (last_tr == NULL)
2583 				goto out;
2584 
2585 			timehist_update_runtime_stats(last_tr, t, tprev);
2586 			/*
2587 			 * Reset the last thread's delta times: they are not
2588 			 * updated here and would otherwise show stale values
2589 			 * next time.  Only total run time and run stats matter.
2590 			 */
2591 			last_tr->dt_run = 0;
2592 			last_tr->dt_delay = 0;
2593 			last_tr->dt_sleep = 0;
2594 			last_tr->dt_iowait = 0;
2595 			last_tr->dt_preempt = 0;
2596 
2597 			if (itr->cursor.nr)
2598 				callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2599 
2600 			itr->last_thread = NULL;
2601 		}
2602 	}
2603 
2604 	if (!sched->summary_only)
2605 		timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2606 
2607 out:
2608 	if (sched->hist_time.start == 0 && t >= ptime->start)
2609 		sched->hist_time.start = t;
2610 	if (ptime->end == 0 || t <= ptime->end)
2611 		sched->hist_time.end = t;
2612 
2613 	if (tr) {
2614 		/* this sched_switch event's time becomes the task's last-seen time */
2615 		tr->last_time = sample->time;
2616 
2617 		/* last state is used to determine where to account wait time */
2618 		tr->last_state = state;
2619 
2620 		/* this is a sched-out event for the task, so reset its ready-to-run time */
2621 		tr->ready_to_run = 0;
2622 	}
2623 
2624 	perf_evsel__save_time(evsel, sample->time, sample->cpu);
2625 
2626 	return rc;
2627 }
2628 
2629 static int timehist_sched_switch_event(struct perf_tool *tool,
2630 			     union perf_event *event,
2631 			     struct evsel *evsel,
2632 			     struct perf_sample *sample,
2633 			     struct machine *machine __maybe_unused)
2634 {
2635 	return timehist_sched_change_event(tool, event, evsel, sample, machine);
2636 }
2637 
2638 static int process_lost(struct perf_tool *tool __maybe_unused,
2639 			union perf_event *event,
2640 			struct perf_sample *sample,
2641 			struct machine *machine __maybe_unused)
2642 {
2643 	char tstr[64];
2644 
2645 	timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2646 	printf("%15s ", tstr);
2647 	printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2648 
2649 	return 0;
2650 }
2651 
2653 static void print_thread_runtime(struct thread *t,
2654 				 struct thread_runtime *r)
2655 {
2656 	double mean = avg_stats(&r->run_stats);
2657 	float stddev;
2658 
2659 	printf("%*s   %5d  %9" PRIu64 " ",
2660 	       comm_width, timehist_get_commstr(t), t->ppid,
2661 	       (u64) r->run_stats.n);
2662 
2663 	print_sched_time(r->total_run_time, 8);
2664 	stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2665 	print_sched_time(r->run_stats.min, 6);
2666 	printf(" ");
2667 	print_sched_time((u64) mean, 6);
2668 	printf(" ");
2669 	print_sched_time(r->run_stats.max, 6);
2670 	printf("  ");
2671 	printf("%5.2f", stddev);
2672 	printf("   %5" PRIu64, r->migrations);
2673 	printf("\n");
2674 }
2675 
2676 static void print_thread_waittime(struct thread *t,
2677 				  struct thread_runtime *r)
2678 {
2679 	printf("%*s   %5d  %9" PRIu64 " ",
2680 	       comm_width, timehist_get_commstr(t), t->ppid,
2681 	       (u64) r->run_stats.n);
2682 
2683 	print_sched_time(r->total_run_time, 8);
2684 	print_sched_time(r->total_sleep_time, 6);
2685 	printf(" ");
2686 	print_sched_time(r->total_iowait_time, 6);
2687 	printf(" ");
2688 	print_sched_time(r->total_preempt_time, 6);
2689 	printf(" ");
2690 	print_sched_time(r->total_delay_time, 6);
2691 	printf("\n");
2692 }
2693 
2694 struct total_run_stats {
2695 	struct perf_sched *sched;
2696 	u64  sched_count;
2697 	u64  task_count;
2698 	u64  total_run_time;
2699 };
2700 
2701 static int __show_thread_runtime(struct thread *t, void *priv)
2702 {
2703 	struct total_run_stats *stats = priv;
2704 	struct thread_runtime *r;
2705 
2706 	if (thread__is_filtered(t))
2707 		return 0;
2708 
2709 	r = thread__priv(t);
2710 	if (r && r->run_stats.n) {
2711 		stats->task_count++;
2712 		stats->sched_count += r->run_stats.n;
2713 		stats->total_run_time += r->total_run_time;
2714 
2715 		if (stats->sched->show_state)
2716 			print_thread_waittime(t, r);
2717 		else
2718 			print_thread_runtime(t, r);
2719 	}
2720 
2721 	return 0;
2722 }
2723 
2724 static int show_thread_runtime(struct thread *t, void *priv)
2725 {
2726 	if (t->dead)
2727 		return 0;
2728 
2729 	return __show_thread_runtime(t, priv);
2730 }
2731 
2732 static int show_deadthread_runtime(struct thread *t, void *priv)
2733 {
2734 	if (!t->dead)
2735 		return 0;
2736 
2737 	return __show_thread_runtime(t, priv);
2738 }
2739 
2740 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2741 {
2742 	const char *sep = " <- ";
2743 	struct callchain_list *chain;
2744 	size_t ret = 0;
2745 	char bf[1024];
2746 	bool first;
2747 
2748 	if (node == NULL)
2749 		return 0;
2750 
2751 	ret = callchain__fprintf_folded(fp, node->parent);
2752 	first = (ret == 0);
2753 
2754 	list_for_each_entry(chain, &node->val, list) {
2755 		if (chain->ip >= PERF_CONTEXT_MAX)
2756 			continue;
2757 		if (chain->ms.sym && chain->ms.sym->ignore)
2758 			continue;
2759 		ret += fprintf(fp, "%s%s", first ? "" : sep,
2760 			       callchain_list__sym_name(chain, bf, sizeof(bf),
2761 							false));
2762 		first = false;
2763 	}
2764 
2765 	return ret;
2766 }
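
/*
 * Output example (illustrative symbol names):
 *
 *   worker_thread <- kthread <- ret_from_fork
 *
 * Context markers (ip >= PERF_CONTEXT_MAX) and symbols flagged as
 * ignored - the schedule*() frames marked in save_task_callchain() -
 * are skipped.
 */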
2767 
2768 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2769 {
2770 	size_t ret = 0;
2771 	FILE *fp = stdout;
2772 	struct callchain_node *chain;
2773 	struct rb_node *rb_node = rb_first_cached(root);
2774 
2775 	printf("  %16s  %8s  %s\n", "Idle time (msec)", "Count", "Callchains");
2776 	printf("  %.16s  %.8s  %.50s\n", graph_dotted_line, graph_dotted_line,
2777 	       graph_dotted_line);
2778 
2779 	while (rb_node) {
2780 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
2781 		rb_node = rb_next(rb_node);
2782 
2783 		ret += fprintf(fp, "  ");
2784 		print_sched_time(chain->hit, 12);
2785 		ret += 16;  /* account for print_sched_time(): 2nd arg + 4 chars */
2786 		ret += fprintf(fp, " %8d  ", chain->count);
2787 		ret += callchain__fprintf_folded(fp, chain);
2788 		ret += fprintf(fp, "\n");
2789 	}
2790 
2791 	return ret;
2792 }
2793 
2794 static void timehist_print_summary(struct perf_sched *sched,
2795 				   struct perf_session *session)
2796 {
2797 	struct machine *m = &session->machines.host;
2798 	struct total_run_stats totals;
2799 	u64 task_count;
2800 	struct thread *t;
2801 	struct thread_runtime *r;
2802 	int i;
2803 	u64 hist_time = sched->hist_time.end - sched->hist_time.start;
2804 
2805 	memset(&totals, 0, sizeof(totals));
2806 	totals.sched = sched;
2807 
2808 	if (sched->idle_hist) {
2809 		printf("\nIdle-time summary\n");
2810 		printf("%*s  parent  sched-out  ", comm_width, "comm");
2811 		printf("  idle-time   min-idle    avg-idle    max-idle  stddev  migrations\n");
2812 	} else if (sched->show_state) {
2813 		printf("\nWait-time summary\n");
2814 		printf("%*s  parent   sched-in  ", comm_width, "comm");
2815 		printf("   run-time      sleep      iowait     preempt       delay\n");
2816 	} else {
2817 		printf("\nRuntime summary\n");
2818 		printf("%*s  parent   sched-in  ", comm_width, "comm");
2819 		printf("   run-time    min-run     avg-run     max-run  stddev  migrations\n");
2820 	}
2821 	printf("%*s            (count)  ", comm_width, "");
2822 	printf("     (msec)     (msec)      (msec)      (msec)       %s\n",
2823 	       sched->show_state ? "(msec)" : "%");
2824 	printf("%.117s\n", graph_dotted_line);
2825 
2826 	machine__for_each_thread(m, show_thread_runtime, &totals);
2827 	task_count = totals.task_count;
2828 	if (!task_count)
2829 		printf("<no still running tasks>\n");
2830 
2831 	printf("\nTerminated tasks:\n");
2832 	machine__for_each_thread(m, show_deadthread_runtime, &totals);
2833 	if (task_count == totals.task_count)
2834 		printf("<no terminated tasks>\n");
2835 
2836 	/* CPU idle stats not tracked when samples were skipped */
2837 	if (sched->skipped_samples && !sched->idle_hist)
2838 		return;
2839 
2840 	printf("\nIdle stats:\n");
2841 	for (i = 0; i < idle_max_cpu; ++i) {
2842 		t = idle_threads[i];
2843 		if (!t)
2844 			continue;
2845 
2846 		r = thread__priv(t);
2847 		if (r && r->run_stats.n) {
2848 			totals.sched_count += r->run_stats.n;
2849 			printf("    CPU %2d idle for ", i);
2850 			print_sched_time(r->total_run_time, 6);
2851 			printf(" msec  (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
2852 		} else
2853 			printf("    CPU %2d idle entire time window\n", i);
2854 	}
2855 
2856 	if (sched->idle_hist && sched->show_callchain) {
2857 		callchain_param.mode  = CHAIN_FOLDED;
2858 		callchain_param.value = CCVAL_PERIOD;
2859 
2860 		callchain_register_param(&callchain_param);
2861 
2862 		printf("\nIdle stats by callchain:\n");
2863 		for (i = 0; i < idle_max_cpu; ++i) {
2864 			struct idle_thread_runtime *itr;
2865 
2866 			t = idle_threads[i];
2867 			if (!t)
2868 				continue;
2869 
2870 			itr = thread__priv(t);
2871 			if (itr == NULL)
2872 				continue;
2873 
2874 			callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
2875 					     0, &callchain_param);
2876 
2877 			printf("  CPU %2d:", i);
2878 			print_sched_time(itr->tr.total_run_time, 6);
2879 			printf(" msec\n");
2880 			timehist_print_idlehist_callchain(&itr->sorted_root);
2881 			printf("\n");
2882 		}
2883 	}
2884 
2885 	printf("\n"
2886 	       "    Total number of unique tasks: %" PRIu64 "\n"
2887 	       "Total number of context switches: %" PRIu64 "\n",
2888 	       totals.task_count, totals.sched_count);
2889 
2890 	printf("           Total run time (msec): ");
2891 	print_sched_time(totals.total_run_time, 2);
2892 	printf("\n");
2893 
2894 	printf("    Total scheduling time (msec): ");
2895 	print_sched_time(hist_time, 2);
2896 	printf(" (x %d)\n", sched->max_cpu);
2897 }
2898 
2899 typedef int (*sched_handler)(struct perf_tool *tool,
2900 			  union perf_event *event,
2901 			  struct evsel *evsel,
2902 			  struct perf_sample *sample,
2903 			  struct machine *machine);
2904 
2905 static int perf_timehist__process_sample(struct perf_tool *tool,
2906 					 union perf_event *event,
2907 					 struct perf_sample *sample,
2908 					 struct evsel *evsel,
2909 					 struct machine *machine)
2910 {
2911 	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2912 	int err = 0;
2913 	int this_cpu = sample->cpu;
2914 
2915 	if (this_cpu > sched->max_cpu)
2916 		sched->max_cpu = this_cpu;
2917 
2918 	if (evsel->handler != NULL) {
2919 		sched_handler f = evsel->handler;
2920 
2921 		err = f(tool, event, evsel, sample, machine);
2922 	}
2923 
2924 	return err;
2925 }
2926 
2927 static int timehist_check_attr(struct perf_sched *sched,
2928 			       struct evlist *evlist)
2929 {
2930 	struct evsel *evsel;
2931 	struct evsel_runtime *er;
2932 
2933 	list_for_each_entry(evsel, &evlist->core.entries, core.node) {
2934 		er = perf_evsel__get_runtime(evsel);
2935 		if (er == NULL) {
2936 			pr_err("Failed to allocate memory for evsel runtime data\n");
2937 			return -1;
2938 		}
2939 
2940 		if (sched->show_callchain && !evsel__has_callchain(evsel)) {
2941 			pr_info("Samples do not have callchains.\n");
2942 			sched->show_callchain = 0;
2943 			symbol_conf.use_callchain = 0;
2944 		}
2945 	}
2946 
2947 	return 0;
2948 }
2949 
2950 static int perf_sched__timehist(struct perf_sched *sched)
2951 {
2952 	const struct evsel_str_handler handlers[] = {
2953 		{ "sched:sched_switch",       timehist_sched_switch_event, },
2954 		{ "sched:sched_wakeup",	      timehist_sched_wakeup_event, },
2955 		{ "sched:sched_wakeup_new",   timehist_sched_wakeup_event, },
2956 	};
2957 	const struct evsel_str_handler migrate_handlers[] = {
2958 		{ "sched:sched_migrate_task", timehist_migrate_task_event, },
2959 	};
2960 	struct perf_data data = {
2961 		.path  = input_name,
2962 		.mode  = PERF_DATA_MODE_READ,
2963 		.force = sched->force,
2964 	};
2965 
2966 	struct perf_session *session;
2967 	struct evlist *evlist;
2968 	int err = -1;
2969 
2970 	/*
2971 	 * event handlers for timehist option
2972 	 */
2973 	sched->tool.sample	 = perf_timehist__process_sample;
2974 	sched->tool.mmap	 = perf_event__process_mmap;
2975 	sched->tool.comm	 = perf_event__process_comm;
2976 	sched->tool.exit	 = perf_event__process_exit;
2977 	sched->tool.fork	 = perf_event__process_fork;
2978 	sched->tool.lost	 = process_lost;
2979 	sched->tool.attr	 = perf_event__process_attr;
2980 	sched->tool.tracing_data = perf_event__process_tracing_data;
2981 	sched->tool.build_id	 = perf_event__process_build_id;
2982 
2983 	sched->tool.ordered_events = true;
2984 	sched->tool.ordering_requires_timestamps = true;
2985 
2986 	symbol_conf.use_callchain = sched->show_callchain;
2987 
2988 	session = perf_session__new(&data, false, &sched->tool);
2989 	if (session == NULL)
2990 		return -ENOMEM;
2991 
2992 	evlist = session->evlist;
2993 
2994 	symbol__init(&session->header.env);
2995 
2996 	if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
2997 		pr_err("Invalid time string\n");
2998 		return -EINVAL;
2999 	}
3000 
3001 	if (timehist_check_attr(sched, evlist) != 0)
3002 		goto out;
3003 
3004 	setup_pager();
3005 
3006 	/* setup per-evsel handlers */
3007 	if (perf_session__set_tracepoints_handlers(session, handlers))
3008 		goto out;
3009 
3010 	/* sched_switch event at a minimum needs to exist */
3011 	if (!perf_evlist__find_tracepoint_by_name(session->evlist,
3012 						  "sched:sched_switch")) {
3013 		pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3014 		goto out;
3015 	}
3016 
3017 	if (sched->show_migrations &&
3018 	    perf_session__set_tracepoints_handlers(session, migrate_handlers))
3019 		goto out;
3020 
3021 	/* pre-allocate struct for per-CPU idle stats */
3022 	sched->max_cpu = session->header.env.nr_cpus_online;
3023 	if (sched->max_cpu == 0)
3024 		sched->max_cpu = 4;
3025 	if (init_idle_threads(sched->max_cpu))
3026 		goto out;
3027 
3028 	/* summary_only implies summary option, but don't overwrite summary if set */
3029 	if (sched->summary_only)
3030 		sched->summary = sched->summary_only;
3031 
3032 	if (!sched->summary_only)
3033 		timehist_header(sched);
3034 
3035 	err = perf_session__process_events(session);
3036 	if (err) {
3037 		pr_err("Failed to process events, error %d\n", err);
3038 		goto out;
3039 	}
3040 
3041 	sched->nr_events      = evlist->stats.nr_events[0];
3042 	sched->nr_lost_events = evlist->stats.total_lost;
3043 	sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3044 
3045 	if (sched->summary)
3046 		timehist_print_summary(sched, session);
3047 
3048 out:
3049 	free_idle_threads();
3050 	perf_session__delete(session);
3051 
3052 	return err;
3053 }
3054 
3056 static void print_bad_events(struct perf_sched *sched)
3057 {
3058 	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3059 		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3060 			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3061 			sched->nr_unordered_timestamps, sched->nr_timestamps);
3062 	}
3063 	if (sched->nr_lost_events && sched->nr_events) {
3064 		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3065 			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3066 			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3067 	}
3068 	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3069 		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
3070 			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3071 			sched->nr_context_switch_bugs, sched->nr_timestamps);
3072 		if (sched->nr_lost_events)
3073 			printf(" (due to lost events?)");
3074 		printf("\n");
3075 	}
3076 }
3077 
3078 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3079 {
3080 	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3081 	struct work_atoms *this;
3082 	const char *comm = thread__comm_str(data->thread), *this_comm;
3083 	bool leftmost = true;
3084 
3085 	while (*new) {
3086 		int cmp;
3087 
3088 		this = container_of(*new, struct work_atoms, node);
3089 		parent = *new;
3090 
3091 		this_comm = thread__comm_str(this->thread);
3092 		cmp = strcmp(comm, this_comm);
3093 		if (cmp > 0) {
3094 			new = &((*new)->rb_left);
3095 		} else if (cmp < 0) {
3096 			new = &((*new)->rb_right);
3097 			leftmost = false;
3098 		} else {
3099 			this->num_merged++;
3100 			this->total_runtime += data->total_runtime;
3101 			this->nb_atoms += data->nb_atoms;
3102 			this->total_lat += data->total_lat;
3103 			list_splice(&data->work_list, &this->work_list);
3104 			if (this->max_lat < data->max_lat) {
3105 				this->max_lat = data->max_lat;
3106 				this->max_lat_at = data->max_lat_at;
3107 			}
3108 			zfree(&data);
3109 			return;
3110 		}
3111 	}
3112 
3113 	data->num_merged++;
3114 	rb_link_node(&data->node, parent, new);
3115 	rb_insert_color_cached(&data->node, root, leftmost);
3116 }
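
/*
 * Example: if two threads were both named "gcc", their work_atoms
 * collapse into one node here: runtimes, atom counts and total latency
 * are summed, the larger max_lat (with its timestamp) is kept, and
 * num_merged records how many threads were folded into the entry.
 */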
3117 
3118 static void perf_sched__merge_lat(struct perf_sched *sched)
3119 {
3120 	struct work_atoms *data;
3121 	struct rb_node *node;
3122 
3123 	if (sched->skip_merge)
3124 		return;
3125 
3126 	while ((node = rb_first_cached(&sched->atom_root))) {
3127 		rb_erase_cached(node, &sched->atom_root);
3128 		data = rb_entry(node, struct work_atoms, node);
3129 		__merge_work_atoms(&sched->merged_atom_root, data);
3130 	}
3131 }
3132 
3133 static int perf_sched__lat(struct perf_sched *sched)
3134 {
3135 	struct rb_node *next;
3136 
3137 	setup_pager();
3138 
3139 	if (perf_sched__read_events(sched))
3140 		return -1;
3141 
3142 	perf_sched__merge_lat(sched);
3143 	perf_sched__sort_lat(sched);
3144 
3145 	printf("\n -----------------------------------------------------------------------------------------------------------------\n");
3146 	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at       |\n");
3147 	printf(" -----------------------------------------------------------------------------------------------------------------\n");
3148 
3149 	next = rb_first_cached(&sched->sorted_atom_root);
3150 
3151 	while (next) {
3152 		struct work_atoms *work_list;
3153 
3154 		work_list = rb_entry(next, struct work_atoms, node);
3155 		output_lat_thread(sched, work_list);
3156 		next = rb_next(next);
3157 		thread__zput(work_list->thread);
3158 	}
3159 
3160 	printf(" -----------------------------------------------------------------------------------------------------------------\n");
3161 	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
3162 		(double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3163 
3164 	printf(" ---------------------------------------------------\n");
3165 
3166 	print_bad_events(sched);
3167 	printf("\n");
3168 
3169 	return 0;
3170 }
3171 
3172 static int setup_map_cpus(struct perf_sched *sched)
3173 {
3174 	struct perf_cpu_map *map;
3175 
3176 	sched->max_cpu  = sysconf(_SC_NPROCESSORS_CONF);
3177 
3178 	if (sched->map.comp) {
3179 		sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
3180 		if (!sched->map.comp_cpus)
3181 			return -1;
3182 	}
3183 
3184 	if (!sched->map.cpus_str)
3185 		return 0;
3186 
3187 	map = perf_cpu_map__new(sched->map.cpus_str);
3188 	if (!map) {
3189 		pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3190 		return -1;
3191 	}
3192 
3193 	sched->map.cpus = map;
3194 	return 0;
3195 }
3196 
3197 static int setup_color_pids(struct perf_sched *sched)
3198 {
3199 	struct perf_thread_map *map;
3200 
3201 	if (!sched->map.color_pids_str)
3202 		return 0;
3203 
3204 	map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3205 	if (!map) {
3206 		pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3207 		return -1;
3208 	}
3209 
3210 	sched->map.color_pids = map;
3211 	return 0;
3212 }
3213 
3214 static int setup_color_cpus(struct perf_sched *sched)
3215 {
3216 	struct perf_cpu_map *map;
3217 
3218 	if (!sched->map.color_cpus_str)
3219 		return 0;
3220 
3221 	map = perf_cpu_map__new(sched->map.color_cpus_str);
3222 	if (!map) {
3223 		pr_err("failed to get cpus map from %s\n", sched->map.color_cpus_str);
3224 		return -1;
3225 	}
3226 
3227 	sched->map.color_cpus = map;
3228 	return 0;
3229 }
3230 
3231 static int perf_sched__map(struct perf_sched *sched)
3232 {
3233 	if (setup_map_cpus(sched))
3234 		return -1;
3235 
3236 	if (setup_color_pids(sched))
3237 		return -1;
3238 
3239 	if (setup_color_cpus(sched))
3240 		return -1;
3241 
3242 	setup_pager();
3243 	if (perf_sched__read_events(sched))
3244 		return -1;
3245 	print_bad_events(sched);
3246 	return 0;
3247 }
3248 
3249 static int perf_sched__replay(struct perf_sched *sched)
3250 {
3251 	unsigned long i;
3252 
3253 	calibrate_run_measurement_overhead(sched);
3254 	calibrate_sleep_measurement_overhead(sched);
3255 
3256 	test_calibrations(sched);
3257 
3258 	if (perf_sched__read_events(sched))
3259 		return -1;
3260 
3261 	printf("nr_run_events:        %ld\n", sched->nr_run_events);
3262 	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
3263 	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);
3264 
3265 	if (sched->targetless_wakeups)
3266 		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
3267 	if (sched->multitarget_wakeups)
3268 		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3269 	if (sched->nr_run_events_optimized)
3270 		printf("run atoms optimized: %ld\n",
3271 			sched->nr_run_events_optimized);
3272 
3273 	print_task_traces(sched);
3274 	add_cross_task_wakeups(sched);
3275 
3276 	create_tasks(sched);
3277 	printf("------------------------------------------------------------\n");
3278 	for (i = 0; i < sched->replay_repeat; i++)
3279 		run_one_test(sched);
3280 
3281 	return 0;
3282 }
3283 
3284 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3285 			  const char * const usage_msg[])
3286 {
3287 	char *tmp, *tok, *str = strdup(sched->sort_order);
3288 
3289 	for (tok = strtok_r(str, ", ", &tmp);
3290 			tok; tok = strtok_r(NULL, ", ", &tmp)) {
3291 		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3292 			usage_with_options_msg(usage_msg, options,
3293 					"Unknown --sort key: `%s'", tok);
3294 		}
3295 	}
3296 
3297 	free(str);
3298 
3299 	sort_dimension__add("pid", &sched->cmp_pid);
3300 }
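
/*
 * Example: "--sort avg,max" (',' and ' ' both act as separators) adds
 * the "avg" and "max" dimensions in that order; an unknown key aborts
 * with the usage message.  "pid" is always added to the separate
 * cmp_pid list, which is used internally to match atoms to threads.
 */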
3301 
3302 static int __cmd_record(int argc, const char **argv)
3303 {
3304 	unsigned int rec_argc, i, j;
3305 	const char **rec_argv;
3306 	const char * const record_args[] = {
3307 		"record",
3308 		"-a",
3309 		"-R",
3310 		"-m", "1024",
3311 		"-c", "1",
3312 		"-e", "sched:sched_switch",
3313 		"-e", "sched:sched_stat_wait",
3314 		"-e", "sched:sched_stat_sleep",
3315 		"-e", "sched:sched_stat_iowait",
3316 		"-e", "sched:sched_stat_runtime",
3317 		"-e", "sched:sched_process_fork",
3318 		"-e", "sched:sched_wakeup",
3319 		"-e", "sched:sched_wakeup_new",
3320 		"-e", "sched:sched_migrate_task",
3321 	};
3322 
3323 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
3324 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
3325 
3326 	if (rec_argv == NULL)
3327 		return -ENOMEM;
3328 
3329 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
3330 		rec_argv[i] = strdup(record_args[i]);
3331 
3332 	for (j = 1; j < (unsigned int)argc; j++, i++)
3333 		rec_argv[i] = argv[j];
3334 
3335 	BUG_ON(i != rec_argc);
3336 
3337 	return cmd_record(i, rec_argv);
3338 }
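
/*
 * Example: "perf sched record -- sleep 1" execs
 *
 *   perf record -a -R -m 1024 -c 1 -e sched:sched_switch ... \
 *       -e sched:sched_migrate_task -- sleep 1
 *
 * i.e. the fixed record_args above with everything after "record"
 * appended ("..." elides the remaining -e sched:* events listed above).
 */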
3339 
3340 int cmd_sched(int argc, const char **argv)
3341 {
3342 	static const char default_sort_order[] = "avg, max, switch, runtime";
3343 	struct perf_sched sched = {
3344 		.tool = {
3345 			.sample		 = perf_sched__process_tracepoint_sample,
3346 			.comm		 = perf_sched__process_comm,
3347 			.namespaces	 = perf_event__process_namespaces,
3348 			.lost		 = perf_event__process_lost,
3349 			.fork		 = perf_sched__process_fork_event,
3350 			.ordered_events = true,
3351 		},
3352 		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
3353 		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
3354 		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
3355 		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
3356 		.sort_order	      = default_sort_order,
3357 		.replay_repeat	      = 10,
3358 		.profile_cpu	      = -1,
3359 		.next_shortname1      = 'A',
3360 		.next_shortname2      = '0',
3361 		.skip_merge           = 0,
3362 		.show_callchain	      = 1,
3363 		.max_stack            = 5,
3364 	};
3365 	const struct option sched_options[] = {
3366 	OPT_STRING('i', "input", &input_name, "file",
3367 		    "input file name"),
3368 	OPT_INCR('v', "verbose", &verbose,
3369 		    "be more verbose (show symbol address, etc)"),
3370 	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3371 		    "dump raw trace in ASCII"),
3372 	OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3373 	OPT_END()
3374 	};
3375 	const struct option latency_options[] = {
3376 	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3377 		   "sort by key(s): runtime, switch, avg, max"),
3378 	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3379 		    "CPU to profile on"),
3380 	OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3381 		    "latency stats per pid instead of per comm"),
3382 	OPT_PARENT(sched_options)
3383 	};
3384 	const struct option replay_options[] = {
3385 	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3386 		     "repeat the workload replay N times (-1: infinite)"),
3387 	OPT_PARENT(sched_options)
3388 	};
3389 	const struct option map_options[] = {
3390 	OPT_BOOLEAN(0, "compact", &sched.map.comp,
3391 		    "map output in compact mode"),
3392 	OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3393 		   "highlight given pids in map"),
3394 	OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3395                     "highlight given CPUs in map"),
3396 	OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3397                     "display given CPUs in map"),
3398 	OPT_PARENT(sched_options)
3399 	};
3400 	const struct option timehist_options[] = {
3401 	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3402 		   "file", "vmlinux pathname"),
3403 	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3404 		   "file", "kallsyms pathname"),
3405 	OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3406 		    "Display call chains if present (default on)"),
3407 	OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3408 		   "Maximum number of functions to display backtrace."),
3409 	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3410 		    "Look for files with symbols relative to this directory"),
3411 	OPT_BOOLEAN('s', "summary", &sched.summary_only,
3412 		    "Show only the summary with statistics"),
3413 	OPT_BOOLEAN('S', "with-summary", &sched.summary,
3414 		    "Show all events and the summary with statistics"),
3415 	OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3416 	OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3417 	OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3418 	OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3419 	OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3420 	OPT_STRING(0, "time", &sched.time_str, "str",
3421 		   "Time span for analysis (start,stop)"),
3422 	OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3423 	OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3424 		   "analyze events only for given process id(s)"),
3425 	OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3426 		   "analyze events only for given thread id(s)"),
3427 	OPT_PARENT(sched_options)
3428 	};
3429 
3430 	const char * const latency_usage[] = {
3431 		"perf sched latency [<options>]",
3432 		NULL
3433 	};
3434 	const char * const replay_usage[] = {
3435 		"perf sched replay [<options>]",
3436 		NULL
3437 	};
3438 	const char * const map_usage[] = {
3439 		"perf sched map [<options>]",
3440 		NULL
3441 	};
3442 	const char * const timehist_usage[] = {
3443 		"perf sched timehist [<options>]",
3444 		NULL
3445 	};
3446 	const char *const sched_subcommands[] = { "record", "latency", "map",
3447 						  "replay", "script",
3448 						  "timehist", NULL };
3449 	const char *sched_usage[] = {
3450 		NULL,
3451 		NULL
3452 	};
3453 	struct trace_sched_handler lat_ops  = {
3454 		.wakeup_event	    = latency_wakeup_event,
3455 		.switch_event	    = latency_switch_event,
3456 		.runtime_event	    = latency_runtime_event,
3457 		.migrate_task_event = latency_migrate_task_event,
3458 	};
3459 	struct trace_sched_handler map_ops  = {
3460 		.switch_event	    = map_switch_event,
3461 	};
3462 	struct trace_sched_handler replay_ops  = {
3463 		.wakeup_event	    = replay_wakeup_event,
3464 		.switch_event	    = replay_switch_event,
3465 		.fork_event	    = replay_fork_event,
3466 	};
3467 	unsigned int i;
3468 
3469 	for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
3470 		sched.curr_pid[i] = -1;
3471 
3472 	argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3473 					sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3474 	if (!argc)
3475 		usage_with_options(sched_usage, sched_options);
3476 
3477 	/*
3478 	 * Aliased to 'perf script' for now:
3479 	 */
3480 	if (!strcmp(argv[0], "script"))
3481 		return cmd_script(argc, argv);
3482 
3483 	if (!strncmp(argv[0], "rec", 3)) {
3484 		return __cmd_record(argc, argv);
3485 	} else if (!strncmp(argv[0], "lat", 3)) {
3486 		sched.tp_handler = &lat_ops;
3487 		if (argc > 1) {
3488 			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3489 			if (argc)
3490 				usage_with_options(latency_usage, latency_options);
3491 		}
3492 		setup_sorting(&sched, latency_options, latency_usage);
3493 		return perf_sched__lat(&sched);
3494 	} else if (!strcmp(argv[0], "map")) {
3495 		if (argc) {
3496 			argc = parse_options(argc, argv, map_options, map_usage, 0);
3497 			if (argc)
3498 				usage_with_options(map_usage, map_options);
3499 		}
3500 		sched.tp_handler = &map_ops;
3501 		setup_sorting(&sched, latency_options, latency_usage);
3502 		return perf_sched__map(&sched);
3503 	} else if (!strncmp(argv[0], "rep", 3)) {
3504 		sched.tp_handler = &replay_ops;
3505 		if (argc) {
3506 			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
3507 			if (argc)
3508 				usage_with_options(replay_usage, replay_options);
3509 		}
3510 		return perf_sched__replay(&sched);
3511 	} else if (!strcmp(argv[0], "timehist")) {
3512 		if (argc) {
3513 			argc = parse_options(argc, argv, timehist_options,
3514 					     timehist_usage, 0);
3515 			if (argc)
3516 				usage_with_options(timehist_usage, timehist_options);
3517 		}
3518 		if ((sched.show_wakeups || sched.show_next) &&
3519 		    sched.summary_only) {
3520 			pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
3521 			parse_options_usage(timehist_usage, timehist_options, "s", true);
3522 			if (sched.show_wakeups)
3523 				parse_options_usage(NULL, timehist_options, "w", true);
3524 			if (sched.show_next)
3525 				parse_options_usage(NULL, timehist_options, "n", true);
3526 			return -EINVAL;
3527 		}
3528 
3529 		return perf_sched__timehist(&sched);
3530 	} else {
3531 		usage_with_options(sched_usage, sched_options);
3532 	}
3533 
3534 	return 0;
3535 }
3536