/*
 * builtin-sched.c - 'perf sched' scheduler measurement/replay tool
 * (xref: /openbmc/linux/tools/perf/builtin-sched.c,
 *  revision baa7eb025ab14f3cba2e35c0a8648f9c9f01d24f)
 */
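/*
 * Typical usage, based on the subcommands dispatched in cmd_sched()
 * at the bottom of this file:
 *
 *   perf sched record              # trace scheduling events system-wide
 *   perf sched latency --sort max  # report per-task scheduling delays
 *   perf sched map                 # per-CPU map of context switches
 *   perf sched replay -r 10        # re-simulate the recorded workload 10 times
 */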
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

static char			const *input_name = "perf.data";

static char			default_sort_order[] = "avg, max, switch, runtime";
static const char		*sort_order = default_sort_order;

static int			profile_cpu = -1;

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096

static u64			run_measurement_overhead;
static u64			sleep_measurement_overhead;

#define COMM_LEN		20
#define SYM_LEN			129

#define MAX_PID			65536

static unsigned long		nr_tasks;

struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

static struct task_desc		*pid_to_task[MAX_PID];

static struct task_desc		**tasks;

static pthread_mutex_t		start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static u64			start_time;

static pthread_mutex_t		work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned long		nr_run_events;
static unsigned long		nr_sleep_events;
static unsigned long		nr_wakeup_events;

static unsigned long		nr_sleep_corrections;
static unsigned long		nr_run_events_optimized;

static unsigned long		targetless_wakeups;
static unsigned long		multitarget_wakeups;

static u64			cpu_usage;
static u64			runavg_cpu_usage;
static u64			parent_cpu_usage;
static u64			runavg_parent_cpu_usage;

static unsigned long		nr_runs;
static u64			sum_runtime;
static u64			sum_fluct;
static u64			run_avg;

static unsigned int		replay_repeat = 10;
static unsigned long		nr_timestamps;
static unsigned long		nr_unordered_timestamps;
static unsigned long		nr_state_machine_bugs;
static unsigned long		nr_context_switch_bugs;
static unsigned long		nr_events;
static unsigned long		nr_lost_chunks;
static unsigned long		nr_lost_events;

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

static struct rb_root		atom_root, sorted_atom_root;

static u64			all_runtime;
static u64			all_count;

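/*
 * Timekeeping helper: current CLOCK_MONOTONIC time in nanoseconds.
 */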
static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

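/*
 * Busy-loop for 'nsecs' nanoseconds, compensating for the measured
 * overhead of the measurement itself (see the calibration below):
 */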
static void burn_nsecs(u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	/* 10^9 nsecs per second; the modulus keeps tv_nsec in nanosleep()'s valid range */
	ts.tv_nsec = nsecs % 1000000000ULL;
	ts.tv_sec = nsecs / 1000000000ULL;

	nanosleep(&ts, NULL);
}

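/*
 * Calibrate the self-measurement overhead of the run/sleep primitives
 * by taking the minimum over ten trial measurements:
 */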
static void calibrate_run_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	run_measurement_overhead = min_delta;

	printf("run measurement overhead: %Ld nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(void)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
}

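/*
 * Per-task event log used by 'perf sched replay': each recorded
 * scheduling event becomes a sched_atom appended to the task's
 * dynamically grown atoms[] array.
 */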
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * into it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	nr_run_events++;
}

static void
add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
		       struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	nr_wakeup_events++;
}

static void
add_sched_event_sleep(struct task_desc *task, u64 timestamp,
		      u64 task_state __used)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	nr_sleep_events++;
}

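/*
 * Look up (or create) the replay task descriptor for a traced PID:
 */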
static struct task_desc *register_pid(unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(task, 0, 0);

	pid_to_task[pid] = task;
	nr_tasks++;
	tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
	BUG_ON(!tasks);
	tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);

	return task;
}

static void print_task_traces(void)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(void)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < nr_tasks; i++) {
		task1 = tasks[i];
		j = i + 1;
		if (j == nr_tasks)
			j = 0;
		task2 = tasks[j];
		add_sched_event_wakeup(task1, 0, task2);
	}
}

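/*
 * Re-enact one scheduling atom during replay: burn CPU for RUN events,
 * block on/post the wakeup semaphore for SLEEP/WAKEUP events.
 */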
static void
process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
{
	int ret = 0;
	u64 now;
	long long delta;

	now = get_nsecs();
	delta = start_time + atom->timestamp - now;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum  = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

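/*
 * Open a per-thread software task-clock counter so each replay thread
 * can measure its own CPU usage precisely:
 */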
static int self_open_counters(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

	if (fd < 0)
		die("Error: sys_perf_event_open() syscall returned "
		    "with %d (%s)\n", fd, strerror(errno));
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

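/*
 * Body of every replay thread: signal readiness, wait for the global
 * start barrier, re-enact this task's atoms, then report CPU usage.
 */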
static void *thread_func(void *ctx)
{
	struct task_desc *this_task = ctx;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd;

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();

again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		process_sched_event(this_task, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

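/*
 * The two mutexes act as start/stop barriers: the parent holds
 * start_work_mutex while creating threads, so all workers block in
 * thread_func() until wait_for_tasks() releases it.
 */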
static void create_tasks(void)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
	BUG_ON(err);
	err = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, task);
		BUG_ON(err);
	}
}

static void wait_for_tasks(void)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	start_time = get_nsecs();
	cpu_usage = 0;
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&start_work_mutex);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!runavg_cpu_usage)
		runavg_cpu_usage = cpu_usage;
	runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;

	parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!runavg_parent_cpu_usage)
		runavg_parent_cpu_usage = parent_cpu_usage;
	runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
				   parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < nr_tasks; i++) {
		task = tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

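/*
 * Run one replay iteration and print its runtime, the running average
 * and the measured CPU usage:
 */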
static void run_one_test(void)
{
	u64 T0, T1, delta, avg_delta, fluct, std_dev;

	T0 = get_nsecs();
	wait_for_tasks();
	T1 = get_nsecs();

	delta = T1 - T0;
	sum_runtime += delta;
	nr_runs++;

	avg_delta = sum_runtime / nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sum_fluct += fluct;
	std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
	if (!run_avg)
		run_avg = delta;
	run_avg = (run_avg*9 + delta)/10;

	printf("#%-3ld: %0.3f, ",
		nr_runs, (double)delta/1000000.0);

	printf("ravg: %0.2f, ",
		(double)run_avg/1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)parent_cpu_usage/1e6,
		(double)runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
	nr_sleep_corrections = 0;
}

static void test_calibrations(void)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(1e6);
	T1 = get_nsecs();

	printf("the run test took %Ld nsecs\n", T1-T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %Ld nsecs\n", T1-T0);
}

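/*
 * Helpers to extract typed fields out of raw tracepoint records into
 * the trace_*_event structs below:
 */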
#define FILL_FIELD(ptr, field, event, data)	\
	ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)

#define FILL_ARRAY(ptr, array, event, data)			\
do {								\
	void *__array = raw_field_ptr(event, #array, data);	\
	memcpy(ptr.array, __array, sizeof(ptr.array));		\
} while (0)

#define FILL_COMMON_FIELDS(ptr, event, data)			\
do {								\
	FILL_FIELD(ptr, common_type, event, data);		\
	FILL_FIELD(ptr, common_flags, event, data);		\
	FILL_FIELD(ptr, common_preempt_count, event, data);	\
	FILL_FIELD(ptr, common_pid, event, data);		\
	FILL_FIELD(ptr, common_tgid, event, data);		\
} while (0)

struct trace_switch_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char prev_comm[16];
	u32 prev_pid;
	u32 prev_prio;
	u64 prev_state;
	char next_comm[16];
	u32 next_pid;
	u32 next_prio;
};

struct trace_runtime_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;
	u64 runtime;
	u64 vruntime;
};

struct trace_wakeup_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 success;
	u32 cpu;
};

struct trace_fork_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char parent_comm[16];
	u32 parent_pid;
	char child_comm[16];
	u32 child_pid;
};

struct trace_migrate_task_event {
	u32 size;

	u16 common_type;
	u8 common_flags;
	u8 common_preempt_count;
	u32 common_pid;
	u32 common_tgid;

	char comm[16];
	u32 pid;

	u32 prio;
	u32 cpu;
};

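/*
 * Per-subcommand callbacks: 'replay', 'latency' and 'map' each plug
 * their own handlers into this table.
 */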
struct trace_sched_handler {
	void (*switch_event)(struct trace_switch_event *,
			     struct perf_session *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*runtime_event)(struct trace_runtime_event *,
			      struct perf_session *,
			      struct event *,
			      int cpu,
			      u64 timestamp,
			      struct thread *thread);

	void (*wakeup_event)(struct trace_wakeup_event *,
			     struct perf_session *,
			     struct event *,
			     int cpu,
			     u64 timestamp,
			     struct thread *thread);

	void (*fork_event)(struct trace_fork_event *,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);

	void (*migrate_task_event)(struct trace_migrate_task_event *,
			   struct perf_session *session,
			   struct event *,
			   int cpu,
			   u64 timestamp,
			   struct thread *thread);
};

static void
replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
		    struct perf_session *session __used,
		    struct event *event,
		    int cpu __used,
		    u64 timestamp __used,
		    struct thread *thread __used)
{
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", event);

		printf(" ... pid %d woke up %s/%d\n",
			wakeup_event->common_pid,
			wakeup_event->comm,
			wakeup_event->pid);
	}

	waker = register_pid(wakeup_event->common_pid, "<unknown>");
	wakee = register_pid(wakeup_event->pid, wakeup_event->comm);

	add_sched_event_wakeup(waker, timestamp, wakee);
}

static u64 cpu_last_switched[MAX_CPUS];

static void
replay_switch_event(struct trace_switch_event *switch_event,
		    struct perf_session *session __used,
		    struct event *event,
		    int cpu,
		    u64 timestamp,
		    struct thread *thread __used)
{
	struct task_desc *prev, *next;
	u64 timestamp0;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", event);

	if (cpu >= MAX_CPUS || cpu < 0)
		return;

	timestamp0 = cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	if (verbose) {
		printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
			switch_event->prev_comm, switch_event->prev_pid,
			switch_event->next_comm, switch_event->next_pid,
			delta);
	}

	prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
	next = register_pid(switch_event->next_pid, switch_event->next_comm);

	cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(prev, timestamp, delta);
	add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
}

static void
replay_fork_event(struct trace_fork_event *fork_event,
		  struct event *event,
		  int cpu __used,
		  u64 timestamp __used,
		  struct thread *thread __used)
{
	if (verbose) {
		printf("sched_fork event %p\n", event);
		printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
		printf("...  child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
	}
	register_pid(fork_event->parent_pid, fork_event->parent_comm);
	register_pid(fork_event->child_pid, fork_event->child_comm);
}

static struct trace_sched_handler replay_ops = {
	.wakeup_event		= replay_wakeup_event,
	.switch_event		= replay_switch_event,
	.fork_event		= replay_fork_event,
};

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(cmp_pid);

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

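/*
 * The latency accounting keeps one work_atoms entry per thread in an
 * rb-tree ordered by the active sort keys:
 */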
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
			 struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			 struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void thread_atoms_insert(struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms)
		die("No memory");

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&atom_root, atoms, &cmp_pid);
}

static void
latency_fork_event(struct trace_fork_event *fork_event __used,
		   struct event *event __used,
		   int cpu __used,
		   u64 timestamp __used,
		   struct thread *thread __used)
{
	/* should insert the newcomer */
}

__used
static char sched_out_state(struct trace_switch_event *switch_event)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[switch_event->prev_state];
}

static void
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom)
		die("No memory");

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

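/*
 * A sched-in completes a wakeup->run cycle; the scheduling delay is
 * sched_in_time - wake_up_time:
 */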
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}

static void
latency_switch_event(struct trace_switch_event *switch_event,
		     struct perf_session *session,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = cpu_last_switched[cpu];
	cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = perf_session__findnew(session, switch_event->prev_pid);
	sched_in = perf_session__findnew(session, switch_event->next_pid);

	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
	if (!out_events) {
		thread_atoms_insert(sched_out);
		out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
		if (!out_events)
			die("out-event: Internal tree error");
	}
	add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);

	in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
	if (!in_events) {
		thread_atoms_insert(sched_in);
		in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
		if (!in_events)
			die("in-event: Internal tree error");
		/*
		 * A task came in that we have not heard about yet,
		 * add an initial atom in runnable state:
		 */
		add_sched_out_event(in_events, 'R', timestamp);
	}
	add_sched_in_event(in_events, timestamp);
}

static void
latency_runtime_event(struct trace_runtime_event *runtime_event,
		     struct perf_session *session,
		     struct event *event __used,
		     int cpu,
		     u64 timestamp,
		     struct thread *this_thread __used)
{
	struct thread *thread = perf_session__findnew(session, runtime_event->pid);
	struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		thread_atoms_insert(thread);
		atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
		if (!atoms)
			die("in-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	add_runtime_event(atoms, runtime_event->runtime, timestamp);
}

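/*
 * A wakeup marks the start of a latency interval for the woken thread:
 */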
static void
latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
		     struct perf_session *session,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!wakeup_event->success)
		return;

	wakee = perf_session__findnew(session, wakeup_event->pid);
	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(wakee);
		atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
		if (!atoms)
			die("wakeup-event: Internal tree error");
		add_sched_out_event(atoms, 'S', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are looking at only one, so don't
	 * make useless noise.
	 */
	if (profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		nr_state_machine_bugs++;

	nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		nr_unordered_timestamps++;
		return;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
}

static void
latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event,
		     struct perf_session *session,
		     struct event *__event __used,
		     int cpu __used,
		     u64 timestamp,
		     struct thread *thread __used)
{
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (profile_cpu == -1)
		return;

	migrant = perf_session__findnew(session, migrate_task_event->pid);
	atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
	if (!atoms) {
		thread_atoms_insert(migrant);
		register_pid(migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid);
		if (!atoms)
			die("migration-event: Internal tree error");
		add_sched_out_event(atoms, 'R', timestamp);
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		nr_unordered_timestamps++;
}

static struct trace_sched_handler lat_ops = {
	.wakeup_event		= latency_wakeup_event,
	.switch_event		= latency_switch_event,
	.runtime_event		= latency_runtime_event,
	.fork_event		= latency_fork_event,
	.migrate_task_event	= latency_migrate_task_event,
};

static void output_lat_thread(struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	all_runtime += work_list->total_runtime;
	all_count += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
		(double)work_list->total_runtime / 1e6,
		work_list->nb_atoms, (double)avg / 1e6,
		(double)work_list->max_lat / 1e6,
		(double)work_list->max_lat_at / 1e9);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static struct sort_dimension pid_sort_dimension = {
	.name			= "pid",
	.cmp			= pid_cmp,
};

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static struct sort_dimension avg_sort_dimension = {
	.name			= "avg",
	.cmp			= avg_cmp,
};

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static struct sort_dimension max_sort_dimension = {
	.name			= "max",
	.cmp			= max_cmp,
};

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static struct sort_dimension switch_sort_dimension = {
	.name			= "switch",
	.cmp			= switch_cmp,
};

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static struct sort_dimension runtime_sort_dimension = {
	.name			= "runtime",
	.cmp			= runtime_cmp,
};

static struct sort_dimension *available_sorts[] = {
	&pid_sort_dimension,
	&avg_sort_dimension,
	&max_sort_dimension,
	&switch_sort_dimension,
	&runtime_sort_dimension,
};

#define NB_AVAILABLE_SORTS	(int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))

static LIST_HEAD(sort_list);

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	int i;

	for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void setup_sorting(void);

static void sort_lat(void)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&atom_root);
		if (!node)
			break;

		rb_erase(node, &atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sorted_atom_root, data, &sort_list);
	}
}

static struct trace_sched_handler *trace_handler;

static void
process_sched_wakeup_event(void *data, struct perf_session *session,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_wakeup_event wakeup_event;

	FILL_COMMON_FIELDS(wakeup_event, event, data);

	FILL_ARRAY(wakeup_event, comm, event, data);
	FILL_FIELD(wakeup_event, pid, event, data);
	FILL_FIELD(wakeup_event, prio, event, data);
	FILL_FIELD(wakeup_event, success, event, data);
	FILL_FIELD(wakeup_event, cpu, event, data);

	if (trace_handler->wakeup_event)
		trace_handler->wakeup_event(&wakeup_event, session, event,
					    cpu, timestamp, thread);
}

/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
static int max_cpu;

static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };

static struct thread *curr_thread[MAX_CPUS];

static char next_shortname1 = 'A';
static char next_shortname2 = '0';

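/*
 * 'perf sched map' output: print one column per CPU, with a two-char
 * shortname assigned to each thread the first time it is seen.
 */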
static void
map_switch_event(struct trace_switch_event *switch_event,
		 struct perf_session *session,
		 struct event *event __used,
		 int this_cpu,
		 u64 timestamp,
		 struct thread *thread __used)
{
	struct thread *sched_out, *sched_in;
	int new_shortname;
	u64 timestamp0;
	s64 delta;
	int cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > max_cpu)
		max_cpu = this_cpu;

	timestamp0 = cpu_last_switched[this_cpu];
	cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0)
		die("hm, delta: %Ld < 0 ?\n", delta);

	sched_out = perf_session__findnew(session, switch_event->prev_pid);
	sched_in = perf_session__findnew(session, switch_event->next_pid);

	curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = next_shortname1;
		sched_in->shortname[1] = next_shortname2;

		if (next_shortname1 < 'Z') {
			next_shortname1++;
		} else {
			next_shortname1 = 'A';
			if (next_shortname2 < '9') {
				next_shortname2++;
			} else {
				next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (curr_thread[cpu]) {
			if (curr_thread[cpu]->pid)
				printf("%2s ", curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}
}

static void
process_sched_switch_event(void *data, struct perf_session *session,
			   struct event *event,
			   int this_cpu,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_switch_event switch_event;

	FILL_COMMON_FIELDS(switch_event, event, data);

	FILL_ARRAY(switch_event, prev_comm, event, data);
	FILL_FIELD(switch_event, prev_pid, event, data);
	FILL_FIELD(switch_event, prev_prio, event, data);
	FILL_FIELD(switch_event, prev_state, event, data);
	FILL_ARRAY(switch_event, next_comm, event, data);
	FILL_FIELD(switch_event, next_pid, event, data);
	FILL_FIELD(switch_event, next_prio, event, data);

	if (curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (curr_pid[this_cpu] != switch_event.prev_pid)
			nr_context_switch_bugs++;
	}
	if (trace_handler->switch_event)
		trace_handler->switch_event(&switch_event, session, event,
					    this_cpu, timestamp, thread);

	curr_pid[this_cpu] = switch_event.next_pid;
}

static void
process_sched_runtime_event(void *data, struct perf_session *session,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_runtime_event runtime_event;

	FILL_ARRAY(runtime_event, comm, event, data);
	FILL_FIELD(runtime_event, pid, event, data);
	FILL_FIELD(runtime_event, runtime, event, data);
	FILL_FIELD(runtime_event, vruntime, event, data);

	if (trace_handler->runtime_event)
		trace_handler->runtime_event(&runtime_event, session, event, cpu, timestamp, thread);
}

static void
process_sched_fork_event(void *data,
			 struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	struct trace_fork_event fork_event;

	FILL_COMMON_FIELDS(fork_event, event, data);

	FILL_ARRAY(fork_event, parent_comm, event, data);
	FILL_FIELD(fork_event, parent_pid, event, data);
	FILL_ARRAY(fork_event, child_comm, event, data);
	FILL_FIELD(fork_event, child_pid, event, data);

	if (trace_handler->fork_event)
		trace_handler->fork_event(&fork_event, event,
					  cpu, timestamp, thread);
}

static void
process_sched_exit_event(struct event *event,
			 int cpu __used,
			 u64 timestamp __used,
			 struct thread *thread __used)
{
	if (verbose)
		printf("sched_exit event %p\n", event);
}

static void
process_sched_migrate_task_event(void *data, struct perf_session *session,
			   struct event *event,
			   int cpu __used,
			   u64 timestamp __used,
			   struct thread *thread __used)
{
	struct trace_migrate_task_event migrate_task_event;

	FILL_COMMON_FIELDS(migrate_task_event, event, data);

	FILL_ARRAY(migrate_task_event, comm, event, data);
	FILL_FIELD(migrate_task_event, pid, event, data);
	FILL_FIELD(migrate_task_event, prio, event, data);
	FILL_FIELD(migrate_task_event, cpu, event, data);

	if (trace_handler->migrate_task_event)
		trace_handler->migrate_task_event(&migrate_task_event, session,
						 event, cpu, timestamp, thread);
}

static void
process_raw_event(event_t *raw_event __used, struct perf_session *session,
		  void *data, int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);

	if (!strcmp(event->name, "sched_switch"))
		process_sched_switch_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_stat_runtime"))
		process_sched_runtime_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup"))
		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_wakeup_new"))
		process_sched_wakeup_event(data, session, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_fork"))
		process_sched_fork_event(data, event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_process_exit"))
		process_sched_exit_event(event, cpu, timestamp, thread);
	if (!strcmp(event->name, "sched_migrate_task"))
		process_sched_migrate_task_event(data, session, event, cpu, timestamp, thread);
}

static int process_sample_event(event_t *event, struct sample_data *sample,
				struct perf_session *session)
{
	struct thread *thread;

	if (!(session->sample_type & PERF_SAMPLE_RAW))
		return 0;

	thread = perf_session__findnew(session, sample->pid);
	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (profile_cpu != -1 && profile_cpu != (int)sample->cpu)
		return 0;

	process_raw_event(event, session, sample->raw_data, sample->cpu,
			  sample->time, thread);

	return 0;
}

static struct perf_event_ops event_ops = {
	.sample			= process_sample_event,
	.comm			= event__process_comm,
	.lost			= event__process_lost,
	.fork			= event__process_task,
	.ordered_samples	= true,
};

static int read_events(void)
{
	int err = -EINVAL;
	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
							 0, false, &event_ops);
	if (session == NULL)
		return -ENOMEM;

	if (perf_session__has_traces(session, "record -R")) {
		err = perf_session__process_events(session, &event_ops);
		nr_events      = session->hists.stats.nr_events[0];
		nr_lost_events = session->hists.stats.total_lost;
		nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
	}

	perf_session__delete(session);
	return err;
}

static void print_bad_events(void)
{
	if (nr_unordered_timestamps && nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
			nr_unordered_timestamps, nr_timestamps);
	}
	if (nr_lost_events && nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)nr_lost_events/(double)nr_events*100.0,
			nr_lost_events, nr_events, nr_lost_chunks);
	}
	if (nr_state_machine_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
			nr_state_machine_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (nr_context_switch_bugs && nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
			nr_context_switch_bugs, nr_timestamps);
		if (nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

static void __cmd_lat(void)
{
	struct rb_node *next;

	setup_pager();
	read_events();
	sort_lat();

	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
	printf(" ---------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9Ld |\n",
		(double)all_runtime/1e6, all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events();
	printf("\n");
}

static struct trace_sched_handler map_ops = {
	.wakeup_event		= NULL,
	.switch_event		= map_switch_event,
	.runtime_event		= NULL,
	.fork_event		= NULL,
};

static void __cmd_map(void)
{
	max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	read_events();
	print_bad_events();
}

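/*
 * 'perf sched replay': calibrate, rebuild the task/event graph from the
 * trace, then re-execute it replay_repeat times.
 */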
static void __cmd_replay(void)
{
	unsigned long i;

	calibrate_run_measurement_overhead();
	calibrate_sleep_measurement_overhead();

	test_calibrations();

	read_events();

	printf("nr_run_events:        %ld\n", nr_run_events);
	printf("nr_sleep_events:      %ld\n", nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", nr_wakeup_events);

	if (targetless_wakeups)
		printf("target-less wakeups:  %ld\n", targetless_wakeups);
	if (multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", multitarget_wakeups);
	if (nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			nr_run_events_optimized);

	print_task_traces();
	add_cross_task_wakeups();

	create_tasks();
	printf("------------------------------------------------------------\n");
	for (i = 0; i < replay_repeat; i++)
		run_one_test();
}

static const char * const sched_usage[] = {
	"perf sched [<options>] {record|latency|map|replay|trace}",
	NULL
};

static const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const latency_usage[] = {
	"perf sched latency [<options>]",
	NULL
};

static const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static const char * const replay_usage[] = {
	"perf sched replay [<options>]",
	NULL
};

static const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(latency_usage, latency_options);
		}
	}

	free(str);

	sort_dimension__add("pid", &cmp_pid);
}

static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "sched:sched_switch:r",
	"-e", "sched:sched_stat_wait:r",
	"-e", "sched:sched_stat_sleep:r",
	"-e", "sched:sched_stat_iowait:r",
	"-e", "sched:sched_stat_runtime:r",
	"-e", "sched:sched_process_exit:r",
	"-e", "sched:sched_process_fork:r",
	"-e", "sched:sched_wakeup:r",
	"-e", "sched:sched_migrate_task:r",
};

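/*
 * Fork 'perf record' with the scheduler tracepoints above plus any
 * user-supplied arguments appended:
 */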
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	symbol__init();
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		trace_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting();
		__cmd_lat();
	} else if (!strcmp(argv[0], "map")) {
		trace_handler = &map_ops;
		setup_sorting();
		__cmd_map();
	} else if (!strncmp(argv[0], "rep", 3)) {
		trace_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		__cmd_replay();
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}