/* xref: /openbmc/linux/tools/perf/builtin-sched.c (revision d0b73b48) */
#include "builtin.h"
#include "perf.h"

#include "util/util.h"
#include "util/evlist.h"
#include "util/cache.h"
#include "util/evsel.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <sys/prctl.h>
#include <sys/resource.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

#define PR_SET_NAME		15               /* Set process name */
#define MAX_CPUS		4096
#define COMM_LEN		20
#define SYM_LEN			129
#define MAX_PID			65536

struct sched_atom;

struct task_desc {
	unsigned long		nr;
	unsigned long		pid;
	char			comm[COMM_LEN];

	unsigned long		nr_events;
	unsigned long		curr_event;
	struct sched_atom	**atoms;

	pthread_t		thread;
	sem_t			sleep_sem;

	sem_t			ready_for_work;
	sem_t			work_done_sem;

	u64			cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type	type;
	int			specific_wait;
	u64			timestamp;
	u64			duration;
	unsigned long		nr;
	sem_t			*wait_sem;
	struct task_desc	*wakee;
};

#define TASK_STATE_TO_CHAR_STR "RSDTtZX"

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head	list;
	enum thread_state	state;
	u64			sched_out_time;
	u64			wake_up_time;
	u64			sched_in_time;
	u64			runtime;
};

struct work_atoms {
	struct list_head	work_list;
	struct thread		*thread;
	struct rb_node		node;
	u64			max_lat;
	u64			max_lat_at;
	u64			total_lat;
	u64			nb_atoms;
	u64			total_runtime;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
			  struct perf_sample *sample);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

struct perf_sched {
	struct perf_tool tool;
	const char	 *sort_order;
	unsigned long	 nr_tasks;
	struct task_desc *pid_to_task[MAX_PID];
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	pthread_mutex_t	 start_work_mutex;
	pthread_mutex_t	 work_done_wait_mutex;
	int		 profile_cpu;
/*
 * Track the current task - that way we can know whether there are any
 * weird events, such as a task being switched away that is not current.
 */
	int		 max_cpu;
	u32		 curr_pid[MAX_CPUS];
	struct thread	 *curr_thread[MAX_CPUS];
	char		 next_shortname1;
	char		 next_shortname2;
	unsigned int	 replay_repeat;
	unsigned long	 nr_run_events;
	unsigned long	 nr_sleep_events;
	unsigned long	 nr_wakeup_events;
	unsigned long	 nr_sleep_corrections;
	unsigned long	 nr_run_events_optimized;
	unsigned long	 targetless_wakeups;
	unsigned long	 multitarget_wakeups;
	unsigned long	 nr_runs;
	unsigned long	 nr_timestamps;
	unsigned long	 nr_unordered_timestamps;
	unsigned long	 nr_state_machine_bugs;
	unsigned long	 nr_context_switch_bugs;
	unsigned long	 nr_events;
	unsigned long	 nr_lost_chunks;
	unsigned long	 nr_lost_events;
	u64		 run_measurement_overhead;
	u64		 sleep_measurement_overhead;
	u64		 start_time;
	u64		 cpu_usage;
	u64		 runavg_cpu_usage;
	u64		 parent_cpu_usage;
	u64		 runavg_parent_cpu_usage;
	u64		 sum_runtime;
	u64		 sum_fluct;
	u64		 run_avg;
	u64		 all_runtime;
	u64		 all_count;
	u64		 cpu_last_switched[MAX_CPUS];
	struct rb_root	 atom_root, sorted_atom_root;
	struct list_head sort_list, cmp_pid;
};

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

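/*
 * Busy-loop for roughly @nsecs, compensating for the measured cost of
 * one get_nsecs() round trip (see calibrate_run_measurement_overhead()
 * below), so that short durations are not systematically overshot.
 */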
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 1000000000;
	ts.tv_sec = nsecs / 1000000000;

	nanosleep(&ts, NULL);
}

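/*
 * Estimate the fixed cost of the measurement primitives themselves by
 * taking the minimum over ten trials - the smallest observed delta is
 * assumed to be closest to the true overhead. The run overhead is
 * compensated for inside burn_nsecs().
 */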
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = 1000000000ULL;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * into it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

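/*
 * Tasks seen in the trace are interned here: pid_to_task[] is a
 * direct-indexed lookup table (hence the BUG_ON(pid >= MAX_PID) bound)
 * and tasks[] keeps registration order for the replay threads.
 */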
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;

	BUG_ON(pid >= MAX_PID);

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
		case SCHED_EVENT_RUN:
			burn_nsecs(sched, atom->duration);
			break;
		case SCHED_EVENT_SLEEP:
			if (atom->wait_sem)
				ret = sem_wait(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_WAKEUP:
			if (atom->wait_sem)
				ret = sem_post(atom->wait_sem);
			BUG_ON(ret);
			break;
		case SCHED_EVENT_MIGRATION:
			break;
		default:
			BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum =  ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
	sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;

	return sum;
}

static int self_open_counters(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);

	if (fd < 0)
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n", fd, strerror(errno));
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc  *task;
	struct perf_sched *sched;
};

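/*
 * Per-task replay worker. Each pass through the loop is one benchmark
 * round, coordinated with wait_for_tasks() roughly as follows:
 *
 *   worker: sem_post(ready_for_work)         - "I am ready"
 *   worker: lock/unlock start_work_mutex     - blocks until the parent
 *                                              releases the start barrier
 *   worker: replay its atoms, measuring its own task-clock usage
 *   worker: sem_post(work_done_sem)          - "I am done"
 *   worker: lock/unlock work_done_wait_mutex - blocks until the parent
 *                                              re-arms the next round
 */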
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd;

	free(parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	fd = self_open_counters();
	if (fd < 0)
		return NULL;
again:
	ret = sem_post(&this_task->ready_for_work);
	BUG_ON(ret);
	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->start_work_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_self(fd);

	for (i = 0; i < this_task->nr_events; i++) {
		this_task->curr_event = i;
		perf_sched__process_event(sched, this_task->atoms[i]);
	}

	cpu_usage_1 = get_cpu_usage_nsec_self(fd);
	this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
	ret = sem_post(&this_task->work_done_sem);
	BUG_ON(ret);

	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);
	ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	goto again;
}

static void create_tasks(struct perf_sched *sched)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(err);
	err = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(err);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

static void wait_for_tasks(struct perf_sched *sched)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	pthread_mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
	BUG_ON(ret);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	pthread_mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 +
					 sched->parent_cpu_usage)/10;

	ret = pthread_mutex_lock(&sched->start_work_mutex);
	BUG_ON(ret);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

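/*
 * One replay round. The running averages here are exponential moving
 * averages with a weight of 1/10 per round:
 *
 *	run_avg = (run_avg * 9 + delta) / 10
 *
 * e.g. with run_avg at 10.0 ms and a 20.0 ms round, the new average is
 * (90 + 20) / 10 = 11.0 ms, so a single outlier moves the average by
 * only a tenth of its deviation.
 */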
static void run_one_test(struct perf_sched *sched)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * 9 + delta) / 10;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0);

	printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage/1e6,
		(double)sched->runavg_parent_cpu_usage/1e6);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, 1e6);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(1e6);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct perf_evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = perf_evsel__strval(evsel, sample, "comm");
	const u32 pid	 = perf_evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm  = perf_evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm  = perf_evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel,
			     struct perf_sample *sample)
{
	const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
		   *child_comm  = perf_evsel__strval(evsel, sample, "child_comm");
	const u32 parent_pid  = perf_evsel__intval(evsel, sample, "parent_pid"),
		  child_pid  = perf_evsel__intval(evsel, sample, "child_pid");

	if (verbose) {
		printf("sched_fork event %p\n", evsel);
		printf("... parent: %s/%d\n", parent_comm, parent_pid);
		printf("...  child: %s/%d\n", child_comm, child_pid);
	}

	register_pid(sched, parent_pid, parent_comm);
	register_pid(sched, child_pid, child_comm);
	return 0;
}

struct sort_dimension {
	const char		*name;
	sort_fn_t		cmp;
	struct list_head	list;
};

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

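/*
 * Per-thread work_atoms live in an rbtree ordered by thread_lat_cmp()
 * over the given sort key list (cmp_pid while collecting, so lookups
 * go by pid; the user's --sort keys when re-sorting for output).
 * thread_atoms_search() and __thread_latency_insert() below walk the
 * tree with the same comparator.
 */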
static struct work_atoms *
thread_atoms_search(struct rb_root *root, struct thread *thread,
			 struct list_head *sort_list)
{
	struct rb_node *node = root->rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

static void
__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
			 struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread;
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static int latency_fork_event(struct perf_sched *sched __maybe_unused,
			      struct perf_evsel *evsel __maybe_unused,
			      struct perf_sample *sample __maybe_unused)
{
	/* should insert the newcomer */
	return 0;
}

static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

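/*
 * Close out the latest atom on a sched-in: the wakeup latency is the
 * time from wake_up_time to sched_in_time. Atoms woken "in the future"
 * relative to this switch are marked THREAD_IGNORE rather than being
 * counted with a bogus negative delay.
 */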
static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_at = timestamp;
	}
	atoms->nb_atoms++;
}

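/*
 * Latency-mode sched_switch handler: file a sched-out atom for the
 * task leaving the CPU (in the wakeup state implied by prev_state) and
 * complete the pending atom of the task coming in.
 */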
static int latency_switch_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, prev_pid);
	sched_in = machine__findnew_thread(machine, next_pid);

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			return -1;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			return -1;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			return -1;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			return -1;
		}
		/*
		 * Task came in that we have not heard about yet;
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			return -1;
	}
	add_sched_in_event(in_events, timestamp);

	return 0;
}

static int latency_runtime_event(struct perf_sched *sched,
				 struct perf_evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid	   = perf_evsel__intval(evsel, sample, "pid");
	const u64 runtime  = perf_evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			return -1;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	add_runtime_event(atoms, runtime, timestamp);
	return 0;
}

static int latency_wakeup_event(struct perf_sched *sched,
				struct perf_evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid"),
		  success = perf_evsel__intval(evsel, sample, "success");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;

	/* Note for later, it may be interesting to observe the failing cases */
	if (!success)
		return 0;

	wakee = machine__findnew_thread(machine, pid);
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			return -1;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * You WILL be missing events if you've recorded only
	 * one CPU, or are looking at only one, so don't
	 * make useless noise.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		sched->nr_state_machine_bugs++;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		return 0;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
	return 0;
}

static int latency_migrate_task_event(struct perf_sched *sched,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, pid);
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			return -1;
		register_pid(sched, migrant->pid, migrant->comm);
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			return -1;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			return -1;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;

	return 0;
}

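/*
 * Print one row of the latency table; the columns line up with the
 * header emitted in perf_sched__lat(): total runtime, switch count,
 * average delay, maximum delay and the timestamp of that maximum.
 */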
static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(work_list->thread->comm, "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count   += work_list->nb_atoms;

	ret = printf("  %s:%d ", work_list->thread->comm, work_list->thread->pid);

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;

	printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %9.6f s\n",
	      (double)work_list->total_runtime / 1e6,
		 work_list->nb_atoms, (double)avg / 1e6,
		 (double)work_list->max_lat / 1e6,
		 (double)work_list->max_lat_at / 1e9);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->thread->pid < r->thread->pid)
		return -1;
	if (l->thread->pid > r->thread->pid)
		return 1;

	return 0;
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp  = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp  = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;

	for (;;) {
		struct work_atoms *data;
		node = rb_first(&sched->atom_root);
		if (!node)
			break;

		rb_erase(node, &sched->atom_root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
}

static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}

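/*
 * 'perf sched map' handler: prints one row per switch, with a column
 * per CPU. Each thread gets a two-character shortname handed out in
 * the sequence A0, B0, ... Z0, A1, ... as it is first seen; '*' marks
 * the CPU this switch happened on, '.' is the idle thread. An
 * illustrative row (values invented for the example):
 *
 *   *A0  B0  .       0.523011 secs A0 => migration/0:6
 */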
static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_out __maybe_unused, *sched_in;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int cpu, this_cpu = sample->cpu;

	BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);

	if (this_cpu > sched->max_cpu)
		sched->max_cpu = this_cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu];
	sched->cpu_last_switched[this_cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, prev_pid);
	sched_in = machine__findnew_thread(machine, next_pid);

	sched->curr_thread[this_cpu] = sched_in;

	printf("  ");

	new_shortname = 0;
	if (!sched_in->shortname[0]) {
		sched_in->shortname[0] = sched->next_shortname1;
		sched_in->shortname[1] = sched->next_shortname2;

		if (sched->next_shortname1 < 'Z') {
			sched->next_shortname1++;
		} else {
			sched->next_shortname1='A';
			if (sched->next_shortname2 < '9') {
				sched->next_shortname2++;
			} else {
				sched->next_shortname2='0';
			}
		}
		new_shortname = 1;
	}

	for (cpu = 0; cpu <= sched->max_cpu; cpu++) {
		if (cpu != this_cpu)
			printf(" ");
		else
			printf("*");

		if (sched->curr_thread[cpu]) {
			if (sched->curr_thread[cpu]->pid)
				printf("%2s ", sched->curr_thread[cpu]->shortname);
			else
				printf(".  ");
		} else
			printf("   ");
	}

	printf("  %12.6f secs ", (double)timestamp/1e9);
	if (new_shortname) {
		printf("%s => %s:%d\n",
			sched_in->shortname, sched_in->comm, sched_in->pid);
	} else {
		printf("\n");
	}

	return 0;
}

static int process_sched_switch_event(struct perf_tool *tool,
				      struct perf_evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
	int this_cpu = sample->cpu, err = 0;
	u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
	    next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	if (sched->curr_pid[this_cpu] != (u32)-1) {
		/*
		 * Are we trying to switch away a PID that is
		 * not current?
		 */
		if (sched->curr_pid[this_cpu] != prev_pid)
			sched->nr_context_switch_bugs++;
	}

	if (sched->tp_handler->switch_event)
		err = sched->tp_handler->switch_event(sched, evsel, sample, machine);

	sched->curr_pid[this_cpu] = next_pid;
	return err;
}

static int process_sched_runtime_event(struct perf_tool *tool,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->runtime_event)
		return sched->tp_handler->runtime_event(sched, evsel, sample, machine);

	return 0;
}

static int process_sched_fork_event(struct perf_tool *tool,
				    struct perf_evsel *evsel,
				    struct perf_sample *sample,
				    struct machine *machine __maybe_unused)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->fork_event)
		return sched->tp_handler->fork_event(sched, evsel, sample);

	return 0;
}

static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
				    struct perf_evsel *evsel,
				    struct perf_sample *sample __maybe_unused,
				    struct machine *machine __maybe_unused)
{
	pr_debug("sched_exit event %p\n", evsel);
	return 0;
}

static int process_sched_migrate_task_event(struct perf_tool *tool,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->migrate_task_event)
		return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct perf_evsel *evsel,
						 struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->tid);
	int err = 0;

	if (thread == NULL) {
		pr_debug("problem processing %s event, skipping it.\n",
			 perf_evsel__name(evsel));
		return -1;
	}

	evsel->hists.stats.total_period += sample->period;
	hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);

	if (evsel->handler.func != NULL) {
		tracepoint_handler f = evsel->handler.func;
		err = f(tool, evsel, sample, machine);
	}

	return err;
}

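/*
 * Open the perf.data file and feed each sched tracepoint sample to the
 * handler named in the table below; which of the latency/map/replay
 * callbacks actually runs behind them is decided by sched->tp_handler.
 * When @destroy is false, the session is handed back to the caller via
 * @psession and must be deleted there.
 */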
static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
				   struct perf_session **psession)
{
	const struct perf_evsel_str_handler handlers[] = {
		{ "sched:sched_switch",	      process_sched_switch_event, },
		{ "sched:sched_stat_runtime", process_sched_runtime_event, },
		{ "sched:sched_wakeup",	      process_sched_wakeup_event, },
		{ "sched:sched_wakeup_new",   process_sched_wakeup_event, },
		{ "sched:sched_process_fork", process_sched_fork_event, },
		{ "sched:sched_process_exit", process_sched_exit_event, },
		{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
	};
	struct perf_session *session;

	session = perf_session__new(input_name, O_RDONLY, 0, false, &sched->tool);
	if (session == NULL) {
		pr_debug("No memory for session\n");
		return -1;
	}

	if (perf_session__set_tracepoints_handlers(session, handlers))
		goto out_delete;

	if (perf_session__has_traces(session, "record -R")) {
		int err = perf_session__process_events(session, &sched->tool);
		if (err) {
			pr_err("Failed to process events, error %d", err);
			goto out_delete;
		}

		sched->nr_events      = session->hists.stats.nr_events[0];
		sched->nr_lost_events = session->hists.stats.total_lost;
		sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST];
	}

	if (destroy)
		perf_session__delete(session);

	if (psession)
		*psession = session;

	return 0;

out_delete:
	perf_session__delete(session);
	return -1;
}

static void print_bad_events(struct perf_sched *sched)
{
	if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
		printf("  INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
			(double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
			sched->nr_unordered_timestamps, sched->nr_timestamps);
	}
	if (sched->nr_lost_events && sched->nr_events) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
			(double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
			sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
	}
	if (sched->nr_state_machine_bugs && sched->nr_timestamps) {
		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
			(double)sched->nr_state_machine_bugs/(double)sched->nr_timestamps*100.0,
			sched->nr_state_machine_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
			(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
			sched->nr_context_switch_bugs, sched->nr_timestamps);
		if (sched->nr_lost_events)
			printf(" (due to lost events?)");
		printf("\n");
	}
}

static int perf_sched__lat(struct perf_sched *sched)
{
	struct rb_node *next;
	struct perf_session *session;

	setup_pager();
	if (perf_sched__read_events(sched, false, &session))
		return -1;
	perf_sched__sort_lat(sched);

	printf("\n ---------------------------------------------------------------------------------------------------------------\n");
	printf("  Task                  |   Runtime ms  | Switches | Average delay ms | Maximum delay ms | Maximum delay at     |\n");
	printf(" ---------------------------------------------------------------------------------------------------------------\n");

	next = rb_first(&sched->sorted_atom_root);

	while (next) {
		struct work_atoms *work_list;

		work_list = rb_entry(next, struct work_atoms, node);
		output_lat_thread(sched, work_list);
		next = rb_next(next);
	}

	printf(" -----------------------------------------------------------------------------------------\n");
	printf("  TOTAL:                |%11.3f ms |%9" PRIu64 " |\n",
		(double)sched->all_runtime / 1e6, sched->all_count);

	printf(" ---------------------------------------------------\n");

	print_bad_events(sched);
	printf("\n");

	perf_session__delete(session);
	return 0;
}

static int perf_sched__map(struct perf_sched *sched)
{
	sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);

	setup_pager();
	if (perf_sched__read_events(sched, true, NULL))
		return -1;
	print_bad_events(sched);
	return 0;
}

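/*
 * 'perf sched replay': calibrate the measurement primitives, rebuild
 * the per-task event lists from the trace, connect the tasks into a
 * ring of cross-task wakeups, then spawn one thread per recorded task
 * and run the whole schedule replay_repeat times.
 */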
static int perf_sched__replay(struct perf_sched *sched)
{
	unsigned long i;

	calibrate_run_measurement_overhead(sched);
	calibrate_sleep_measurement_overhead(sched);

	test_calibrations(sched);

	if (perf_sched__read_events(sched, true, NULL))
		return -1;

	printf("nr_run_events:        %ld\n", sched->nr_run_events);
	printf("nr_sleep_events:      %ld\n", sched->nr_sleep_events);
	printf("nr_wakeup_events:     %ld\n", sched->nr_wakeup_events);

	if (sched->targetless_wakeups)
		printf("target-less wakeups:  %ld\n", sched->targetless_wakeups);
	if (sched->multitarget_wakeups)
		printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
	if (sched->nr_run_events_optimized)
		printf("run atoms optimized: %ld\n",
			sched->nr_run_events_optimized);

	print_task_traces(sched);
	add_cross_task_wakeups(sched);

	create_tasks(sched);
	printf("------------------------------------------------------------\n");
	for (i = 0; i < sched->replay_repeat; i++)
		run_one_test(sched);

	return 0;
}

static void setup_sorting(struct perf_sched *sched, const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(sched->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok, &sched->sort_list) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(usage_msg, options);
		}
	}

	free(str);

	sort_dimension__add("pid", &sched->cmp_pid);
}

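/*
 * 'perf sched record' simply forwards to 'perf record' with the sched
 * tracepoints pre-selected, appending any extra user arguments. For
 * example (illustrative):
 *
 *	perf sched record sleep 1
 *
 * becomes 'perf record -a -R -f -m 1024 -c 1 -e sched:... sleep 1'.
 */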
static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	const char * const record_args[] = {
		"record",
		"-a",
		"-R",
		"-f",
		"-m", "1024",
		"-c", "1",
		"-e", "sched:sched_switch",
		"-e", "sched:sched_stat_wait",
		"-e", "sched:sched_stat_sleep",
		"-e", "sched:sched_stat_iowait",
		"-e", "sched:sched_stat_runtime",
		"-e", "sched:sched_process_exit",
		"-e", "sched:sched_process_fork",
		"-e", "sched:sched_wakeup",
		"-e", "sched:sched_migrate_task",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char default_sort_order[] = "avg, max, switch, runtime";
	struct perf_sched sched = {
		.tool = {
			.sample		 = perf_sched__process_tracepoint_sample,
			.comm		 = perf_event__process_comm,
			.lost		 = perf_event__process_lost,
			.exit		 = perf_event__process_exit,
			.fork		 = perf_event__process_fork,
			.ordered_samples = true,
		},
		.cmp_pid	      = LIST_HEAD_INIT(sched.cmp_pid),
		.sort_list	      = LIST_HEAD_INIT(sched.sort_list),
		.start_work_mutex     = PTHREAD_MUTEX_INITIALIZER,
		.work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
		.curr_pid	      = { [0 ... MAX_CPUS - 1] = -1 },
		.sort_order	      = default_sort_order,
		.replay_repeat	      = 10,
		.profile_cpu	      = -1,
		.next_shortname1      = 'A',
		.next_shortname2      = '0',
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, switch, avg, max"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_INTEGER('C', "CPU", &sched.profile_cpu,
		    "CPU to profile on"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
	};
	const struct option replay_options[] = {
	OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
		     "repeat the workload replay N times (-1: infinite)"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
	};
	const struct option sched_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_END()
	};
	const char * const latency_usage[] = {
		"perf sched latency [<options>]",
		NULL
	};
	const char * const replay_usage[] = {
		"perf sched replay [<options>]",
		NULL
	};
	const char * const sched_usage[] = {
		"perf sched [<options>] {record|latency|map|replay|script}",
		NULL
	};
	struct trace_sched_handler lat_ops  = {
		.wakeup_event	    = latency_wakeup_event,
		.switch_event	    = latency_switch_event,
		.runtime_event	    = latency_runtime_event,
		.fork_event	    = latency_fork_event,
		.migrate_task_event = latency_migrate_task_event,
	};
	struct trace_sched_handler map_ops  = {
		.switch_event	    = map_switch_event,
	};
	struct trace_sched_handler replay_ops  = {
		.wakeup_event	    = replay_wakeup_event,
		.switch_event	    = replay_switch_event,
		.fork_event	    = replay_fork_event,
	};

	argc = parse_options(argc, argv, sched_options, sched_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(sched_usage, sched_options);

	/*
	 * Aliased to 'perf script' for now:
	 */
	if (!strcmp(argv[0], "script"))
		return cmd_script(argc, argv, prefix);

	symbol__init();
	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strncmp(argv[0], "lat", 3)) {
		sched.tp_handler = &lat_ops;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__lat(&sched);
	} else if (!strcmp(argv[0], "map")) {
		sched.tp_handler = &map_ops;
		setup_sorting(&sched, latency_options, latency_usage);
		return perf_sched__map(&sched);
	} else if (!strncmp(argv[0], "rep", 3)) {
		sched.tp_handler = &replay_ops;
		if (argc) {
			argc = parse_options(argc, argv, replay_options, replay_usage, 0);
			if (argc)
				usage_with_options(replay_usage, replay_options);
		}
		return perf_sched__replay(&sched);
	} else {
		usage_with_options(sched_usage, sched_options);
	}

	return 0;
}