// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf-sys.h"

#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/cloexec.h"
#include "util/thread_map.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/time-utils.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"

#include "util/debug.h"
#include "util/event.h"
#include "util/util.h"

#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <inttypes.h>

#include <errno.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#include <api/fs/fs.h>
#include <perf/cpumap.h>
#include <linux/time64.h>
#include <linux/err.h>

#include <linux/ctype.h>

#define PR_SET_NAME	15	/* Set process name */
#define MAX_CPUS	4096
#define COMM_LEN	20
#define SYM_LEN		129
#define MAX_PID		1024000
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);

struct sched_atom;

struct task_desc {
	unsigned long nr;
	unsigned long pid;
	char comm[COMM_LEN];

	unsigned long nr_events;
	unsigned long curr_event;
	struct sched_atom **atoms;

	pthread_t thread;
	sem_t sleep_sem;

	sem_t ready_for_work;
	sem_t work_done_sem;

	u64 cpu_usage;
};

enum sched_event_type {
	SCHED_EVENT_RUN,
	SCHED_EVENT_SLEEP,
	SCHED_EVENT_WAKEUP,
	SCHED_EVENT_MIGRATION,
};

struct sched_atom {
	enum sched_event_type type;
	int specific_wait;
	u64 timestamp;
	u64 duration;
	unsigned long nr;
	sem_t *wait_sem;
	struct task_desc *wakee;
};

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"

/* task state bitmask, copied from include/linux/sched.h */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_DEAD		16
#define EXIT_ZOMBIE		32
#define EXIT_TRACE		(EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_PARKED		512

enum thread_state {
	THREAD_SLEEPING = 0,
	THREAD_WAIT_CPU,
	THREAD_SCHED_IN,
	THREAD_IGNORE
};

struct work_atom {
	struct list_head list;
	enum thread_state state;
	u64 sched_out_time;
	u64 wake_up_time;
	u64 sched_in_time;
	u64 runtime;
};

struct work_atoms {
	struct list_head work_list;
	struct thread *thread;
	struct rb_node node;
	u64 max_lat;
	u64 max_lat_start;
	u64 max_lat_end;
	u64 total_lat;
	u64 nb_atoms;
	u64 total_runtime;
	int num_merged;
};

typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);

struct perf_sched;

struct trace_sched_handler {
	int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
			     struct perf_sample *sample, struct machine *machine);

	int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine);

	/* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
	int (*fork_event)(struct perf_sched *sched, union perf_event *event,
			  struct machine *machine);

	int (*migrate_task_event)(struct perf_sched *sched,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);
};

#define COLOR_PIDS PERF_COLOR_BLUE
#define COLOR_CPUS PERF_COLOR_BG_RED

struct perf_sched_map {
	DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
	struct perf_cpu *comp_cpus;
	bool comp;
	struct perf_thread_map *color_pids;
	const char *color_pids_str;
	struct perf_cpu_map *color_cpus;
	const char *color_cpus_str;
	struct perf_cpu_map *cpus;
	const char *cpus_str;
};

struct perf_sched {
	struct perf_tool tool;
	const char *sort_order;
	unsigned long nr_tasks;
	struct task_desc **pid_to_task;
	struct task_desc **tasks;
	const struct trace_sched_handler *tp_handler;
	struct mutex start_work_mutex;
	struct mutex work_done_wait_mutex;
	int profile_cpu;
	/*
	 * Track the current task - that way we can know whether there are
	 * any weird events, such as a task being switched away that is not
	 * current.
	 */
	struct perf_cpu max_cpu;
	u32 *curr_pid;
	struct thread **curr_thread;
	char next_shortname1;
	char next_shortname2;
	unsigned int replay_repeat;
	unsigned long nr_run_events;
	unsigned long nr_sleep_events;
	unsigned long nr_wakeup_events;
	unsigned long nr_sleep_corrections;
	unsigned long nr_run_events_optimized;
	unsigned long targetless_wakeups;
	unsigned long multitarget_wakeups;
	unsigned long nr_runs;
	unsigned long nr_timestamps;
	unsigned long nr_unordered_timestamps;
	unsigned long nr_context_switch_bugs;
	unsigned long nr_events;
	unsigned long nr_lost_chunks;
	unsigned long nr_lost_events;
	u64 run_measurement_overhead;
	u64 sleep_measurement_overhead;
	u64 start_time;
	u64 cpu_usage;
	u64 runavg_cpu_usage;
	u64 parent_cpu_usage;
	u64 runavg_parent_cpu_usage;
	u64 sum_runtime;
	u64 sum_fluct;
	u64 run_avg;
	u64 all_runtime;
	u64 all_count;
	u64 *cpu_last_switched;
	struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root;
	struct list_head sort_list, cmp_pid;
	bool force;
	bool skip_merge;
	struct perf_sched_map map;

	/* options for timehist command */
	bool summary;
	bool summary_only;
	bool idle_hist;
	bool show_callchain;
	unsigned int max_stack;
	bool show_cpu_visual;
	bool show_wakeups;
	bool show_next;
	bool show_migrations;
	bool show_state;
	u64 skipped_samples;
	const char *time_str;
	struct perf_time_interval ptime;
	struct perf_time_interval hist_time;
	volatile bool thread_funcs_exit;
};

/* per thread run time data */
struct thread_runtime {
	u64 last_time;    /* time of previous sched in/out event */
	u64 dt_run;       /* run time */
	u64 dt_sleep;     /* time between CPU access by sleep (off cpu) */
	u64 dt_iowait;    /* time between CPU access by iowait (off cpu) */
	u64 dt_preempt;   /* time between CPU access by preempt (off cpu) */
	u64 dt_delay;     /* time between wakeup and sched-in */
	u64 ready_to_run; /* time of wakeup */

	struct stats run_stats;
	u64 total_run_time;
	u64 total_sleep_time;
	u64 total_iowait_time;
	u64 total_preempt_time;
	u64 total_delay_time;

	int last_state;

	char shortname[3];
	bool comm_changed;

	u64 migrations;
};

/* per event run time data */
struct evsel_runtime {
	u64 *last_time; /* time this event was last seen per cpu */
	u32 ncpu;       /* highest cpu slot allocated */
};

/* per cpu idle time data */
struct idle_thread_runtime {
	struct thread_runtime tr;
	struct thread *last_thread;
	struct rb_root_cached sorted_root;
	struct callchain_root callchain;
	struct callchain_cursor cursor;
};

/* track idle times per cpu */
static struct thread **idle_threads;
static int idle_max_cpu;
static char idle_comm[] = "<idle>";

static u64 get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

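/*
 * Burn roughly "nsecs" of CPU time in a busy loop. The measured cost of
 * one get_nsecs() call (see calibrate_run_measurement_overhead() below)
 * is folded into the exit condition so that short RUN atoms are not
 * systematically overshot by the timestamp reads themselves.
 */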
static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
{
	u64 T0 = get_nsecs(), T1;

	do {
		T1 = get_nsecs();
	} while (T1 + sched->run_measurement_overhead < T0 + nsecs);
}

static void sleep_nsecs(u64 nsecs)
{
	struct timespec ts;

	ts.tv_nsec = nsecs % 999999999;
	ts.tv_sec = nsecs / 999999999;

	nanosleep(&ts, NULL);
}

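/*
 * Estimate the cost of the measurement machinery itself: take ten
 * trials and keep the minimum delta. For the sleep overhead, the
 * requested 10 usec sleep is subtracted so that only the nanosleep()/
 * clock_gettime() overhead remains. These corrections keep replayed
 * runtimes close to the recorded ones.
 */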
static void calibrate_run_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		burn_nsecs(sched, 0);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	sched->run_measurement_overhead = min_delta;

	printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
{
	u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
	int i;

	for (i = 0; i < 10; i++) {
		T0 = get_nsecs();
		sleep_nsecs(10000);
		T1 = get_nsecs();
		delta = T1-T0;
		min_delta = min(min_delta, delta);
	}
	min_delta -= 10000;
	sched->sleep_measurement_overhead = min_delta;

	printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
}

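/*
 * Append a new atom to a task's event list. The atoms array grows by
 * plain realloc(); allocation failure is treated as fatal (BUG_ON),
 * consistent with the rest of the replay setup code.
 */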
static struct sched_atom *
get_new_event(struct task_desc *task, u64 timestamp)
{
	struct sched_atom *event = zalloc(sizeof(*event));
	unsigned long idx = task->nr_events;
	size_t size;

	event->timestamp = timestamp;
	event->nr = idx;

	task->nr_events++;
	size = sizeof(struct sched_atom *) * task->nr_events;
	task->atoms = realloc(task->atoms, size);
	BUG_ON(!task->atoms);

	task->atoms[idx] = event;

	return event;
}

static struct sched_atom *last_event(struct task_desc *task)
{
	if (!task->nr_events)
		return NULL;

	return task->atoms[task->nr_events - 1];
}

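/*
 * Builders for the replay atoms. A wakeup is paired with the wakee's
 * pending SLEEP atom through a shared semaphore: during replay the
 * waker sem_post()s what the sleeper sem_wait()s on. Wakeups with no
 * matching sleeper, or whose sleeper is already claimed by another
 * waker, are only counted and otherwise dropped.
 */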
static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
				u64 timestamp, u64 duration)
{
	struct sched_atom *event, *curr_event = last_event(task);

	/*
	 * optimize an existing RUN event by merging this one
	 * into it:
	 */
	if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
		sched->nr_run_events_optimized++;
		curr_event->duration += duration;
		return;
	}

	event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_RUN;
	event->duration = duration;

	sched->nr_run_events++;
}

static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
				   u64 timestamp, struct task_desc *wakee)
{
	struct sched_atom *event, *wakee_event;

	event = get_new_event(task, timestamp);
	event->type = SCHED_EVENT_WAKEUP;
	event->wakee = wakee;

	wakee_event = last_event(wakee);
	if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
		sched->targetless_wakeups++;
		return;
	}
	if (wakee_event->wait_sem) {
		sched->multitarget_wakeups++;
		return;
	}

	wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
	sem_init(wakee_event->wait_sem, 0, 0);
	wakee_event->specific_wait = 1;
	event->wait_sem = wakee_event->wait_sem;

	sched->nr_wakeup_events++;
}

static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
				  u64 timestamp, u64 task_state __maybe_unused)
{
	struct sched_atom *event = get_new_event(task, timestamp);

	event->type = SCHED_EVENT_SLEEP;

	sched->nr_sleep_events++;
}

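/*
 * Look up (or create) the replay task for a PID. pid_to_task is a
 * direct-indexed array sized from kernel/pid_max (falling back to
 * MAX_PID) and grown on demand when a larger PID shows up.
 */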
static struct task_desc *register_pid(struct perf_sched *sched,
				      unsigned long pid, const char *comm)
{
	struct task_desc *task;
	static int pid_max;

	if (sched->pid_to_task == NULL) {
		if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
			pid_max = MAX_PID;
		BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
	}
	if (pid >= (unsigned long)pid_max) {
		BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
			sizeof(struct task_desc *))) == NULL);
		while (pid >= (unsigned long)pid_max)
			sched->pid_to_task[pid_max++] = NULL;
	}

	task = sched->pid_to_task[pid];

	if (task)
		return task;

	task = zalloc(sizeof(*task));
	task->pid = pid;
	task->nr = sched->nr_tasks;
	strcpy(task->comm, comm);
	/*
	 * every task starts in sleeping state - this gets ignored
	 * if there's no wakeup pointing to this sleep state:
	 */
	add_sched_event_sleep(sched, task, 0, 0);

	sched->pid_to_task[pid] = task;
	sched->nr_tasks++;
	sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
	BUG_ON(!sched->tasks);
	sched->tasks[task->nr] = task;

	if (verbose > 0)
		printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);

	return task;
}


static void print_task_traces(struct perf_sched *sched)
{
	struct task_desc *task;
	unsigned long i;

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
			task->nr, task->comm, task->pid, task->nr_events);
	}
}

static void add_cross_task_wakeups(struct perf_sched *sched)
{
	struct task_desc *task1, *task2;
	unsigned long i, j;

	for (i = 0; i < sched->nr_tasks; i++) {
		task1 = sched->tasks[i];
		j = i + 1;
		if (j == sched->nr_tasks)
			j = 0;
		task2 = sched->tasks[j];
		add_sched_event_wakeup(sched, task1, 0, task2);
	}
}

static void perf_sched__process_event(struct perf_sched *sched,
				      struct sched_atom *atom)
{
	int ret = 0;

	switch (atom->type) {
	case SCHED_EVENT_RUN:
		burn_nsecs(sched, atom->duration);
		break;
	case SCHED_EVENT_SLEEP:
		if (atom->wait_sem)
			ret = sem_wait(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_WAKEUP:
		if (atom->wait_sem)
			ret = sem_post(atom->wait_sem);
		BUG_ON(ret);
		break;
	case SCHED_EVENT_MIGRATION:
		break;
	default:
		BUG_ON(1);
	}
}

static u64 get_cpu_usage_nsec_parent(void)
{
	struct rusage ru;
	u64 sum;
	int err;

	err = getrusage(RUSAGE_SELF, &ru);
	BUG_ON(err);

	sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
	sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;

	return sum;
}

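/*
 * Open a per-thread software task-clock counter so each replay thread
 * can measure its own on-CPU time. On EMFILE, with -f the RLIMIT_NOFILE
 * soft limit is raised (and the hard limit too, which needs privilege)
 * and the open is retried.
 */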
static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
{
	struct perf_event_attr attr;
	char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
	int fd;
	struct rlimit limit;
	bool need_privilege = false;

	memset(&attr, 0, sizeof(attr));

	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

force_again:
	fd = sys_perf_event_open(&attr, 0, -1, -1,
				 perf_event_open_cloexec_flag());

	if (fd < 0) {
		if (errno == EMFILE) {
			if (sched->force) {
				BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
				limit.rlim_cur += sched->nr_tasks - cur_task;
				if (limit.rlim_cur > limit.rlim_max) {
					limit.rlim_max = limit.rlim_cur;
					need_privilege = true;
				}
				if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
					if (need_privilege && errno == EPERM)
						strcpy(info, "Need privilege\n");
				} else
					goto force_again;
			} else
				strcpy(info, "Have a try with -f option\n");
		}
		pr_err("Error: sys_perf_event_open() syscall returned "
		       "with %d (%s)\n%s", fd,
		       str_error_r(errno, sbuf, sizeof(sbuf)), info);
		exit(EXIT_FAILURE);
	}
	return fd;
}

static u64 get_cpu_usage_nsec_self(int fd)
{
	u64 runtime;
	int ret;

	ret = read(fd, &runtime, sizeof(runtime));
	BUG_ON(ret != sizeof(runtime));

	return runtime;
}

struct sched_thread_parms {
	struct task_desc *task;
	struct perf_sched *sched;
	int fd;
};

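/*
 * Body of each replay thread. Every iteration: signal readiness, wait
 * for the parent to release start_work_mutex, replay this task's atoms
 * while measuring own CPU time via the task-clock counter, then signal
 * completion and block on work_done_wait_mutex until the next round.
 */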
static void *thread_func(void *ctx)
{
	struct sched_thread_parms *parms = ctx;
	struct task_desc *this_task = parms->task;
	struct perf_sched *sched = parms->sched;
	u64 cpu_usage_0, cpu_usage_1;
	unsigned long i, ret;
	char comm2[22];
	int fd = parms->fd;

	zfree(&parms);

	sprintf(comm2, ":%s", this_task->comm);
	prctl(PR_SET_NAME, comm2);
	if (fd < 0)
		return NULL;

	while (!sched->thread_funcs_exit) {
		ret = sem_post(&this_task->ready_for_work);
		BUG_ON(ret);
		mutex_lock(&sched->start_work_mutex);
		mutex_unlock(&sched->start_work_mutex);

		cpu_usage_0 = get_cpu_usage_nsec_self(fd);

		for (i = 0; i < this_task->nr_events; i++) {
			this_task->curr_event = i;
			perf_sched__process_event(sched, this_task->atoms[i]);
		}

		cpu_usage_1 = get_cpu_usage_nsec_self(fd);
		this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
		ret = sem_post(&this_task->work_done_sem);
		BUG_ON(ret);

		mutex_lock(&sched->work_done_wait_mutex);
		mutex_unlock(&sched->work_done_wait_mutex);
	}
	return NULL;
}

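/*
 * Spawn one pthread per recorded task. Both gate mutexes are taken
 * before any thread is created, so the workers block in thread_func()
 * until wait_for_tasks() releases them; the lock annotations are for
 * clang's thread-safety analysis.
 */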
static void create_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
	EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
{
	struct task_desc *task;
	pthread_attr_t attr;
	unsigned long i;
	int err;

	err = pthread_attr_init(&attr);
	BUG_ON(err);
	err = pthread_attr_setstacksize(&attr,
			(size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
	BUG_ON(err);
	mutex_lock(&sched->start_work_mutex);
	mutex_lock(&sched->work_done_wait_mutex);
	for (i = 0; i < sched->nr_tasks; i++) {
		struct sched_thread_parms *parms = malloc(sizeof(*parms));
		BUG_ON(parms == NULL);
		parms->task = task = sched->tasks[i];
		parms->sched = sched;
		parms->fd = self_open_counters(sched, i);
		sem_init(&task->sleep_sem, 0, 0);
		sem_init(&task->ready_for_work, 0, 0);
		sem_init(&task->work_done_sem, 0, 0);
		task->curr_event = 0;
		err = pthread_create(&task->thread, &attr, thread_func, parms);
		BUG_ON(err);
	}
}

static void destroy_tasks(struct perf_sched *sched)
	UNLOCK_FUNCTION(sched->start_work_mutex)
	UNLOCK_FUNCTION(sched->work_done_wait_mutex)
{
	struct task_desc *task;
	unsigned long i;
	int err;

	mutex_unlock(&sched->start_work_mutex);
	mutex_unlock(&sched->work_done_wait_mutex);
	/* Get rid of threads so they won't be upset by mutex destruction */
	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		err = pthread_join(task->thread, NULL);
		BUG_ON(err);
		sem_destroy(&task->sleep_sem);
		sem_destroy(&task->ready_for_work);
		sem_destroy(&task->work_done_sem);
	}
}

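/*
 * Run one replay round: release the workers, wait for all of them to
 * report ready, sample parent CPU usage around the working phase, then
 * collect per-task CPU usage and re-arm the semaphores for the next
 * round. The running averages are updated with weight 1/replay_repeat.
 */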
static void wait_for_tasks(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
	u64 cpu_usage_0, cpu_usage_1;
	struct task_desc *task;
	unsigned long i, ret;

	sched->start_time = get_nsecs();
	sched->cpu_usage = 0;
	mutex_unlock(&sched->work_done_wait_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->ready_for_work);
		BUG_ON(ret);
		sem_init(&task->ready_for_work, 0, 0);
	}
	mutex_lock(&sched->work_done_wait_mutex);

	cpu_usage_0 = get_cpu_usage_nsec_parent();

	mutex_unlock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		ret = sem_wait(&task->work_done_sem);
		BUG_ON(ret);
		sem_init(&task->work_done_sem, 0, 0);
		sched->cpu_usage += task->cpu_usage;
		task->cpu_usage = 0;
	}

	cpu_usage_1 = get_cpu_usage_nsec_parent();
	if (!sched->runavg_cpu_usage)
		sched->runavg_cpu_usage = sched->cpu_usage;
	sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;

	sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
	if (!sched->runavg_parent_cpu_usage)
		sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
	sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
					  sched->parent_cpu_usage)/sched->replay_repeat;

	mutex_lock(&sched->start_work_mutex);

	for (i = 0; i < sched->nr_tasks; i++) {
		task = sched->tasks[i];
		sem_init(&task->sleep_sem, 0, 0);
		task->curr_event = 0;
	}
}

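/*
 * Time one replay round and print the per-run wall time, the running
 * average (weighted 1/replay_repeat, like the CPU-usage averages above)
 * and measured vs. averaged CPU usage, all in msecs.
 */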
static void run_one_test(struct perf_sched *sched)
	EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
	EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
	u64 T0, T1, delta, avg_delta, fluct;

	T0 = get_nsecs();
	wait_for_tasks(sched);
	T1 = get_nsecs();

	delta = T1 - T0;
	sched->sum_runtime += delta;
	sched->nr_runs++;

	avg_delta = sched->sum_runtime / sched->nr_runs;
	if (delta < avg_delta)
		fluct = avg_delta - delta;
	else
		fluct = delta - avg_delta;
	sched->sum_fluct += fluct;
	if (!sched->run_avg)
		sched->run_avg = delta;
	sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;

	printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);

	printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);

	printf("cpu: %0.2f / %0.2f",
		(double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);

#if 0
	/*
	 * rusage statistics done by the parent, these are less
	 * accurate than the sched->sum_exec_runtime based statistics:
	 */
	printf(" [%0.2f / %0.2f]",
		(double)sched->parent_cpu_usage / NSEC_PER_MSEC,
		(double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
#endif

	printf("\n");

	if (sched->nr_sleep_corrections)
		printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
	sched->nr_sleep_corrections = 0;
}

static void test_calibrations(struct perf_sched *sched)
{
	u64 T0, T1;

	T0 = get_nsecs();
	burn_nsecs(sched, NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);

	T0 = get_nsecs();
	sleep_nsecs(NSEC_PER_MSEC);
	T1 = get_nsecs();

	printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
}

static int
replay_wakeup_event(struct perf_sched *sched,
		    struct evsel *evsel, struct perf_sample *sample,
		    struct machine *machine __maybe_unused)
{
	const char *comm = evsel__strval(evsel, sample, "comm");
	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct task_desc *waker, *wakee;

	if (verbose > 0) {
		printf("sched_wakeup event %p\n", evsel);

		printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
	}

	waker = register_pid(sched, sample->tid, "<unknown>");
	wakee = register_pid(sched, pid, comm);

	add_sched_event_wakeup(sched, waker, sample->time, wakee);
	return 0;
}

static int replay_switch_event(struct perf_sched *sched,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine __maybe_unused)
{
	const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
		   *next_comm = evsel__strval(evsel, sample, "next_comm");
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
	struct task_desc *prev, __maybe_unused *next;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu;
	s64 delta;

	if (verbose > 0)
		printf("sched_switch event %p\n", evsel);

	if (cpu >= MAX_CPUS || cpu < 0)
		return 0;

	timestamp0 = sched->cpu_last_switched[cpu];
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
		 prev_comm, prev_pid, next_comm, next_pid, delta);

	prev = register_pid(sched, prev_pid, prev_comm);
	next = register_pid(sched, next_pid, next_comm);

	sched->cpu_last_switched[cpu] = timestamp;

	add_sched_event_run(sched, prev, timestamp, delta);
	add_sched_event_sleep(sched, prev, timestamp, prev_state);

	return 0;
}

static int replay_fork_event(struct perf_sched *sched,
			     union perf_event *event,
			     struct machine *machine)
{
	struct thread *child, *parent;

	child = machine__findnew_thread(machine, event->fork.pid,
					event->fork.tid);
	parent = machine__findnew_thread(machine, event->fork.ppid,
					 event->fork.ptid);

	if (child == NULL || parent == NULL) {
		pr_debug("thread does not exist on fork event: child %p, parent %p\n",
			 child, parent);
		goto out_put;
	}

	if (verbose > 0) {
		printf("fork event\n");
		printf("... parent: %s/%d\n", thread__comm_str(parent), thread__tid(parent));
		printf("... child: %s/%d\n", thread__comm_str(child), thread__tid(child));
	}

	register_pid(sched, thread__tid(parent), thread__comm_str(parent));
	register_pid(sched, thread__tid(child), thread__comm_str(child));
out_put:
	thread__put(child);
	thread__put(parent);
	return 0;
}

struct sort_dimension {
	const char *name;
	sort_fn_t cmp;
	struct list_head list;
};

/*
 * handle runtime stats saved per thread
 */
static struct thread_runtime *thread__init_runtime(struct thread *thread)
{
	struct thread_runtime *r;

	r = zalloc(sizeof(struct thread_runtime));
	if (!r)
		return NULL;

	init_stats(&r->run_stats);
	thread__set_priv(thread, r);

	return r;
}

static struct thread_runtime *thread__get_runtime(struct thread *thread)
{
	struct thread_runtime *tr;

	tr = thread__priv(thread);
	if (tr == NULL) {
		tr = thread__init_runtime(thread);
		if (tr == NULL)
			pr_debug("Failed to malloc memory for runtime data.\n");
	}

	return tr;
}

static int
thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
{
	struct sort_dimension *sort;
	int ret = 0;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct work_atoms *
thread_atoms_search(struct rb_root_cached *root, struct thread *thread,
		    struct list_head *sort_list)
{
	struct rb_node *node = root->rb_root.rb_node;
	struct work_atoms key = { .thread = thread };

	while (node) {
		struct work_atoms *atoms;
		int cmp;

		atoms = container_of(node, struct work_atoms, node);

		cmp = thread_lat_cmp(sort_list, &key, atoms);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			BUG_ON(thread != atoms->thread);
			return atoms;
		}
	}
	return NULL;
}

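/*
 * Insert a work_atoms node into an rb_root_cached, ordered by the sort
 * keys in sort_list; "leftmost" tracks whether the new node becomes the
 * cached leftmost (smallest) entry. Equal keys go to the right, so
 * duplicates are allowed.
 */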
static void
__thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
	bool leftmost = true;

	while (*new) {
		struct work_atoms *this;
		int cmp;

		this = container_of(*new, struct work_atoms, node);
		parent = *new;

		cmp = thread_lat_cmp(sort_list, data, this);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}

static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
{
	struct work_atoms *atoms = zalloc(sizeof(*atoms));
	if (!atoms) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atoms->thread = thread__get(thread);
	INIT_LIST_HEAD(&atoms->work_list);
	__thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
	return 0;
}

static char sched_out_state(u64 prev_state)
{
	const char *str = TASK_STATE_TO_CHAR_STR;

	return str[prev_state];
}

static int
add_sched_out_event(struct work_atoms *atoms,
		    char run_state,
		    u64 timestamp)
{
	struct work_atom *atom = zalloc(sizeof(*atom));
	if (!atom) {
		pr_err("No memory at %s\n", __func__);
		return -1;
	}

	atom->sched_out_time = timestamp;

	if (run_state == 'R') {
		atom->state = THREAD_WAIT_CPU;
		atom->wake_up_time = atom->sched_out_time;
	}

	list_add_tail(&atom->list, &atoms->work_list);
	return 0;
}

static void
add_runtime_event(struct work_atoms *atoms, u64 delta,
		  u64 timestamp __maybe_unused)
{
	struct work_atom *atom;

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	atom->runtime += delta;
	atoms->total_runtime += delta;
}

static void
add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
{
	struct work_atom *atom;
	u64 delta;

	if (list_empty(&atoms->work_list))
		return;

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	if (atom->state != THREAD_WAIT_CPU)
		return;

	if (timestamp < atom->wake_up_time) {
		atom->state = THREAD_IGNORE;
		return;
	}

	atom->state = THREAD_SCHED_IN;
	atom->sched_in_time = timestamp;

	delta = atom->sched_in_time - atom->wake_up_time;
	atoms->total_lat += delta;
	if (delta > atoms->max_lat) {
		atoms->max_lat = delta;
		atoms->max_lat_start = atom->wake_up_time;
		atoms->max_lat_end = timestamp;
	}
	atoms->nb_atoms++;
}

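/*
 * Latency-mode sched_switch handler: close the sched-out side of the
 * outgoing thread's current atom and open/complete the sched-in side
 * for the incoming thread, creating per-thread atom lists on first
 * sight of a thread.
 */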
static int latency_switch_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
		  next_pid = evsel__intval(evsel, sample, "next_pid");
	const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
	struct work_atoms *out_events, *in_events;
	struct thread *sched_out, *sched_in;
	u64 timestamp0, timestamp = sample->time;
	int cpu = sample->cpu, err = -1;
	s64 delta;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);

	timestamp0 = sched->cpu_last_switched[cpu];
	sched->cpu_last_switched[cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_out = machine__findnew_thread(machine, -1, prev_pid);
	sched_in = machine__findnew_thread(machine, -1, next_pid);
	if (sched_out == NULL || sched_in == NULL)
		goto out_put;

	out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
	if (!out_events) {
		if (thread_atoms_insert(sched, sched_out))
			goto out_put;
		out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
		if (!out_events) {
			pr_err("out-event: Internal tree error");
			goto out_put;
		}
	}
	if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
		return -1;

	in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
	if (!in_events) {
		if (thread_atoms_insert(sched, sched_in))
			goto out_put;
		in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
		if (!in_events) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		/*
		 * A task came in that we have not heard about yet;
		 * add an initial atom in runnable state:
		 */
		if (add_sched_out_event(in_events, 'R', timestamp))
			goto out_put;
	}
	add_sched_in_event(in_events, timestamp);
	err = 0;
out_put:
	thread__put(sched_out);
	thread__put(sched_in);
	return err;
}

static int latency_runtime_event(struct perf_sched *sched,
				 struct evsel *evsel,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	const u64 runtime = evsel__intval(evsel, sample, "runtime");
	struct thread *thread = machine__findnew_thread(machine, -1, pid);
	struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
	u64 timestamp = sample->time;
	int cpu = sample->cpu, err = -1;

	if (thread == NULL)
		return -1;

	BUG_ON(cpu >= MAX_CPUS || cpu < 0);
	if (!atoms) {
		if (thread_atoms_insert(sched, thread))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
		if (!atoms) {
			pr_err("in-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	add_runtime_event(atoms, runtime, timestamp);
	err = 0;
out_put:
	thread__put(thread);
	return err;
}

static int latency_wakeup_event(struct perf_sched *sched,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *wakee;
	u64 timestamp = sample->time;
	int err = -1;

	wakee = machine__findnew_thread(machine, -1, pid);
	if (wakee == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, wakee))
			goto out_put;
		atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
		if (!atoms) {
			pr_err("wakeup-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'S', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);

	/*
	 * We do not guarantee that a wakeup event only happens when the
	 * task is off the run queue: it may also happen while the task is
	 * still on the run queue and the wakeup merely changes ->state to
	 * TASK_RUNNING. In that case we should not set ->wake_up_time.
	 *
	 * However, you WILL be missing events if you've recorded only
	 * one CPU, or are only looking at one, so don't skip in that
	 * case.
	 */
	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
		goto out_ok;

	sched->nr_timestamps++;
	if (atom->sched_out_time > timestamp) {
		sched->nr_unordered_timestamps++;
		goto out_ok;
	}

	atom->state = THREAD_WAIT_CPU;
	atom->wake_up_time = timestamp;
out_ok:
	err = 0;
out_put:
	thread__put(wakee);
	return err;
}

static int latency_migrate_task_event(struct perf_sched *sched,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	const u32 pid = evsel__intval(evsel, sample, "pid");
	u64 timestamp = sample->time;
	struct work_atoms *atoms;
	struct work_atom *atom;
	struct thread *migrant;
	int err = -1;

	/*
	 * Only need to worry about migration when profiling one CPU.
	 */
	if (sched->profile_cpu == -1)
		return 0;

	migrant = machine__findnew_thread(machine, -1, pid);
	if (migrant == NULL)
		return -1;
	atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
	if (!atoms) {
		if (thread_atoms_insert(sched, migrant))
			goto out_put;
		register_pid(sched, thread__tid(migrant), thread__comm_str(migrant));
		atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
		if (!atoms) {
			pr_err("migration-event: Internal tree error");
			goto out_put;
		}
		if (add_sched_out_event(atoms, 'R', timestamp))
			goto out_put;
	}

	BUG_ON(list_empty(&atoms->work_list));

	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
	atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;

	sched->nr_timestamps++;

	if (atom->sched_out_time > timestamp)
		sched->nr_unordered_timestamps++;
	err = 0;
out_put:
	thread__put(migrant);
	return err;
}

static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
{
	int i;
	int ret;
	u64 avg;
	char max_lat_start[32], max_lat_end[32];

	if (!work_list->nb_atoms)
		return;
	/*
	 * Ignore idle threads:
	 */
	if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
		return;

	sched->all_runtime += work_list->total_runtime;
	sched->all_count += work_list->nb_atoms;

	if (work_list->num_merged > 1) {
		ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread),
			     work_list->num_merged);
	} else {
		ret = printf(" %s:%d ", thread__comm_str(work_list->thread),
			     thread__tid(work_list->thread));
	}

	for (i = 0; i < 24 - ret; i++)
		printf(" ");

	avg = work_list->total_lat / work_list->nb_atoms;
	timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
	timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));

	printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
	       (double)work_list->total_runtime / NSEC_PER_MSEC,
	       work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
	       (double)work_list->max_lat / NSEC_PER_MSEC,
	       max_lat_start, max_lat_end);
}

static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
	pid_t l_tid, r_tid;

	if (RC_CHK_ACCESS(l->thread) == RC_CHK_ACCESS(r->thread))
		return 0;
	l_tid = thread__tid(l->thread);
	r_tid = thread__tid(r->thread);
	if (l_tid < r_tid)
		return -1;
	if (l_tid > r_tid)
		return 1;
	return (int)(RC_CHK_ACCESS(l->thread) - RC_CHK_ACCESS(r->thread));
}

static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
{
	u64 avgl, avgr;

	if (!l->nb_atoms)
		return -1;

	if (!r->nb_atoms)
		return 1;

	avgl = l->total_lat / l->nb_atoms;
	avgr = r->total_lat / r->nb_atoms;

	if (avgl < avgr)
		return -1;
	if (avgl > avgr)
		return 1;

	return 0;
}

static int max_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->max_lat < r->max_lat)
		return -1;
	if (l->max_lat > r->max_lat)
		return 1;

	return 0;
}

static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->nb_atoms < r->nb_atoms)
		return -1;
	if (l->nb_atoms > r->nb_atoms)
		return 1;

	return 0;
}

static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
{
	if (l->total_runtime < r->total_runtime)
		return -1;
	if (l->total_runtime > r->total_runtime)
		return 1;

	return 0;
}

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp = avg_cmp,
	};
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp = max_cmp,
	};
	static struct sort_dimension pid_sort_dimension = {
		.name = "pid",
		.cmp = pid_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp = runtime_cmp,
	};
	static struct sort_dimension switch_sort_dimension = {
		.name = "switch",
		.cmp = switch_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&pid_sort_dimension,
		&avg_sort_dimension,
		&max_sort_dimension,
		&switch_sort_dimension,
		&runtime_sort_dimension,
	};

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);

			return 0;
		}
	}

	return -1;
}

static void perf_sched__sort_lat(struct perf_sched *sched)
{
	struct rb_node *node;
	struct rb_root_cached *root = &sched->atom_root;
again:
	for (;;) {
		struct work_atoms *data;
		node = rb_first_cached(root);
		if (!node)
			break;

		rb_erase_cached(node, root);
		data = rb_entry(node, struct work_atoms, node);
		__thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
	}
	if (root == &sched->atom_root) {
		root = &sched->merged_atom_root;
		goto again;
	}
}

static int process_sched_wakeup_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_sched *sched = container_of(tool, struct perf_sched, tool);

	if (sched->tp_handler->wakeup_event)
		return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);

	return 0;
}

static int process_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
				       struct evsel *evsel __maybe_unused,
				       struct perf_sample *sample __maybe_unused,
				       struct machine *machine __maybe_unused)
{
	return 0;
}

union map_priv {
	void *ptr;
	bool color;
};

static bool thread__has_color(struct thread *thread)
{
	union map_priv priv = {
		.ptr = thread__priv(thread),
	};

	return priv.color;
}

static struct thread*
map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
{
	struct thread *thread = machine__findnew_thread(machine, pid, tid);
	union map_priv priv = {
		.color = false,
	};

	if (!sched->map.color_pids || !thread || thread__priv(thread))
		return thread;

	if (thread_map__has(sched->map.color_pids, tid))
		priv.color = true;

	thread__set_priv(thread, priv.ptr);
	return thread;
}

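/*
 * Map-mode sched_switch handler: print one column per CPU, marking the
 * switching CPU with '*' and each CPU's current task by its two-char
 * shortname ('.' is the idle task). When map.comp is set (compact
 * mode), only CPUs that have been seen so far get a column.
 */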
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
			    struct perf_sample *sample, struct machine *machine)
{
	const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
	struct thread *sched_in;
	struct thread_runtime *tr;
	int new_shortname;
	u64 timestamp0, timestamp = sample->time;
	s64 delta;
	int i;
	struct perf_cpu this_cpu = {
		.cpu = sample->cpu,
	};
	int cpus_nr;
	bool new_cpu = false;
	const char *color = PERF_COLOR_NORMAL;
	char stimestamp[32];

	BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);

	if (this_cpu.cpu > sched->max_cpu.cpu)
		sched->max_cpu = this_cpu;

	if (sched->map.comp) {
		cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
		if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
			sched->map.comp_cpus[cpus_nr++] = this_cpu;
			new_cpu = true;
		}
	} else
		cpus_nr = sched->max_cpu.cpu;

	timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
	sched->cpu_last_switched[this_cpu.cpu] = timestamp;
	if (timestamp0)
		delta = timestamp - timestamp0;
	else
		delta = 0;

	if (delta < 0) {
		pr_err("hm, delta: %" PRId64 " < 0 ?\n", delta);
		return -1;
	}

	sched_in = map__findnew_thread(sched, machine, -1, next_pid);
	if (sched_in == NULL)
		return -1;

	tr = thread__get_runtime(sched_in);
	if (tr == NULL) {
		thread__put(sched_in);
		return -1;
	}

	sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);

	printf(" ");

	new_shortname = 0;
	if (!tr->shortname[0]) {
		if (!strcmp(thread__comm_str(sched_in), "swapper")) {
			/*
			 * Don't allocate a letter-number for swapper:0
			 * as a shortname. Instead, we use '.' for it.
			 */
			tr->shortname[0] = '.';
			tr->shortname[1] = ' ';
		} else {
			tr->shortname[0] = sched->next_shortname1;
			tr->shortname[1] = sched->next_shortname2;

			if (sched->next_shortname1 < 'Z') {
				sched->next_shortname1++;
			} else {
				sched->next_shortname1 = 'A';
				if (sched->next_shortname2 < '9')
					sched->next_shortname2++;
				else
					sched->next_shortname2 = '0';
			}
		}
		new_shortname = 1;
	}

	for (i = 0; i < cpus_nr; i++) {
		struct perf_cpu cpu = {
			.cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
		};
		struct thread *curr_thread = sched->curr_thread[cpu.cpu];
		struct thread_runtime *curr_tr;
		const char *pid_color = color;
		const char *cpu_color = color;

		if (curr_thread && thread__has_color(curr_thread))
			pid_color = COLOR_PIDS;

		if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu))
			continue;

		if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
			cpu_color = COLOR_CPUS;

		if (cpu.cpu != this_cpu.cpu)
			color_fprintf(stdout, color, " ");
		else
			color_fprintf(stdout, cpu_color, "*");

		if (sched->curr_thread[cpu.cpu]) {
			curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
			if (curr_tr == NULL) {
				thread__put(sched_in);
				return -1;
			}
			color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname);
		} else
			color_fprintf(stdout, color, " ");
	}

	if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
		goto out;

	timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
	color_fprintf(stdout, color, " %12s secs ", stimestamp);
	if (new_shortname || tr->comm_changed || (verbose > 0 && thread__tid(sched_in))) {
		const char *pid_color = color;

		if (thread__has_color(sched_in))
			pid_color = COLOR_PIDS;

		color_fprintf(stdout, pid_color, "%s => %s:%d",
			tr->shortname, thread__comm_str(sched_in), thread__tid(sched_in));
		tr->comm_changed = false;
	}

	if (sched->map.comp && new_cpu)
		color_fprintf(stdout, color, " (CPU %d)", this_cpu.cpu);
1702
1703 out:
1704 color_fprintf(stdout, color, "\n");
1705
1706 thread__put(sched_in);
1707
1708 return 0;
1709 }
1710
process_sched_switch_event(struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1711 static int process_sched_switch_event(struct perf_tool *tool,
1712 struct evsel *evsel,
1713 struct perf_sample *sample,
1714 struct machine *machine)
1715 {
1716 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1717 int this_cpu = sample->cpu, err = 0;
1718 u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
1719 next_pid = evsel__intval(evsel, sample, "next_pid");
1720
1721 if (sched->curr_pid[this_cpu] != (u32)-1) {
1722 /*
1723 * Are we trying to switch away a PID that is
1724 * not current?
1725 */
1726 if (sched->curr_pid[this_cpu] != prev_pid)
1727 sched->nr_context_switch_bugs++;
1728 }
1729
1730 if (sched->tp_handler->switch_event)
1731 err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
1732
1733 sched->curr_pid[this_cpu] = next_pid;
1734 return err;
1735 }
1736
process_sched_runtime_event(struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1737 static int process_sched_runtime_event(struct perf_tool *tool,
1738 struct evsel *evsel,
1739 struct perf_sample *sample,
1740 struct machine *machine)
1741 {
1742 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1743
1744 if (sched->tp_handler->runtime_event)
1745 return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
1746
1747 return 0;
1748 }
1749
perf_sched__process_fork_event(struct perf_tool * tool,union perf_event * event,struct perf_sample * sample,struct machine * machine)1750 static int perf_sched__process_fork_event(struct perf_tool *tool,
1751 union perf_event *event,
1752 struct perf_sample *sample,
1753 struct machine *machine)
1754 {
1755 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1756
1757 /* run the fork event through the perf machinery */
1758 perf_event__process_fork(tool, event, sample, machine);
1759
1760 /* and then run additional processing needed for this command */
1761 if (sched->tp_handler->fork_event)
1762 return sched->tp_handler->fork_event(sched, event, machine);
1763
1764 return 0;
1765 }
1766
process_sched_migrate_task_event(struct perf_tool * tool,struct evsel * evsel,struct perf_sample * sample,struct machine * machine)1767 static int process_sched_migrate_task_event(struct perf_tool *tool,
1768 struct evsel *evsel,
1769 struct perf_sample *sample,
1770 struct machine *machine)
1771 {
1772 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
1773
1774 if (sched->tp_handler->migrate_task_event)
1775 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
1776
1777 return 0;
1778 }
1779
1780 typedef int (*tracepoint_handler)(struct perf_tool *tool,
1781 struct evsel *evsel,
1782 struct perf_sample *sample,
1783 struct machine *machine);
1784
perf_sched__process_tracepoint_sample(struct perf_tool * tool __maybe_unused,union perf_event * event __maybe_unused,struct perf_sample * sample,struct evsel * evsel,struct machine * machine)1785 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
1786 union perf_event *event __maybe_unused,
1787 struct perf_sample *sample,
1788 struct evsel *evsel,
1789 struct machine *machine)
1790 {
1791 int err = 0;
1792
1793 if (evsel->handler != NULL) {
1794 tracepoint_handler f = evsel->handler;
1795 err = f(tool, evsel, sample, machine);
1796 }
1797
1798 return err;
1799 }
1800
perf_sched__process_comm(struct perf_tool * tool __maybe_unused,union perf_event * event,struct perf_sample * sample,struct machine * machine)1801 static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
1802 union perf_event *event,
1803 struct perf_sample *sample,
1804 struct machine *machine)
1805 {
1806 struct thread *thread;
1807 struct thread_runtime *tr;
1808 int err;
1809
1810 err = perf_event__process_comm(tool, event, sample, machine);
1811 if (err)
1812 return err;
1813
1814 thread = machine__find_thread(machine, sample->pid, sample->tid);
1815 if (!thread) {
1816 pr_err("Internal error: can't find thread\n");
1817 return -1;
1818 }
1819
1820 tr = thread__get_runtime(thread);
1821 if (tr == NULL) {
1822 thread__put(thread);
1823 return -1;
1824 }
1825
1826 tr->comm_changed = true;
1827 thread__put(thread);
1828
1829 return 0;
1830 }
1831
perf_sched__read_events(struct perf_sched * sched)1832 static int perf_sched__read_events(struct perf_sched *sched)
1833 {
1834 struct evsel_str_handler handlers[] = {
1835 { "sched:sched_switch", process_sched_switch_event, },
1836 { "sched:sched_stat_runtime", process_sched_runtime_event, },
1837 { "sched:sched_wakeup", process_sched_wakeup_event, },
1838 { "sched:sched_waking", process_sched_wakeup_event, },
1839 { "sched:sched_wakeup_new", process_sched_wakeup_event, },
1840 { "sched:sched_migrate_task", process_sched_migrate_task_event, },
1841 };
1842 struct perf_session *session;
1843 struct perf_data data = {
1844 .path = input_name,
1845 .mode = PERF_DATA_MODE_READ,
1846 .force = sched->force,
1847 };
1848 int rc = -1;
1849
1850 session = perf_session__new(&data, &sched->tool);
1851 if (IS_ERR(session)) {
1852 pr_debug("Error creating perf session");
1853 return PTR_ERR(session);
1854 }
1855
1856 symbol__init(&session->header.env);
1857
1858 /* prefer sched_waking if it is captured */
1859 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
1860 handlers[2].handler = process_sched_wakeup_ignore;
1861
1862 if (perf_session__set_tracepoints_handlers(session, handlers))
1863 goto out_delete;
1864
1865 if (perf_session__has_traces(session, "record -R")) {
1866 int err = perf_session__process_events(session);
1867 if (err) {
1868 pr_err("Failed to process events, error %d", err);
1869 goto out_delete;
1870 }
1871
1872 sched->nr_events = session->evlist->stats.nr_events[0];
1873 sched->nr_lost_events = session->evlist->stats.total_lost;
1874 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1875 }
1876
1877 rc = 0;
1878 out_delete:
1879 perf_session__delete(session);
1880 return rc;
1881 }
1882
1883 /*
1884 * scheduling times are printed as msec.usec
1885 */
1886 static inline void print_sched_time(unsigned long long nsecs, int width)
1887 {
1888 unsigned long msecs;
1889 unsigned long usecs;
1890
1891 msecs = nsecs / NSEC_PER_MSEC;
1892 nsecs -= msecs * NSEC_PER_MSEC;
1893 usecs = nsecs / NSEC_PER_USEC;
1894 printf("%*lu.%03lu ", width, msecs, usecs);
1895 }
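
/*
 * Editor's note (worked example, not from the original source): for
 * nsecs = 1234567 and width = 6, msecs = 1 and the remaining 234567 ns
 * give usecs = 234, so the function prints "     1.234 ", i.e.
 * msec.usec right-aligned to the requested width.
 */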
1896
1897 /*
1898 * returns runtime data for event, allocating memory for it the
1899 * first time it is used.
1900 */
1901 static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
1902 {
1903 struct evsel_runtime *r = evsel->priv;
1904
1905 if (r == NULL) {
1906 r = zalloc(sizeof(struct evsel_runtime));
1907 evsel->priv = r;
1908 }
1909
1910 return r;
1911 }
1912
1913 /*
1914 * save last time event was seen per cpu
1915 */
1916 static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
1917 {
1918 struct evsel_runtime *r = evsel__get_runtime(evsel);
1919
1920 if (r == NULL)
1921 return;
1922
1923 if ((cpu >= r->ncpu) || (r->last_time == NULL)) {
1924 int i, n = __roundup_pow_of_two(cpu+1);
1925 void *p = r->last_time;
1926
1927 p = realloc(r->last_time, n * sizeof(u64));
1928 if (!p)
1929 return;
1930
1931 r->last_time = p;
1932 for (i = r->ncpu; i < n; ++i)
1933 r->last_time[i] = (u64) 0;
1934
1935 r->ncpu = n;
1936 }
1937
1938 r->last_time[cpu] = timestamp;
1939 }
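
/*
 * Editor's note: the per-cpu timestamp array grows in powers of two;
 * e.g. the first sample on cpu 5 allocates __roundup_pow_of_two(6) = 8
 * slots and zero-fills the new ones before storing the timestamp.
 */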
1940
1941 /* returns last time this event was seen on the given cpu */
1942 static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
1943 {
1944 struct evsel_runtime *r = evsel__get_runtime(evsel);
1945
1946 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
1947 return 0;
1948
1949 return r->last_time[cpu];
1950 }
1951
1952 static int comm_width = 30;
1953
1954 static char *timehist_get_commstr(struct thread *thread)
1955 {
1956 static char str[32];
1957 const char *comm = thread__comm_str(thread);
1958 pid_t tid = thread__tid(thread);
1959 pid_t pid = thread__pid(thread);
1960 int n;
1961
1962 if (pid == 0)
1963 n = scnprintf(str, sizeof(str), "%s", comm);
1964
1965 else if (tid != pid)
1966 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid);
1967
1968 else
1969 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid);
1970
1971 if (n > comm_width)
1972 comm_width = n;
1973
1974 return str;
1975 }
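
/*
 * Editor's note: the returned string lives in a static buffer that is
 * overwritten on every call, so each result must be consumed before
 * the next call. comm_width only ever grows, keeping later columns
 * wide enough for the longest comm seen so far.
 */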
1976
1977 static void timehist_header(struct perf_sched *sched)
1978 {
1979 u32 ncpus = sched->max_cpu.cpu + 1;
1980 u32 i, j;
1981
1982 printf("%15s %6s ", "time", "cpu");
1983
1984 if (sched->show_cpu_visual) {
1985 printf(" ");
1986 for (i = 0, j = 0; i < ncpus; ++i) {
1987 printf("%x", j++);
1988 if (j > 15)
1989 j = 0;
1990 }
1991 printf(" ");
1992 }
1993
1994 printf(" %-*s %9s %9s %9s", comm_width,
1995 "task name", "wait time", "sch delay", "run time");
1996
1997 if (sched->show_state)
1998 printf(" %s", "state");
1999
2000 printf("\n");
2001
2002 /*
2003 * units row
2004 */
2005 printf("%15s %-6s ", "", "");
2006
2007 if (sched->show_cpu_visual)
2008 printf(" %*s ", ncpus, "");
2009
2010 printf(" %-*s %9s %9s %9s", comm_width,
2011 "[tid/pid]", "(msec)", "(msec)", "(msec)");
2012
2013 if (sched->show_state)
2014 printf(" %5s", "");
2015
2016 printf("\n");
2017
2018 /*
2019 * separator
2020 */
2021 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line);
2022
2023 if (sched->show_cpu_visual)
2024 printf(" %.*s ", ncpus, graph_dotted_line);
2025
2026 printf(" %.*s %.9s %.9s %.9s", comm_width,
2027 graph_dotted_line, graph_dotted_line, graph_dotted_line,
2028 graph_dotted_line);
2029
2030 if (sched->show_state)
2031 printf(" %.5s", graph_dotted_line);
2032
2033 printf("\n");
2034 }
2035
2036 static char task_state_char(struct thread *thread, int state)
2037 {
2038 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
2039 unsigned bit = state ? ffs(state) : 0;
2040
2041 /* 'I' for idle */
2042 if (thread__tid(thread) == 0)
2043 return 'I';
2044
2045 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
2046 }
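
/*
 * Editor's note (worked example): with TASK_STATE_TO_CHAR_STR being
 * "RSDTtZXxKWP", state 0 maps to 'R', state 1 (TASK_INTERRUPTIBLE)
 * gives ffs(1) = 1 -> 'S', state 2 (TASK_UNINTERRUPTIBLE) gives
 * ffs(2) = 2 -> 'D', and anything past the string prints '?'.
 */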
2047
2048 static void timehist_print_sample(struct perf_sched *sched,
2049 struct evsel *evsel,
2050 struct perf_sample *sample,
2051 struct addr_location *al,
2052 struct thread *thread,
2053 u64 t, int state)
2054 {
2055 struct thread_runtime *tr = thread__priv(thread);
2056 const char *next_comm = evsel__strval(evsel, sample, "next_comm");
2057 const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
2058 u32 max_cpus = sched->max_cpu.cpu + 1;
2059 char tstr[64];
2060 char nstr[30];
2061 u64 wait_time;
2062
2063 if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
2064 return;
2065
2066 timestamp__scnprintf_usec(t, tstr, sizeof(tstr));
2067 printf("%15s [%04d] ", tstr, sample->cpu);
2068
2069 if (sched->show_cpu_visual) {
2070 u32 i;
2071 char c;
2072
2073 printf(" ");
2074 for (i = 0; i < max_cpus; ++i) {
2075 /* flag idle times with 'i'; others are sched events */
2076 if (i == sample->cpu)
2077 c = (thread__tid(thread) == 0) ? 'i' : 's';
2078 else
2079 c = ' ';
2080 printf("%c", c);
2081 }
2082 printf(" ");
2083 }
2084
2085 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2086
2087 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt;
2088 print_sched_time(wait_time, 6);
2089
2090 print_sched_time(tr->dt_delay, 6);
2091 print_sched_time(tr->dt_run, 6);
2092
2093 if (sched->show_state)
2094 printf(" %5c ", task_state_char(thread, state));
2095
2096 if (sched->show_next) {
2097 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid);
2098 printf(" %-*s", comm_width, nstr);
2099 }
2100
2101 if (sched->show_wakeups && !sched->show_next)
2102 printf(" %-*s", comm_width, "");
2103
2104 if (thread__tid(thread) == 0)
2105 goto out;
2106
2107 if (sched->show_callchain)
2108 printf(" ");
2109
2110 sample__fprintf_sym(sample, al, 0,
2111 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
2112 EVSEL__PRINT_CALLCHAIN_ARROW |
2113 EVSEL__PRINT_SKIP_IGNORED,
2114 get_tls_callchain_cursor(), symbol_conf.bt_stop_list, stdout);
2115
2116 out:
2117 printf("\n");
2118 }
2119
2120 /*
2121 * Explanation of delta-time stats:
2122 *
2123 * t = time of current schedule out event
2124 * tprev = time of previous sched out event
2125 * also time of schedule-in event for current task
2126 * last_time = time of last sched change event for current task
2127 * (i.e, time process was last scheduled out)
2128 * ready_to_run = time of wakeup for current task
2129 *
2130 * -----|------------|------------|------------|------
2131 * last ready tprev t
2132 * time to run
2133 *
2134 * |-------- dt_wait --------|
2135 * |- dt_delay -|-- dt_run --|
2136 *
2137 * dt_run = run time of current task
2138 * dt_wait = time between last schedule out event for task and tprev
2139 * represents time spent off the cpu
2140 * dt_delay = time between wakeup and schedule-in of task
2141 */
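
/*
 * Editor's note (worked example): with last_time = 100, ready_to_run
 * = 200, tprev = 300 and t = 450 (all in ns), the code below computes
 * dt_run = 150, dt_delay = 100 and a 200 ns wait, which is charged to
 * dt_preempt, dt_iowait or dt_sleep depending on last_state.
 */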
2142
2143 static void timehist_update_runtime_stats(struct thread_runtime *r,
2144 u64 t, u64 tprev)
2145 {
2146 r->dt_delay = 0;
2147 r->dt_sleep = 0;
2148 r->dt_iowait = 0;
2149 r->dt_preempt = 0;
2150 r->dt_run = 0;
2151
2152 if (tprev) {
2153 r->dt_run = t - tprev;
2154 if (r->ready_to_run) {
2155 if (r->ready_to_run > tprev)
2156 pr_debug("time travel: wakeup time for task > previous sched_switch event\n");
2157 else
2158 r->dt_delay = tprev - r->ready_to_run;
2159 }
2160
2161 if (r->last_time > tprev)
2162 pr_debug("time travel: last sched out time for task > previous sched_switch event\n");
2163 else if (r->last_time) {
2164 u64 dt_wait = tprev - r->last_time;
2165
2166 if (r->last_state == TASK_RUNNING)
2167 r->dt_preempt = dt_wait;
2168 else if (r->last_state == TASK_UNINTERRUPTIBLE)
2169 r->dt_iowait = dt_wait;
2170 else
2171 r->dt_sleep = dt_wait;
2172 }
2173 }
2174
2175 update_stats(&r->run_stats, r->dt_run);
2176
2177 r->total_run_time += r->dt_run;
2178 r->total_delay_time += r->dt_delay;
2179 r->total_sleep_time += r->dt_sleep;
2180 r->total_iowait_time += r->dt_iowait;
2181 r->total_preempt_time += r->dt_preempt;
2182 }
2183
2184 static bool is_idle_sample(struct perf_sample *sample,
2185 struct evsel *evsel)
2186 {
2187 /* pid 0 == swapper == idle task */
2188 if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
2189 return evsel__intval(evsel, sample, "prev_pid") == 0;
2190
2191 return sample->pid == 0;
2192 }
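
/*
 * Editor's note: for sched_switch the idle check must look at the
 * prev_pid field in the payload (the task leaving the cpu); for every
 * other event the sample's own pid identifies the task.
 */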
2193
2194 static void save_task_callchain(struct perf_sched *sched,
2195 struct perf_sample *sample,
2196 struct evsel *evsel,
2197 struct machine *machine)
2198 {
2199 struct callchain_cursor *cursor;
2200 struct thread *thread;
2201
2202 /* want main thread for process - has maps */
2203 thread = machine__findnew_thread(machine, sample->pid, sample->pid);
2204 if (thread == NULL) {
2205 pr_debug("Failed to get thread for pid %d.\n", sample->pid);
2206 return;
2207 }
2208
2209 if (!sched->show_callchain || sample->callchain == NULL)
2210 return;
2211
2212 cursor = get_tls_callchain_cursor();
2213
2214 if (thread__resolve_callchain(thread, cursor, evsel, sample,
2215 NULL, NULL, sched->max_stack + 2) != 0) {
2216 if (verbose > 0)
2217 pr_err("Failed to resolve callchain. Skipping\n");
2218
2219 return;
2220 }
2221
2222 callchain_cursor_commit(cursor);
2223
2224 while (true) {
2225 struct callchain_cursor_node *node;
2226 struct symbol *sym;
2227
2228 node = callchain_cursor_current(cursor);
2229 if (node == NULL)
2230 break;
2231
2232 sym = node->ms.sym;
2233 if (sym) {
2234 if (!strcmp(sym->name, "schedule") ||
2235 !strcmp(sym->name, "__schedule") ||
2236 !strcmp(sym->name, "preempt_schedule"))
2237 sym->ignore = 1;
2238 }
2239
2240 callchain_cursor_advance(cursor);
2241 }
2242 }
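
/*
 * Editor's note: flagging schedule(), __schedule() and
 * preempt_schedule() with sym->ignore makes the later printing paths
 * (EVSEL__PRINT_SKIP_IGNORED in timehist_print_sample and the folded
 * idle callchains) drop them, so printed chains start at the caller
 * that entered the scheduler rather than at scheduler internals.
 */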
2243
2244 static int init_idle_thread(struct thread *thread)
2245 {
2246 struct idle_thread_runtime *itr;
2247
2248 thread__set_comm(thread, idle_comm, 0);
2249
2250 itr = zalloc(sizeof(*itr));
2251 if (itr == NULL)
2252 return -ENOMEM;
2253
2254 init_stats(&itr->tr.run_stats);
2255 callchain_init(&itr->callchain);
2256 callchain_cursor_reset(&itr->cursor);
2257 thread__set_priv(thread, itr);
2258
2259 return 0;
2260 }
2261
2262 /*
2263 * Track idle stats per cpu by maintaining a local thread
2264 * struct for the idle task on each cpu.
2265 */
2266 static int init_idle_threads(int ncpu)
2267 {
2268 int i, ret;
2269
2270 idle_threads = zalloc(ncpu * sizeof(struct thread *));
2271 if (!idle_threads)
2272 return -ENOMEM;
2273
2274 idle_max_cpu = ncpu;
2275
2276 /* allocate the actual thread struct if needed */
2277 for (i = 0; i < ncpu; ++i) {
2278 idle_threads[i] = thread__new(0, 0);
2279 if (idle_threads[i] == NULL)
2280 return -ENOMEM;
2281
2282 ret = init_idle_thread(idle_threads[i]);
2283 if (ret < 0)
2284 return ret;
2285 }
2286
2287 return 0;
2288 }
2289
2290 static void free_idle_threads(void)
2291 {
2292 int i;
2293
2294 if (idle_threads == NULL)
2295 return;
2296
2297 for (i = 0; i < idle_max_cpu; ++i) {
2298 if ((idle_threads[i]))
2299 thread__delete(idle_threads[i]);
2300 }
2301
2302 free(idle_threads);
2303 }
2304
2305 static struct thread *get_idle_thread(int cpu)
2306 {
2307 /*
2308 * expand/allocate array of pointers to local thread
2309 * structs if needed
2310 */
2311 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) {
2312 int i, j = __roundup_pow_of_two(cpu+1);
2313 void *p;
2314
2315 p = realloc(idle_threads, j * sizeof(struct thread *));
2316 if (!p)
2317 return NULL;
2318
2319 idle_threads = (struct thread **) p;
2320 for (i = idle_max_cpu; i < j; ++i)
2321 idle_threads[i] = NULL;
2322
2323 idle_max_cpu = j;
2324 }
2325
2326 /* allocate a new thread struct if needed */
2327 if (idle_threads[cpu] == NULL) {
2328 idle_threads[cpu] = thread__new(0, 0);
2329
2330 if (idle_threads[cpu]) {
2331 if (init_idle_thread(idle_threads[cpu]) < 0)
2332 return NULL;
2333 }
2334 }
2335
2336 return idle_threads[cpu];
2337 }
2338
2339 static void save_idle_callchain(struct perf_sched *sched,
2340 struct idle_thread_runtime *itr,
2341 struct perf_sample *sample)
2342 {
2343 struct callchain_cursor *cursor;
2344
2345 if (!sched->show_callchain || sample->callchain == NULL)
2346 return;
2347
2348 cursor = get_tls_callchain_cursor();
2349 if (cursor == NULL)
2350 return;
2351
2352 callchain_cursor__copy(&itr->cursor, cursor);
2353 }
2354
2355 static struct thread *timehist_get_thread(struct perf_sched *sched,
2356 struct perf_sample *sample,
2357 struct machine *machine,
2358 struct evsel *evsel)
2359 {
2360 struct thread *thread;
2361
2362 if (is_idle_sample(sample, evsel)) {
2363 thread = get_idle_thread(sample->cpu);
2364 if (thread == NULL)
2365 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2366
2367 } else {
2368 /* there were samples with tid 0 but non-zero pid */
2369 thread = machine__findnew_thread(machine, sample->pid,
2370 sample->tid ?: sample->pid);
2371 if (thread == NULL) {
2372 pr_debug("Failed to get thread for tid %d. skipping sample.\n",
2373 sample->tid);
2374 }
2375
2376 save_task_callchain(sched, sample, evsel, machine);
2377 if (sched->idle_hist) {
2378 struct thread *idle;
2379 struct idle_thread_runtime *itr;
2380
2381 idle = get_idle_thread(sample->cpu);
2382 if (idle == NULL) {
2383 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu);
2384 return NULL;
2385 }
2386
2387 itr = thread__priv(idle);
2388 if (itr == NULL)
2389 return NULL;
2390
2391 itr->last_thread = thread;
2392
2393 /* copy task callchain when entering to idle */
2394 if (evsel__intval(evsel, sample, "next_pid") == 0)
2395 save_idle_callchain(sched, itr, sample);
2396 }
2397 }
2398
2399 return thread;
2400 }
2401
2402 static bool timehist_skip_sample(struct perf_sched *sched,
2403 struct thread *thread,
2404 struct evsel *evsel,
2405 struct perf_sample *sample)
2406 {
2407 bool rc = false;
2408
2409 if (thread__is_filtered(thread)) {
2410 rc = true;
2411 sched->skipped_samples++;
2412 }
2413
2414 if (sched->idle_hist) {
2415 if (strcmp(evsel__name(evsel), "sched:sched_switch"))
2416 rc = true;
2417 else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
2418 evsel__intval(evsel, sample, "next_pid") != 0)
2419 rc = true;
2420 }
2421
2422 return rc;
2423 }
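
/*
 * Editor's note: in idle-hist mode only sched_switch samples in which
 * either prev_pid or next_pid is 0 survive this filter, i.e. only
 * transitions into or out of the idle task are histogrammed.
 */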
2424
2425 static void timehist_print_wakeup_event(struct perf_sched *sched,
2426 struct evsel *evsel,
2427 struct perf_sample *sample,
2428 struct machine *machine,
2429 struct thread *awakened)
2430 {
2431 struct thread *thread;
2432 char tstr[64];
2433
2434 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2435 if (thread == NULL)
2436 return;
2437
2438 /* show wakeup unless both awakee and awaker are filtered */
2439 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2440 timehist_skip_sample(sched, awakened, evsel, sample)) {
2441 return;
2442 }
2443
2444 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2445 printf("%15s [%04d] ", tstr, sample->cpu);
2446 if (sched->show_cpu_visual)
2447 printf(" %*s ", sched->max_cpu.cpu + 1, "");
2448
2449 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2450
2451 /* dt spacer */
2452 printf(" %9s %9s %9s ", "", "", "");
2453
2454 printf("awakened: %s", timehist_get_commstr(awakened));
2455
2456 printf("\n");
2457 }
2458
2459 static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
2460 union perf_event *event __maybe_unused,
2461 struct evsel *evsel __maybe_unused,
2462 struct perf_sample *sample __maybe_unused,
2463 struct machine *machine __maybe_unused)
2464 {
2465 return 0;
2466 }
2467
2468 static int timehist_sched_wakeup_event(struct perf_tool *tool,
2469 union perf_event *event __maybe_unused,
2470 struct evsel *evsel,
2471 struct perf_sample *sample,
2472 struct machine *machine)
2473 {
2474 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2475 struct thread *thread;
2476 struct thread_runtime *tr = NULL;
2477 /* want pid of awakened task not pid in sample */
2478 const u32 pid = evsel__intval(evsel, sample, "pid");
2479
2480 thread = machine__findnew_thread(machine, 0, pid);
2481 if (thread == NULL)
2482 return -1;
2483
2484 tr = thread__get_runtime(thread);
2485 if (tr == NULL)
2486 return -1;
2487
2488 if (tr->ready_to_run == 0)
2489 tr->ready_to_run = sample->time;
2490
2491 /* show wakeups if requested */
2492 if (sched->show_wakeups &&
2493 !perf_time__skip_sample(&sched->ptime, sample->time))
2494 timehist_print_wakeup_event(sched, evsel, sample, machine, thread);
2495
2496 return 0;
2497 }
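
/*
 * Editor's note: ready_to_run is only armed by the first wakeup seen
 * after a switch-out (timehist_sched_change_event resets it to 0), so
 * repeated wakeups of an already-runnable task do not skew dt_delay.
 */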
2498
2499 static void timehist_print_migration_event(struct perf_sched *sched,
2500 struct evsel *evsel,
2501 struct perf_sample *sample,
2502 struct machine *machine,
2503 struct thread *migrated)
2504 {
2505 struct thread *thread;
2506 char tstr[64];
2507 u32 max_cpus;
2508 u32 ocpu, dcpu;
2509
2510 if (sched->summary_only)
2511 return;
2512
2513 max_cpus = sched->max_cpu.cpu + 1;
2514 ocpu = evsel__intval(evsel, sample, "orig_cpu");
2515 dcpu = evsel__intval(evsel, sample, "dest_cpu");
2516
2517 thread = machine__findnew_thread(machine, sample->pid, sample->tid);
2518 if (thread == NULL)
2519 return;
2520
2521 if (timehist_skip_sample(sched, thread, evsel, sample) &&
2522 timehist_skip_sample(sched, migrated, evsel, sample)) {
2523 return;
2524 }
2525
2526 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2527 printf("%15s [%04d] ", tstr, sample->cpu);
2528
2529 if (sched->show_cpu_visual) {
2530 u32 i;
2531 char c;
2532
2533 printf(" ");
2534 for (i = 0; i < max_cpus; ++i) {
2535 c = (i == sample->cpu) ? 'm' : ' ';
2536 printf("%c", c);
2537 }
2538 printf(" ");
2539 }
2540
2541 printf(" %-*s ", comm_width, timehist_get_commstr(thread));
2542
2543 /* dt spacer */
2544 printf(" %9s %9s %9s ", "", "", "");
2545
2546 printf("migrated: %s", timehist_get_commstr(migrated));
2547 printf(" cpu %d => %d", ocpu, dcpu);
2548
2549 printf("\n");
2550 }
2551
2552 static int timehist_migrate_task_event(struct perf_tool *tool,
2553 union perf_event *event __maybe_unused,
2554 struct evsel *evsel,
2555 struct perf_sample *sample,
2556 struct machine *machine)
2557 {
2558 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2559 struct thread *thread;
2560 struct thread_runtime *tr = NULL;
2561 /* want pid of migrated task not pid in sample */
2562 const u32 pid = evsel__intval(evsel, sample, "pid");
2563
2564 thread = machine__findnew_thread(machine, 0, pid);
2565 if (thread == NULL)
2566 return -1;
2567
2568 tr = thread__get_runtime(thread);
2569 if (tr == NULL)
2570 return -1;
2571
2572 tr->migrations++;
2573
2574 /* show migrations if requested */
2575 timehist_print_migration_event(sched, evsel, sample, machine, thread);
2576
2577 return 0;
2578 }
2579
2580 static int timehist_sched_change_event(struct perf_tool *tool,
2581 union perf_event *event,
2582 struct evsel *evsel,
2583 struct perf_sample *sample,
2584 struct machine *machine)
2585 {
2586 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2587 struct perf_time_interval *ptime = &sched->ptime;
2588 struct addr_location al;
2589 struct thread *thread;
2590 struct thread_runtime *tr = NULL;
2591 u64 tprev, t = sample->time;
2592 int rc = 0;
2593 int state = evsel__intval(evsel, sample, "prev_state");
2594
2595 addr_location__init(&al);
2596 if (machine__resolve(machine, &al, sample) < 0) {
2597 pr_err("problem processing %d event. skipping it\n",
2598 event->header.type);
2599 rc = -1;
2600 goto out;
2601 }
2602
2603 thread = timehist_get_thread(sched, sample, machine, evsel);
2604 if (thread == NULL) {
2605 rc = -1;
2606 goto out;
2607 }
2608
2609 if (timehist_skip_sample(sched, thread, evsel, sample))
2610 goto out;
2611
2612 tr = thread__get_runtime(thread);
2613 if (tr == NULL) {
2614 rc = -1;
2615 goto out;
2616 }
2617
2618 tprev = evsel__get_time(evsel, sample->cpu);
2619
2620 /*
2621 * If a start time was given:
2622 * - a sample that occurs before the window of interest is skipped
2623 * - a tprev that falls before the window is clamped to its start
2624 */
2625 if (ptime->start && ptime->start > t)
2626 goto out;
2627
2628 if (tprev && ptime->start > tprev)
2629 tprev = ptime->start;
2630
2631 /*
2632 * If an end time was given:
2633 * - if the previous sched event is already past the window, we are done
2634 * - if the sample time lies beyond the window, clamp it to the
2635 * window end so the stats for the window are closed out
2636 * - if tprev is 0, i.e. no sched-in event was recorded for the
2637 * current task, we cannot tell whether the sched-in falls inside
2638 * the window, so ignore the sample
2639 */
2640 if (ptime->end) {
2641 if (!tprev || tprev > ptime->end)
2642 goto out;
2643
2644 if (t > ptime->end)
2645 t = ptime->end;
2646 }
2647
2648 if (!sched->idle_hist || thread__tid(thread) == 0) {
2649 if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
2650 timehist_update_runtime_stats(tr, t, tprev);
2651
2652 if (sched->idle_hist) {
2653 struct idle_thread_runtime *itr = (void *)tr;
2654 struct thread_runtime *last_tr;
2655
2656 BUG_ON(thread__tid(thread) != 0);
2657
2658 if (itr->last_thread == NULL)
2659 goto out;
2660
2661 /* add current idle time as last thread's runtime */
2662 last_tr = thread__get_runtime(itr->last_thread);
2663 if (last_tr == NULL)
2664 goto out;
2665
2666 timehist_update_runtime_stats(last_tr, t, tprev);
2667 /*
2668 * clear the delta times of the last thread: they are not updated
2669 * here and would otherwise show stale values next time. we only
2670 * care about the total run time and the run stats.
2671 */
2672 last_tr->dt_run = 0;
2673 last_tr->dt_delay = 0;
2674 last_tr->dt_sleep = 0;
2675 last_tr->dt_iowait = 0;
2676 last_tr->dt_preempt = 0;
2677
2678 if (itr->cursor.nr)
2679 callchain_append(&itr->callchain, &itr->cursor, t - tprev);
2680
2681 itr->last_thread = NULL;
2682 }
2683 }
2684
2685 if (!sched->summary_only)
2686 timehist_print_sample(sched, evsel, sample, &al, thread, t, state);
2687
2688 out:
2689 if (sched->hist_time.start == 0 && t >= ptime->start)
2690 sched->hist_time.start = t;
2691 if (ptime->end == 0 || t <= ptime->end)
2692 sched->hist_time.end = t;
2693
2694 if (tr) {
2695 /* time of this sched_switch event becomes last time task seen */
2696 tr->last_time = sample->time;
2697
2698 /* last state is used to determine where to account wait time */
2699 tr->last_state = state;
2700
2701 /* sched out event for task so reset ready to run time */
2702 tr->ready_to_run = 0;
2703 }
2704
2705 evsel__save_time(evsel, sample->time, sample->cpu);
2706
2707 addr_location__exit(&al);
2708 return rc;
2709 }
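
/*
 * Editor's note: evsel__save_time() above records sample->time per
 * cpu, so the next sched_switch on the same cpu reads it back through
 * evsel__get_time() as tprev, the schedule-in time of the task that
 * is now being switched out.
 */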
2710
2711 static int timehist_sched_switch_event(struct perf_tool *tool,
2712 union perf_event *event,
2713 struct evsel *evsel,
2714 struct perf_sample *sample,
2715 struct machine *machine __maybe_unused)
2716 {
2717 return timehist_sched_change_event(tool, event, evsel, sample, machine);
2718 }
2719
2720 static int process_lost(struct perf_tool *tool __maybe_unused,
2721 union perf_event *event,
2722 struct perf_sample *sample,
2723 struct machine *machine __maybe_unused)
2724 {
2725 char tstr[64];
2726
2727 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
2728 printf("%15s ", tstr);
2729 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu);
2730
2731 return 0;
2732 }
2733
2734
2735 static void print_thread_runtime(struct thread *t,
2736 struct thread_runtime *r)
2737 {
2738 double mean = avg_stats(&r->run_stats);
2739 float stddev;
2740
2741 printf("%*s %5d %9" PRIu64 " ",
2742 comm_width, timehist_get_commstr(t), thread__ppid(t),
2743 (u64) r->run_stats.n);
2744
2745 print_sched_time(r->total_run_time, 8);
2746 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean);
2747 print_sched_time(r->run_stats.min, 6);
2748 printf(" ");
2749 print_sched_time((u64) mean, 6);
2750 printf(" ");
2751 print_sched_time(r->run_stats.max, 6);
2752 printf(" ");
2753 printf("%5.2f", stddev);
2754 printf(" %5" PRIu64, r->migrations);
2755 printf("\n");
2756 }
2757
2758 static void print_thread_waittime(struct thread *t,
2759 struct thread_runtime *r)
2760 {
2761 printf("%*s %5d %9" PRIu64 " ",
2762 comm_width, timehist_get_commstr(t), thread__ppid(t),
2763 (u64) r->run_stats.n);
2764
2765 print_sched_time(r->total_run_time, 8);
2766 print_sched_time(r->total_sleep_time, 6);
2767 printf(" ");
2768 print_sched_time(r->total_iowait_time, 6);
2769 printf(" ");
2770 print_sched_time(r->total_preempt_time, 6);
2771 printf(" ");
2772 print_sched_time(r->total_delay_time, 6);
2773 printf("\n");
2774 }
2775
2776 struct total_run_stats {
2777 struct perf_sched *sched;
2778 u64 sched_count;
2779 u64 task_count;
2780 u64 total_run_time;
2781 };
2782
2783 static int show_thread_runtime(struct thread *t, void *priv)
2784 {
2785 struct total_run_stats *stats = priv;
2786 struct thread_runtime *r;
2787
2788 if (thread__is_filtered(t))
2789 return 0;
2790
2791 r = thread__priv(t);
2792 if (r && r->run_stats.n) {
2793 stats->task_count++;
2794 stats->sched_count += r->run_stats.n;
2795 stats->total_run_time += r->total_run_time;
2796
2797 if (stats->sched->show_state)
2798 print_thread_waittime(t, r);
2799 else
2800 print_thread_runtime(t, r);
2801 }
2802
2803 return 0;
2804 }
2805
2806 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
2807 {
2808 const char *sep = " <- ";
2809 struct callchain_list *chain;
2810 size_t ret = 0;
2811 char bf[1024];
2812 bool first;
2813
2814 if (node == NULL)
2815 return 0;
2816
2817 ret = callchain__fprintf_folded(fp, node->parent);
2818 first = (ret == 0);
2819
2820 list_for_each_entry(chain, &node->val, list) {
2821 if (chain->ip >= PERF_CONTEXT_MAX)
2822 continue;
2823 if (chain->ms.sym && chain->ms.sym->ignore)
2824 continue;
2825 ret += fprintf(fp, "%s%s", first ? "" : sep,
2826 callchain_list__sym_name(chain, bf, sizeof(bf),
2827 false));
2828 first = false;
2829 }
2830
2831 return ret;
2832 }
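
/*
 * Editor's note: the recursive call prints a node's ancestors before
 * its own entries, joined by " <- ", while context markers
 * (ip >= PERF_CONTEXT_MAX) and symbols flagged with sym->ignore are
 * skipped, producing one folded line per idle callchain.
 */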
2833
2834 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root)
2835 {
2836 size_t ret = 0;
2837 FILE *fp = stdout;
2838 struct callchain_node *chain;
2839 struct rb_node *rb_node = rb_first_cached(root);
2840
2841 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains");
2842 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line,
2843 graph_dotted_line);
2844
2845 while (rb_node) {
2846 chain = rb_entry(rb_node, struct callchain_node, rb_node);
2847 rb_node = rb_next(rb_node);
2848
2849 ret += fprintf(fp, " ");
2850 print_sched_time(chain->hit, 12);
2851 ret += 16; /* print_sched_time returns 2nd arg + 4 */
2852 ret += fprintf(fp, " %8d ", chain->count);
2853 ret += callchain__fprintf_folded(fp, chain);
2854 ret += fprintf(fp, "\n");
2855 }
2856
2857 return ret;
2858 }
2859
2860 static void timehist_print_summary(struct perf_sched *sched,
2861 struct perf_session *session)
2862 {
2863 struct machine *m = &session->machines.host;
2864 struct total_run_stats totals;
2865 u64 task_count;
2866 struct thread *t;
2867 struct thread_runtime *r;
2868 int i;
2869 u64 hist_time = sched->hist_time.end - sched->hist_time.start;
2870
2871 memset(&totals, 0, sizeof(totals));
2872 totals.sched = sched;
2873
2874 if (sched->idle_hist) {
2875 printf("\nIdle-time summary\n");
2876 printf("%*s parent sched-out ", comm_width, "comm");
2877 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n");
2878 } else if (sched->show_state) {
2879 printf("\nWait-time summary\n");
2880 printf("%*s parent sched-in ", comm_width, "comm");
2881 printf(" run-time sleep iowait preempt delay\n");
2882 } else {
2883 printf("\nRuntime summary\n");
2884 printf("%*s parent sched-in ", comm_width, "comm");
2885 printf(" run-time min-run avg-run max-run stddev migrations\n");
2886 }
2887 printf("%*s (count) ", comm_width, "");
2888 printf(" (msec) (msec) (msec) (msec) %s\n",
2889 sched->show_state ? "(msec)" : "%");
2890 printf("%.117s\n", graph_dotted_line);
2891
2892 machine__for_each_thread(m, show_thread_runtime, &totals);
2893 task_count = totals.task_count;
2894 if (!task_count)
2895 printf("<no still running tasks>\n");
2896
2897 /* CPU idle stats not tracked when samples were skipped */
2898 if (sched->skipped_samples && !sched->idle_hist)
2899 return;
2900
2901 printf("\nIdle stats:\n");
2902 for (i = 0; i < idle_max_cpu; ++i) {
2903 if (cpu_list && !test_bit(i, cpu_bitmap))
2904 continue;
2905
2906 t = idle_threads[i];
2907 if (!t)
2908 continue;
2909
2910 r = thread__priv(t);
2911 if (r && r->run_stats.n) {
2912 totals.sched_count += r->run_stats.n;
2913 printf(" CPU %2d idle for ", i);
2914 print_sched_time(r->total_run_time, 6);
2915 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
2916 } else
2917 printf(" CPU %2d idle entire time window\n", i);
2918 }
2919
2920 if (sched->idle_hist && sched->show_callchain) {
2921 callchain_param.mode = CHAIN_FOLDED;
2922 callchain_param.value = CCVAL_PERIOD;
2923
2924 callchain_register_param(&callchain_param);
2925
2926 printf("\nIdle stats by callchain:\n");
2927 for (i = 0; i < idle_max_cpu; ++i) {
2928 struct idle_thread_runtime *itr;
2929
2930 t = idle_threads[i];
2931 if (!t)
2932 continue;
2933
2934 itr = thread__priv(t);
2935 if (itr == NULL)
2936 continue;
2937
2938 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain,
2939 0, &callchain_param);
2940
2941 printf(" CPU %2d:", i);
2942 print_sched_time(itr->tr.total_run_time, 6);
2943 printf(" msec\n");
2944 timehist_print_idlehist_callchain(&itr->sorted_root);
2945 printf("\n");
2946 }
2947 }
2948
2949 printf("\n"
2950 " Total number of unique tasks: %" PRIu64 "\n"
2951 "Total number of context switches: %" PRIu64 "\n",
2952 totals.task_count, totals.sched_count);
2953
2954 printf(" Total run time (msec): ");
2955 print_sched_time(totals.total_run_time, 2);
2956 printf("\n");
2957
2958 printf(" Total scheduling time (msec): ");
2959 print_sched_time(hist_time, 2);
2960 printf(" (x %d)\n", sched->max_cpu.cpu);
2961 }
2962
2963 typedef int (*sched_handler)(struct perf_tool *tool,
2964 union perf_event *event,
2965 struct evsel *evsel,
2966 struct perf_sample *sample,
2967 struct machine *machine);
2968
2969 static int perf_timehist__process_sample(struct perf_tool *tool,
2970 union perf_event *event,
2971 struct perf_sample *sample,
2972 struct evsel *evsel,
2973 struct machine *machine)
2974 {
2975 struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
2976 int err = 0;
2977 struct perf_cpu this_cpu = {
2978 .cpu = sample->cpu,
2979 };
2980
2981 if (this_cpu.cpu > sched->max_cpu.cpu)
2982 sched->max_cpu = this_cpu;
2983
2984 if (evsel->handler != NULL) {
2985 sched_handler f = evsel->handler;
2986
2987 err = f(tool, event, evsel, sample, machine);
2988 }
2989
2990 return err;
2991 }
2992
2993 static int timehist_check_attr(struct perf_sched *sched,
2994 struct evlist *evlist)
2995 {
2996 struct evsel *evsel;
2997 struct evsel_runtime *er;
2998
2999 list_for_each_entry(evsel, &evlist->core.entries, core.node) {
3000 er = evsel__get_runtime(evsel);
3001 if (er == NULL) {
3002 pr_err("Failed to allocate memory for evsel runtime data\n");
3003 return -1;
3004 }
3005
3006 /* only need to save callchain related to sched_switch event */
3007 if (sched->show_callchain &&
3008 evsel__name_is(evsel, "sched:sched_switch") &&
3009 !evsel__has_callchain(evsel)) {
3010 pr_info("Samples of sched_switch event do not have callchains.\n");
3011 sched->show_callchain = 0;
3012 symbol_conf.use_callchain = 0;
3013 }
3014 }
3015
3016 return 0;
3017 }
3018
3019 static int perf_sched__timehist(struct perf_sched *sched)
3020 {
3021 struct evsel_str_handler handlers[] = {
3022 { "sched:sched_switch", timehist_sched_switch_event, },
3023 { "sched:sched_wakeup", timehist_sched_wakeup_event, },
3024 { "sched:sched_waking", timehist_sched_wakeup_event, },
3025 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
3026 };
3027 const struct evsel_str_handler migrate_handlers[] = {
3028 { "sched:sched_migrate_task", timehist_migrate_task_event, },
3029 };
3030 struct perf_data data = {
3031 .path = input_name,
3032 .mode = PERF_DATA_MODE_READ,
3033 .force = sched->force,
3034 };
3035
3036 struct perf_session *session;
3037 struct evlist *evlist;
3038 int err = -1;
3039
3040 /*
3041 * event handlers for timehist option
3042 */
3043 sched->tool.sample = perf_timehist__process_sample;
3044 sched->tool.mmap = perf_event__process_mmap;
3045 sched->tool.comm = perf_event__process_comm;
3046 sched->tool.exit = perf_event__process_exit;
3047 sched->tool.fork = perf_event__process_fork;
3048 sched->tool.lost = process_lost;
3049 sched->tool.attr = perf_event__process_attr;
3050 sched->tool.tracing_data = perf_event__process_tracing_data;
3051 sched->tool.build_id = perf_event__process_build_id;
3052
3053 sched->tool.ordered_events = true;
3054 sched->tool.ordering_requires_timestamps = true;
3055
3056 symbol_conf.use_callchain = sched->show_callchain;
3057
3058 session = perf_session__new(&data, &sched->tool);
3059 if (IS_ERR(session))
3060 return PTR_ERR(session);
3061
3062 if (cpu_list) {
3063 err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
3064 if (err < 0)
3065 goto out;
3066 }
3067
3068 evlist = session->evlist;
3069
3070 symbol__init(&session->header.env);
3071
3072 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) {
3073 pr_err("Invalid time string\n");
3074 err = -EINVAL;
3075 goto out;
3076 }
3077
3078 if (timehist_check_attr(sched, evlist) != 0)
3079 goto out;
3080
3081 setup_pager();
3082
3083 /* prefer sched_waking if it is captured */
3084 if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
3085 handlers[1].handler = timehist_sched_wakeup_ignore;
3086
3087 /* setup per-evsel handlers */
3088 if (perf_session__set_tracepoints_handlers(session, handlers))
3089 goto out;
3090
3091 /* sched_switch event at a minimum needs to exist */
3092 if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
3093 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
3094 goto out;
3095 }
3096
3097 if (sched->show_migrations &&
3098 perf_session__set_tracepoints_handlers(session, migrate_handlers))
3099 goto out;
3100
3101 /* pre-allocate struct for per-CPU idle stats */
3102 sched->max_cpu.cpu = session->header.env.nr_cpus_online;
3103 if (sched->max_cpu.cpu == 0)
3104 sched->max_cpu.cpu = 4;
3105 if (init_idle_threads(sched->max_cpu.cpu))
3106 goto out;
3107
3108 /* summary_only implies summary option, but don't overwrite summary if set */
3109 if (sched->summary_only)
3110 sched->summary = sched->summary_only;
3111
3112 if (!sched->summary_only)
3113 timehist_header(sched);
3114
3115 err = perf_session__process_events(session);
3116 if (err) {
3117 pr_err("Failed to process events, error %d", err);
3118 goto out;
3119 }
3120
3121 sched->nr_events = evlist->stats.nr_events[0];
3122 sched->nr_lost_events = evlist->stats.total_lost;
3123 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST];
3124
3125 if (sched->summary)
3126 timehist_print_summary(sched, session);
3127
3128 out:
3129 free_idle_threads();
3130 perf_session__delete(session);
3131
3132 return err;
3133 }
3134
3135
3136 static void print_bad_events(struct perf_sched *sched)
3137 {
3138 if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
3139 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
3140 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
3141 sched->nr_unordered_timestamps, sched->nr_timestamps);
3142 }
3143 if (sched->nr_lost_events && sched->nr_events) {
3144 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
3145 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
3146 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
3147 }
3148 if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
3149 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
3150 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
3151 sched->nr_context_switch_bugs, sched->nr_timestamps);
3152 if (sched->nr_lost_events)
3153 printf(" (due to lost events?)");
3154 printf("\n");
3155 }
3156 }
3157
3158 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data)
3159 {
3160 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
3161 struct work_atoms *this;
3162 const char *comm = thread__comm_str(data->thread), *this_comm;
3163 bool leftmost = true;
3164
3165 while (*new) {
3166 int cmp;
3167
3168 this = container_of(*new, struct work_atoms, node);
3169 parent = *new;
3170
3171 this_comm = thread__comm_str(this->thread);
3172 cmp = strcmp(comm, this_comm);
3173 if (cmp > 0) {
3174 new = &((*new)->rb_left);
3175 } else if (cmp < 0) {
3176 new = &((*new)->rb_right);
3177 leftmost = false;
3178 } else {
3179 this->num_merged++;
3180 this->total_runtime += data->total_runtime;
3181 this->nb_atoms += data->nb_atoms;
3182 this->total_lat += data->total_lat;
3183 list_splice(&data->work_list, &this->work_list);
3184 if (this->max_lat < data->max_lat) {
3185 this->max_lat = data->max_lat;
3186 this->max_lat_start = data->max_lat_start;
3187 this->max_lat_end = data->max_lat_end;
3188 }
3189 zfree(&data);
3190 return;
3191 }
3192 }
3193
3194 data->num_merged++;
3195 rb_link_node(&data->node, parent, new);
3196 rb_insert_color_cached(&data->node, root, leftmost);
3197 }
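
/*
 * Editor's note: this tree is keyed by the thread's comm string. When
 * a node with the same comm already exists, the incoming atoms are
 * spliced onto its work_list and its totals and maximum latency are
 * folded in, so the latency table prints one row per comm. Passing
 * -p/--pids sets skip_merge and bypasses this merge entirely.
 */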
3198
3199 static void perf_sched__merge_lat(struct perf_sched *sched)
3200 {
3201 struct work_atoms *data;
3202 struct rb_node *node;
3203
3204 if (sched->skip_merge)
3205 return;
3206
3207 while ((node = rb_first_cached(&sched->atom_root))) {
3208 rb_erase_cached(node, &sched->atom_root);
3209 data = rb_entry(node, struct work_atoms, node);
3210 __merge_work_atoms(&sched->merged_atom_root, data);
3211 }
3212 }
3213
3214 static int setup_cpus_switch_event(struct perf_sched *sched)
3215 {
3216 unsigned int i;
3217
3218 sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched)));
3219 if (!sched->cpu_last_switched)
3220 return -1;
3221
3222 sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid)));
3223 if (!sched->curr_pid) {
3224 zfree(&sched->cpu_last_switched);
3225 return -1;
3226 }
3227
3228 for (i = 0; i < MAX_CPUS; i++)
3229 sched->curr_pid[i] = -1;
3230
3231 return 0;
3232 }
3233
3234 static void free_cpus_switch_event(struct perf_sched *sched)
3235 {
3236 zfree(&sched->curr_pid);
3237 zfree(&sched->cpu_last_switched);
3238 }
3239
3240 static int perf_sched__lat(struct perf_sched *sched)
3241 {
3242 int rc = -1;
3243 struct rb_node *next;
3244
3245 setup_pager();
3246
3247 if (setup_cpus_switch_event(sched))
3248 return rc;
3249
3250 if (perf_sched__read_events(sched))
3251 goto out_free_cpus_switch_event;
3252
3253 perf_sched__merge_lat(sched);
3254 perf_sched__sort_lat(sched);
3255
3256 printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
3257 printf(" Task | Runtime ms | Switches | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
3258 printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
3259
3260 next = rb_first_cached(&sched->sorted_atom_root);
3261
3262 while (next) {
3263 struct work_atoms *work_list;
3264
3265 work_list = rb_entry(next, struct work_atoms, node);
3266 output_lat_thread(sched, work_list);
3267 next = rb_next(next);
3268 thread__zput(work_list->thread);
3269 }
3270
3271 printf(" -----------------------------------------------------------------------------------------------------------------\n");
3272 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
3273 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
3274
3275 printf(" ---------------------------------------------------\n");
3276
3277 print_bad_events(sched);
3278 printf("\n");
3279
3280 rc = 0;
3281
3282 out_free_cpus_switch_event:
3283 free_cpus_switch_event(sched);
3284 return rc;
3285 }
3286
3287 static int setup_map_cpus(struct perf_sched *sched)
3288 {
3289 sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
3290
3291 if (sched->map.comp) {
3292 sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
3293 if (!sched->map.comp_cpus)
3294 return -1;
3295 }
3296
3297 if (sched->map.cpus_str) {
3298 sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str);
3299 if (!sched->map.cpus) {
3300 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
3301 zfree(&sched->map.comp_cpus);
3302 return -1;
3303 }
3304 }
3305
3306 return 0;
3307 }
3308
3309 static int setup_color_pids(struct perf_sched *sched)
3310 {
3311 struct perf_thread_map *map;
3312
3313 if (!sched->map.color_pids_str)
3314 return 0;
3315
3316 map = thread_map__new_by_tid_str(sched->map.color_pids_str);
3317 if (!map) {
3318 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
3319 return -1;
3320 }
3321
3322 sched->map.color_pids = map;
3323 return 0;
3324 }
3325
3326 static int setup_color_cpus(struct perf_sched *sched)
3327 {
3328 struct perf_cpu_map *map;
3329
3330 if (!sched->map.color_cpus_str)
3331 return 0;
3332
3333 map = perf_cpu_map__new(sched->map.color_cpus_str);
3334 if (!map) {
3335 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
3336 return -1;
3337 }
3338
3339 sched->map.color_cpus = map;
3340 return 0;
3341 }
3342
3343 static int perf_sched__map(struct perf_sched *sched)
3344 {
3345 int rc = -1;
3346
3347 sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread)));
3348 if (!sched->curr_thread)
3349 return rc;
3350
3351 if (setup_cpus_switch_event(sched))
3352 goto out_free_curr_thread;
3353
3354 if (setup_map_cpus(sched))
3355 goto out_free_cpus_switch_event;
3356
3357 if (setup_color_pids(sched))
3358 goto out_put_map_cpus;
3359
3360 if (setup_color_cpus(sched))
3361 goto out_put_color_pids;
3362
3363 setup_pager();
3364 if (perf_sched__read_events(sched))
3365 goto out_put_color_cpus;
3366
3367 rc = 0;
3368 print_bad_events(sched);
3369
3370 out_put_color_cpus:
3371 perf_cpu_map__put(sched->map.color_cpus);
3372
3373 out_put_color_pids:
3374 perf_thread_map__put(sched->map.color_pids);
3375
3376 out_put_map_cpus:
3377 zfree(&sched->map.comp_cpus);
3378 perf_cpu_map__put(sched->map.cpus);
3379
3380 out_free_cpus_switch_event:
3381 free_cpus_switch_event(sched);
3382
3383 out_free_curr_thread:
3384 zfree(&sched->curr_thread);
3385 return rc;
3386 }
3387
3388 static int perf_sched__replay(struct perf_sched *sched)
3389 {
3390 int ret;
3391 unsigned long i;
3392
3393 mutex_init(&sched->start_work_mutex);
3394 mutex_init(&sched->work_done_wait_mutex);
3395
3396 ret = setup_cpus_switch_event(sched);
3397 if (ret)
3398 goto out_mutex_destroy;
3399
3400 calibrate_run_measurement_overhead(sched);
3401 calibrate_sleep_measurement_overhead(sched);
3402
3403 test_calibrations(sched);
3404
3405 ret = perf_sched__read_events(sched);
3406 if (ret)
3407 goto out_free_cpus_switch_event;
3408
3409 printf("nr_run_events: %ld\n", sched->nr_run_events);
3410 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
3411 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
3412
3413 if (sched->targetless_wakeups)
3414 printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
3415 if (sched->multitarget_wakeups)
3416 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
3417 if (sched->nr_run_events_optimized)
3418 printf("run atoms optimized: %ld\n",
3419 sched->nr_run_events_optimized);
3420
3421 print_task_traces(sched);
3422 add_cross_task_wakeups(sched);
3423
3424 sched->thread_funcs_exit = false;
3425 create_tasks(sched);
3426 printf("------------------------------------------------------------\n");
3427 for (i = 0; i < sched->replay_repeat; i++)
3428 run_one_test(sched);
3429
3430 sched->thread_funcs_exit = true;
3431 destroy_tasks(sched);
3432
3433 out_free_cpus_switch_event:
3434 free_cpus_switch_event(sched);
3435
3436 out_mutex_destroy:
3437 mutex_destroy(&sched->start_work_mutex);
3438 mutex_destroy(&sched->work_done_wait_mutex);
3439 return ret;
3440 }
3441
3442 static void setup_sorting(struct perf_sched *sched, const struct option *options,
3443 const char * const usage_msg[])
3444 {
3445 char *tmp, *tok, *str = strdup(sched->sort_order);
3446
3447 for (tok = strtok_r(str, ", ", &tmp);
3448 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3449 if (sort_dimension__add(tok, &sched->sort_list) < 0) {
3450 usage_with_options_msg(usage_msg, options,
3451 "Unknown --sort key: `%s'", tok);
3452 }
3453 }
3454
3455 free(str);
3456
3457 sort_dimension__add("pid", &sched->cmp_pid);
3458 }
3459
3460 static bool schedstat_events_exposed(void)
3461 {
3462 /*
3463 * Select "sched:sched_stat_wait" event to check
3464 * whether schedstat tracepoints are exposed.
3465 */
3466 return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
3467 false : true;
3468 }
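
/*
 * Editor's note: trace_event__tp_format() yields an ERR_PTR when the
 * tracepoint's format file cannot be read, so a successful lookup of
 * sched_stat_wait is used as a proxy for CONFIG_SCHEDSTATS being set.
 */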
3469
3470 static int __cmd_record(int argc, const char **argv)
3471 {
3472 unsigned int rec_argc, i, j;
3473 char **rec_argv;
3474 const char **rec_argv_copy;
3475 const char * const record_args[] = {
3476 "record",
3477 "-a",
3478 "-R",
3479 "-m", "1024",
3480 "-c", "1",
3481 "-e", "sched:sched_switch",
3482 "-e", "sched:sched_stat_runtime",
3483 "-e", "sched:sched_process_fork",
3484 "-e", "sched:sched_wakeup_new",
3485 "-e", "sched:sched_migrate_task",
3486 };
3487
3488 /*
3489 * The tracepoints trace_sched_stat_{wait, sleep, iowait}
3490 * are not exposed to user space if CONFIG_SCHEDSTATS is not set,
3491 * so to keep "perf sched record" from failing, probe whether the
3492 * schedstat events are actually available before requesting them.
3493 */
3494 const char * const schedstat_args[] = {
3495 "-e", "sched:sched_stat_wait",
3496 "-e", "sched:sched_stat_sleep",
3497 "-e", "sched:sched_stat_iowait",
3498 };
3499 unsigned int schedstat_argc = schedstat_events_exposed() ?
3500 ARRAY_SIZE(schedstat_args) : 0;
3501
3502 struct tep_event *waking_event;
3503 int ret;
3504
3505 /*
3506 * +2 for either "-e", "sched:sched_wakeup" or
3507 * "-e", "sched:sched_waking"
3508 */
3509 rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
3510 rec_argv = calloc(rec_argc + 1, sizeof(char *));
3511 if (rec_argv == NULL)
3512 return -ENOMEM;
3513 rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
3514 if (rec_argv_copy == NULL) {
3515 free(rec_argv);
3516 return -ENOMEM;
3517 }
3518
3519 for (i = 0; i < ARRAY_SIZE(record_args); i++)
3520 rec_argv[i] = strdup(record_args[i]);
3521
3522 rec_argv[i++] = strdup("-e");
3523 waking_event = trace_event__tp_format("sched", "sched_waking");
3524 if (!IS_ERR(waking_event))
3525 rec_argv[i++] = strdup("sched:sched_waking");
3526 else
3527 rec_argv[i++] = strdup("sched:sched_wakeup");
3528
3529 for (j = 0; j < schedstat_argc; j++)
3530 rec_argv[i++] = strdup(schedstat_args[j]);
3531
3532 for (j = 1; j < (unsigned int)argc; j++, i++)
3533 rec_argv[i] = strdup(argv[j]);
3534
3535 BUG_ON(i != rec_argc);
3536
3537 memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
3538 ret = cmd_record(rec_argc, rec_argv_copy);
3539
3540 for (i = 0; i < rec_argc; i++)
3541 free(rec_argv[i]);
3542 free(rec_argv);
3543 free(rec_argv_copy);
3544
3545 return ret;
3546 }
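
/*
 * Editor's note (worked example): record_args above holds 17 strings,
 * so a bare "perf sched record" (argc == 1) on a kernel without
 * schedstat events yields rec_argc = 17 + 2 + 0 + 1 - 1 = 19: the
 * fixed arguments plus one "-e sched:sched_waking" (or sched_wakeup)
 * pair. Any extra user arguments are appended after these.
 */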
3547
3548 int cmd_sched(int argc, const char **argv)
3549 {
3550 static const char default_sort_order[] = "avg, max, switch, runtime";
3551 struct perf_sched sched = {
3552 .tool = {
3553 .sample = perf_sched__process_tracepoint_sample,
3554 .comm = perf_sched__process_comm,
3555 .namespaces = perf_event__process_namespaces,
3556 .lost = perf_event__process_lost,
3557 .fork = perf_sched__process_fork_event,
3558 .ordered_events = true,
3559 },
3560 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
3561 .sort_list = LIST_HEAD_INIT(sched.sort_list),
3562 .sort_order = default_sort_order,
3563 .replay_repeat = 10,
3564 .profile_cpu = -1,
3565 .next_shortname1 = 'A',
3566 .next_shortname2 = '0',
3567 .skip_merge = 0,
3568 .show_callchain = 1,
3569 .max_stack = 5,
3570 };
3571 const struct option sched_options[] = {
3572 OPT_STRING('i', "input", &input_name, "file",
3573 "input file name"),
3574 OPT_INCR('v', "verbose", &verbose,
3575 "be more verbose (show symbol address, etc)"),
3576 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
3577 "dump raw trace in ASCII"),
3578 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
3579 OPT_END()
3580 };
3581 const struct option latency_options[] = {
3582 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
3583 "sort by key(s): runtime, switch, avg, max"),
3584 OPT_INTEGER('C', "CPU", &sched.profile_cpu,
3585 "CPU to profile on"),
3586 OPT_BOOLEAN('p', "pids", &sched.skip_merge,
3587 "latency stats per pid instead of per comm"),
3588 OPT_PARENT(sched_options)
3589 };
3590 const struct option replay_options[] = {
3591 OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
3592 "repeat the workload replay N times (-1: infinite)"),
3593 OPT_PARENT(sched_options)
3594 };
3595 const struct option map_options[] = {
3596 OPT_BOOLEAN(0, "compact", &sched.map.comp,
3597 "map output in compact mode"),
3598 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
3599 "highlight given pids in map"),
3600 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
3601 "highlight given CPUs in map"),
3602 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
3603 "display given CPUs in map"),
3604 OPT_PARENT(sched_options)
3605 };
3606 const struct option timehist_options[] = {
3607 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
3608 "file", "vmlinux pathname"),
3609 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
3610 "file", "kallsyms pathname"),
3611 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain,
3612 "Display call chains if present (default on)"),
3613 OPT_UINTEGER(0, "max-stack", &sched.max_stack,
3614 "Maximum number of functions to display backtrace."),
3615 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
3616 "Look for files with symbols relative to this directory"),
3617 OPT_BOOLEAN('s', "summary", &sched.summary_only,
3618 "Show only syscall summary with statistics"),
3619 OPT_BOOLEAN('S', "with-summary", &sched.summary,
3620 "Show all syscalls and summary with statistics"),
3621 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"),
3622 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"),
3623 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"),
3624 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"),
3625 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"),
3626 OPT_STRING(0, "time", &sched.time_str, "str",
3627 "Time span for analysis (start,stop)"),
3628 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"),
3629 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
3630 "analyze events only for given process id(s)"),
3631 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
3632 "analyze events only for given thread id(s)"),
3633 OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
3634 OPT_PARENT(sched_options)
3635 };
3636
3637 const char * const latency_usage[] = {
3638 "perf sched latency [<options>]",
3639 NULL
3640 };
3641 const char * const replay_usage[] = {
3642 "perf sched replay [<options>]",
3643 NULL
3644 };
3645 const char * const map_usage[] = {
3646 "perf sched map [<options>]",
3647 NULL
3648 };
3649 const char * const timehist_usage[] = {
3650 "perf sched timehist [<options>]",
3651 NULL
3652 };
3653 const char *const sched_subcommands[] = { "record", "latency", "map",
3654 "replay", "script",
3655 "timehist", NULL };
3656 const char *sched_usage[] = {
3657 NULL,
3658 NULL
3659 };
3660 struct trace_sched_handler lat_ops = {
3661 .wakeup_event = latency_wakeup_event,
3662 .switch_event = latency_switch_event,
3663 .runtime_event = latency_runtime_event,
3664 .migrate_task_event = latency_migrate_task_event,
3665 };
3666 struct trace_sched_handler map_ops = {
3667 .switch_event = map_switch_event,
3668 };
3669 struct trace_sched_handler replay_ops = {
3670 .wakeup_event = replay_wakeup_event,
3671 .switch_event = replay_switch_event,
3672 .fork_event = replay_fork_event,
3673 };
3674 int ret;
3675
3676 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
3677 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
3678 if (!argc)
3679 usage_with_options(sched_usage, sched_options);
3680
3681 /*
3682 * Aliased to 'perf script' for now:
3683 */
3684 if (!strcmp(argv[0], "script")) {
3685 return cmd_script(argc, argv);
3686 } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
3687 return __cmd_record(argc, argv);
3688 } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
3689 sched.tp_handler = &lat_ops;
3690 if (argc > 1) {
3691 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
3692 if (argc)
3693 usage_with_options(latency_usage, latency_options);
3694 }
3695 setup_sorting(&sched, latency_options, latency_usage);
3696 return perf_sched__lat(&sched);
3697 } else if (!strcmp(argv[0], "map")) {
3698 if (argc) {
3699 argc = parse_options(argc, argv, map_options, map_usage, 0);
3700 if (argc)
3701 usage_with_options(map_usage, map_options);
3702 }
3703 sched.tp_handler = &map_ops;
3704 setup_sorting(&sched, latency_options, latency_usage);
3705 return perf_sched__map(&sched);
3706 } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
3707 sched.tp_handler = &replay_ops;
3708 if (argc) {
3709 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
3710 if (argc)
3711 usage_with_options(replay_usage, replay_options);
3712 }
3713 return perf_sched__replay(&sched);
3714 } else if (!strcmp(argv[0], "timehist")) {
3715 if (argc) {
3716 argc = parse_options(argc, argv, timehist_options,
3717 timehist_usage, 0);
3718 if (argc)
3719 usage_with_options(timehist_usage, timehist_options);
3720 }
3721 if ((sched.show_wakeups || sched.show_next) &&
3722 sched.summary_only) {
3723 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n");
3724 parse_options_usage(timehist_usage, timehist_options, "s", true);
3725 if (sched.show_wakeups)
3726 parse_options_usage(NULL, timehist_options, "w", true);
3727 if (sched.show_next)
3728 parse_options_usage(NULL, timehist_options, "n", true);
3729 return -EINVAL;
3730 }
3731 ret = symbol__validate_sym_arguments();
3732 if (ret)
3733 return ret;
3734
3735 return perf_sched__timehist(&sched);
3736 } else {
3737 usage_with_options(sched_usage, sched_options);
3738 }
3739
3740 /* free usage string allocated by parse_options_subcommand */
3741 free((void *)sched_usage[0]);
3742
3743 return 0;
3744 }
3745