Lines Matching refs:sched
147 int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
150 int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
153 int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
157 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
160 int (*migrate_task_event)(struct perf_sched *sched,
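
The five callbacks above form the per-subcommand tracepoint-handler table that this listing (apparently perf's builtin-sched.c) dispatches through: the latency, map and replay modes each install their own set. A declarations-only sketch of such a table; the struct name, the incomplete types, and the parameters not visible in the truncated lines are assumptions modelled on the signatures shown above:

    /* Illustrative sketch of the handler table; names are assumptions. */
    struct perf_sched;
    struct evsel;
    struct perf_sample;
    struct machine;
    union perf_event;

    struct sched_tp_handler {
        int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
                            struct perf_sample *sample, struct machine *machine);
        int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
                             struct perf_sample *sample, struct machine *machine);
        int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
                            struct perf_sample *sample, struct machine *machine);
        int (*fork_event)(struct perf_sched *sched, union perf_event *event,
                          struct machine *machine);
        int (*migrate_task_event)(struct perf_sched *sched, struct evsel *evsel,
                                  struct perf_sample *sample, struct machine *machine);
    };
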
306 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) in burn_nsecs() argument
312 } while (T1 + sched->run_measurement_overhead < T0 + nsecs); in burn_nsecs()
325 static void calibrate_run_measurement_overhead(struct perf_sched *sched) in calibrate_run_measurement_overhead() argument
332 burn_nsecs(sched, 0); in calibrate_run_measurement_overhead()
337 sched->run_measurement_overhead = min_delta; in calibrate_run_measurement_overhead()
342 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched) in calibrate_sleep_measurement_overhead() argument
355 sched->sleep_measurement_overhead = min_delta; in calibrate_sleep_measurement_overhead()
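
burn_nsecs() spins until enough wall-clock time has passed (compensating for run_measurement_overhead), and the two calibrate_* helpers apparently take the minimum of several timed probes so replayed runs and sleeps can be corrected for measurement cost. A self-contained sketch of that take-the-minimum calibration idea; get_nsecs(), the probe count and the empty probe body are simplifications, not the file's exact code:

    #include <inttypes.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t get_nsecs(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    /* Take the cheapest of ten back-to-back probes as the fixed overhead
     * of one "read clock, do nothing, read clock again" cycle. */
    static uint64_t calibrate_run_overhead(void)
    {
        uint64_t min_delta = UINT64_MAX;

        for (int i = 0; i < 10; i++) {
            uint64_t t0 = get_nsecs();
            uint64_t t1 = get_nsecs();

            if (t1 - t0 < min_delta)
                min_delta = t1 - t0;
        }
        return min_delta;
    }

    int main(void)
    {
        printf("run measurement overhead: %" PRIu64 " ns\n",
               calibrate_run_overhead());
        return 0;
    }
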
388 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, in add_sched_event_run() argument
398 sched->nr_run_events_optimized++; in add_sched_event_run()
408 sched->nr_run_events++; in add_sched_event_run()
411 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, in add_sched_event_wakeup() argument
422 sched->targetless_wakeups++; in add_sched_event_wakeup()
426 sched->multitarget_wakeups++; in add_sched_event_wakeup()
435 sched->nr_wakeup_events++; in add_sched_event_wakeup()
438 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, in add_sched_event_sleep() argument
445 sched->nr_sleep_events++; in add_sched_event_sleep()
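
add_sched_event_run(), add_sched_event_wakeup() and add_sched_event_sleep() build a per-task log of scheduling "atoms" that the replay threads later act out; the nr_run_events_optimized counter suggests that consecutive run atoms are merged rather than appended. A reduced sketch of that per-task event log; the atom layout and the merge rule are assumptions modelled on the counters above:

    #include <stdint.h>
    #include <stdlib.h>

    enum sched_event_type { SCHED_EVENT_RUN, SCHED_EVENT_SLEEP, SCHED_EVENT_WAKEUP };

    struct sched_atom {
        enum sched_event_type type;
        uint64_t timestamp;
        uint64_t duration;          /* meaningful for RUN atoms */
    };

    struct task_desc {
        struct sched_atom **atoms;
        unsigned long nr_events;
    };

    /* Append a RUN atom, or extend the previous atom when it is already a
     * RUN (the "optimized" path counted by nr_run_events_optimized). */
    static void add_run_atom(struct task_desc *task, uint64_t timestamp, uint64_t duration)
    {
        struct sched_atom *last =
            task->nr_events ? task->atoms[task->nr_events - 1] : NULL;

        if (last && last->type == SCHED_EVENT_RUN) {
            last->duration += duration;
            return;
        }

        struct sched_atom *atom = calloc(1, sizeof(*atom));

        atom->type = SCHED_EVENT_RUN;
        atom->timestamp = timestamp;
        atom->duration = duration;

        task->atoms = realloc(task->atoms, ++task->nr_events * sizeof(*task->atoms));
        task->atoms[task->nr_events - 1] = atom;
    }

    int main(void)
    {
        struct task_desc task = { 0 };

        add_run_atom(&task, 0, 500);
        add_run_atom(&task, 500, 700); /* merged into the first atom */
        return (int)task.nr_events;    /* exits with 1 */
    }
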
448 static struct task_desc *register_pid(struct perf_sched *sched, in register_pid() argument
454 if (sched->pid_to_task == NULL) { in register_pid()
457 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL); in register_pid()
460 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) * in register_pid()
463 sched->pid_to_task[pid_max++] = NULL; in register_pid()
466 task = sched->pid_to_task[pid]; in register_pid()
473 task->nr = sched->nr_tasks; in register_pid()
479 add_sched_event_sleep(sched, task, 0, 0); in register_pid()
481 sched->pid_to_task[pid] = task; in register_pid()
482 sched->nr_tasks++; in register_pid()
483 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *)); in register_pid()
484 BUG_ON(!sched->tasks); in register_pid()
485 sched->tasks[task->nr] = task; in register_pid()
488 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); in register_pid()
494 static void print_task_traces(struct perf_sched *sched) in print_task_traces() argument
499 for (i = 0; i < sched->nr_tasks; i++) { in print_task_traces()
500 task = sched->tasks[i]; in print_task_traces()
506 static void add_cross_task_wakeups(struct perf_sched *sched) in add_cross_task_wakeups() argument
511 for (i = 0; i < sched->nr_tasks; i++) { in add_cross_task_wakeups()
512 task1 = sched->tasks[i]; in add_cross_task_wakeups()
514 if (j == sched->nr_tasks) in add_cross_task_wakeups()
516 task2 = sched->tasks[j]; in add_cross_task_wakeups()
517 add_sched_event_wakeup(sched, task1, 0, task2); in add_cross_task_wakeups()
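
add_cross_task_wakeups() appears to chain the registered tasks into a ring: task i gets a wakeup event targeting task i+1, with the index wrapping back to the first task when it runs off the end. A tiny sketch of that pairing loop, with add_wakeup() standing in for add_sched_event_wakeup():

    #include <stdio.h>

    static void add_wakeup(unsigned long waker, unsigned long wakee)
    {
        printf("task #%lu wakes task #%lu\n", waker, wakee);
    }

    static void add_cross_task_wakeups(unsigned long nr_tasks)
    {
        for (unsigned long i = 0; i < nr_tasks; i++) {
            unsigned long j = i + 1;

            if (j == nr_tasks)      /* wrap: the last task wakes the first */
                j = 0;
            add_wakeup(i, j);
        }
    }

    int main(void)
    {
        add_cross_task_wakeups(4);
        return 0;
    }
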
521 static void perf_sched__process_event(struct perf_sched *sched, in perf_sched__process_event() argument
528 burn_nsecs(sched, atom->duration); in perf_sched__process_event()
562 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task) in self_open_counters() argument
581 if (sched->force) { in self_open_counters()
583 limit.rlim_cur += sched->nr_tasks - cur_task; in self_open_counters()
617 struct perf_sched *sched; member
625 struct perf_sched *sched = parms->sched; in thread_func() local
638 while (!sched->thread_funcs_exit) { in thread_func()
641 mutex_lock(&sched->start_work_mutex); in thread_func()
642 mutex_unlock(&sched->start_work_mutex); in thread_func()
648 perf_sched__process_event(sched, this_task->atoms[i]); in thread_func()
656 mutex_lock(&sched->work_done_wait_mutex); in thread_func()
657 mutex_unlock(&sched->work_done_wait_mutex); in thread_func()
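
thread_func() gates each replay round on two mutexes owned by the parent: a worker "waits at the gate" by locking and immediately unlocking start_work_mutex, which only succeeds once create_tasks()/wait_for_tasks() release it, and work_done_wait_mutex plays the same role for the collection phase. A compilable sketch of that lock-then-unlock gate using plain POSIX mutexes (perf wraps these in its own mutex type; the thread count and work body are placeholders):

    #include <pthread.h>
    #include <stdio.h>

    /* The parent holds start_gate while it prepares a round of work; each
     * worker blocks on the lock until the parent opens the gate. */
    static pthread_mutex_t start_gate = PTHREAD_MUTEX_INITIALIZER;

    static void *worker(void *arg)
    {
        long id = (long)arg;

        pthread_mutex_lock(&start_gate);    /* blocks until the gate opens */
        pthread_mutex_unlock(&start_gate);  /* let the next worker through */

        printf("worker %ld running\n", id);
        return NULL;
    }

    int main(void)
    {
        pthread_t tid[4];

        pthread_mutex_lock(&start_gate);    /* close the gate before spawning */
        for (long i = 0; i < 4; i++)
            pthread_create(&tid[i], NULL, worker, (void *)i);

        /* ... set up the round ... */
        pthread_mutex_unlock(&start_gate);  /* all workers may proceed */

        for (int i = 0; i < 4; i++)
            pthread_join(tid[i], NULL);
        return 0;
    }
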
662 static void create_tasks(struct perf_sched *sched) in create_tasks() argument
663 EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex) in create_tasks()
664 EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex) in create_tasks()
676 mutex_lock(&sched->start_work_mutex); in create_tasks()
677 mutex_lock(&sched->work_done_wait_mutex); in create_tasks()
678 for (i = 0; i < sched->nr_tasks; i++) { in create_tasks()
681 parms->task = task = sched->tasks[i]; in create_tasks()
682 parms->sched = sched; in create_tasks()
683 parms->fd = self_open_counters(sched, i); in create_tasks()
693 static void destroy_tasks(struct perf_sched *sched) in destroy_tasks() argument
694 UNLOCK_FUNCTION(sched->start_work_mutex) in destroy_tasks()
695 UNLOCK_FUNCTION(sched->work_done_wait_mutex) in destroy_tasks()
701 mutex_unlock(&sched->start_work_mutex); in destroy_tasks()
702 mutex_unlock(&sched->work_done_wait_mutex); in destroy_tasks()
704 for (i = 0; i < sched->nr_tasks; i++) { in destroy_tasks()
705 task = sched->tasks[i]; in destroy_tasks()
714 static void wait_for_tasks(struct perf_sched *sched) in wait_for_tasks() argument
715 EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex) in wait_for_tasks()
716 EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex) in wait_for_tasks()
722 sched->start_time = get_nsecs(); in wait_for_tasks()
723 sched->cpu_usage = 0; in wait_for_tasks()
724 mutex_unlock(&sched->work_done_wait_mutex); in wait_for_tasks()
726 for (i = 0; i < sched->nr_tasks; i++) { in wait_for_tasks()
727 task = sched->tasks[i]; in wait_for_tasks()
732 mutex_lock(&sched->work_done_wait_mutex); in wait_for_tasks()
736 mutex_unlock(&sched->start_work_mutex); in wait_for_tasks()
738 for (i = 0; i < sched->nr_tasks; i++) { in wait_for_tasks()
739 task = sched->tasks[i]; in wait_for_tasks()
743 sched->cpu_usage += task->cpu_usage; in wait_for_tasks()
748 if (!sched->runavg_cpu_usage) in wait_for_tasks()
749 sched->runavg_cpu_usage = sched->cpu_usage; in wait_for_tasks()
750 …sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage… in wait_for_tasks()
752 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; in wait_for_tasks()
753 if (!sched->runavg_parent_cpu_usage) in wait_for_tasks()
754 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; in wait_for_tasks()
755 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) + in wait_for_tasks()
756 sched->parent_cpu_usage)/sched->replay_repeat; in wait_for_tasks()
758 mutex_lock(&sched->start_work_mutex); in wait_for_tasks()
760 for (i = 0; i < sched->nr_tasks; i++) { in wait_for_tasks()
761 task = sched->tasks[i]; in wait_for_tasks()
767 static void run_one_test(struct perf_sched *sched) in run_one_test() argument
768 EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex) in run_one_test()
769 EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex) in run_one_test()
774 wait_for_tasks(sched); in run_one_test()
778 sched->sum_runtime += delta; in run_one_test()
779 sched->nr_runs++; in run_one_test()
781 avg_delta = sched->sum_runtime / sched->nr_runs; in run_one_test()
786 sched->sum_fluct += fluct; in run_one_test()
787 if (!sched->run_avg) in run_one_test()
788 sched->run_avg = delta; in run_one_test()
789 sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat; in run_one_test()
791 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC); in run_one_test()
793 printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC); in run_one_test()
796 (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC); in run_one_test()
804 (double)sched->parent_cpu_usage / NSEC_PER_MSEC, in run_one_test()
805 (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC); in run_one_test()
810 if (sched->nr_sleep_corrections) in run_one_test()
811 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); in run_one_test()
812 sched->nr_sleep_corrections = 0; in run_one_test()
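
Both the CPU-usage figures in wait_for_tasks() and run_avg in run_one_test() are smoothed with the same recurrence: the previous average is weighted by (replay_repeat - 1) and the newest sample by 1, after seeding the average with the first sample. A small numeric sketch of that update (the sample values are made up):

    #include <stdio.h>

    /* avg' = (avg * (repeat - 1) + sample) / repeat
     * i.e. an exponentially weighted running average whose memory is tied
     * to the configured number of replay repetitions. */
    static double update_runavg(double avg, double sample, unsigned int repeat)
    {
        if (avg == 0.0)             /* first round: seed with the sample */
            return sample;
        return (avg * (repeat - 1) + sample) / repeat;
    }

    int main(void)
    {
        double avg = 0.0;
        double run_time[] = { 10.0, 12.0, 8.0, 11.0 };  /* made-up msec values */

        for (int i = 0; i < 4; i++) {
            avg = update_runavg(avg, run_time[i], 10);
            printf("run #%d: %.3f, ravg: %.2f\n", i, run_time[i], avg);
        }
        return 0;
    }
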
815 static void test_calibrations(struct perf_sched *sched) in test_calibrations() argument
820 burn_nsecs(sched, NSEC_PER_MSEC); in test_calibrations()
833 replay_wakeup_event(struct perf_sched *sched, in replay_wakeup_event() argument
847 waker = register_pid(sched, sample->tid, "<unknown>"); in replay_wakeup_event()
848 wakee = register_pid(sched, pid, comm); in replay_wakeup_event()
850 add_sched_event_wakeup(sched, waker, sample->time, wakee); in replay_wakeup_event()
854 static int replay_switch_event(struct perf_sched *sched, in replay_switch_event() argument
875 timestamp0 = sched->cpu_last_switched[cpu]; in replay_switch_event()
889 prev = register_pid(sched, prev_pid, prev_comm); in replay_switch_event()
890 next = register_pid(sched, next_pid, next_comm); in replay_switch_event()
892 sched->cpu_last_switched[cpu] = timestamp; in replay_switch_event()
894 add_sched_event_run(sched, prev, timestamp, delta); in replay_switch_event()
895 add_sched_event_sleep(sched, prev, timestamp, prev_state); in replay_switch_event()
900 static int replay_fork_event(struct perf_sched *sched, in replay_fork_event() argument
923 register_pid(sched, thread__tid(parent), thread__comm_str(parent)); in replay_fork_event()
924 register_pid(sched, thread__tid(child), thread__comm_str(child)); in replay_fork_event()
1039 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) in thread_atoms_insert() argument
1049 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); in thread_atoms_insert()
1128 static int latency_switch_event(struct perf_sched *sched, in latency_switch_event() argument
1144 timestamp0 = sched->cpu_last_switched[cpu]; in latency_switch_event()
1145 sched->cpu_last_switched[cpu] = timestamp; in latency_switch_event()
1161 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); in latency_switch_event()
1163 if (thread_atoms_insert(sched, sched_out)) in latency_switch_event()
1165 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); in latency_switch_event()
1174 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); in latency_switch_event()
1176 if (thread_atoms_insert(sched, sched_in)) in latency_switch_event()
1178 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); in latency_switch_event()
1198 static int latency_runtime_event(struct perf_sched *sched, in latency_runtime_event() argument
1206 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); in latency_runtime_event()
1215 if (thread_atoms_insert(sched, thread)) in latency_runtime_event()
1217 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); in latency_runtime_event()
1233 static int latency_wakeup_event(struct perf_sched *sched, in latency_wakeup_event() argument
1248 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); in latency_wakeup_event()
1250 if (thread_atoms_insert(sched, wakee)) in latency_wakeup_event()
1252 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); in latency_wakeup_event()
1276 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) in latency_wakeup_event()
1279 sched->nr_timestamps++; in latency_wakeup_event()
1281 sched->nr_unordered_timestamps++; in latency_wakeup_event()
1294 static int latency_migrate_task_event(struct perf_sched *sched, in latency_migrate_task_event() argument
1309 if (sched->profile_cpu == -1) in latency_migrate_task_event()
1315 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); in latency_migrate_task_event()
1317 if (thread_atoms_insert(sched, migrant)) in latency_migrate_task_event()
1319 register_pid(sched, thread__tid(migrant), thread__comm_str(migrant)); in latency_migrate_task_event()
1320 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); in latency_migrate_task_event()
1334 sched->nr_timestamps++; in latency_migrate_task_event()
1337 sched->nr_unordered_timestamps++; in latency_migrate_task_event()
1344 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) in output_lat_thread() argument
1359 sched->all_runtime += work_list->total_runtime; in output_lat_thread()
1360 sched->all_count += work_list->nb_atoms; in output_lat_thread()
1492 static void perf_sched__sort_lat(struct perf_sched *sched) in perf_sched__sort_lat() argument
1495 struct rb_root_cached *root = &sched->atom_root; in perf_sched__sort_lat()
1505 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); in perf_sched__sort_lat()
1507 if (root == &sched->atom_root) { in perf_sched__sort_lat()
1508 root = &sched->merged_atom_root; in perf_sched__sort_lat()
1518 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in process_sched_wakeup_event() local
1520 if (sched->tp_handler->wakeup_event) in process_sched_wakeup_event()
1521 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); in process_sched_wakeup_event()
1549 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid) in map__findnew_thread() argument
1556 if (!sched->map.color_pids || !thread || thread__priv(thread)) in map__findnew_thread()
1559 if (thread_map__has(sched->map.color_pids, tid)) in map__findnew_thread()
1566 static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, in map_switch_event() argument
1586 if (this_cpu.cpu > sched->max_cpu.cpu) in map_switch_event()
1587 sched->max_cpu = this_cpu; in map_switch_event()
1589 if (sched->map.comp) { in map_switch_event()
1590 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); in map_switch_event()
1591 if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) { in map_switch_event()
1592 sched->map.comp_cpus[cpus_nr++] = this_cpu; in map_switch_event()
1596 cpus_nr = sched->max_cpu.cpu; in map_switch_event()
1598 timestamp0 = sched->cpu_last_switched[this_cpu.cpu]; in map_switch_event()
1599 sched->cpu_last_switched[this_cpu.cpu] = timestamp; in map_switch_event()
1610 sched_in = map__findnew_thread(sched, machine, -1, next_pid); in map_switch_event()
1620 sched->curr_thread[this_cpu.cpu] = thread__get(sched_in); in map_switch_event()
1634 tr->shortname[0] = sched->next_shortname1; in map_switch_event()
1635 tr->shortname[1] = sched->next_shortname2; in map_switch_event()
1637 if (sched->next_shortname1 < 'Z') { in map_switch_event()
1638 sched->next_shortname1++; in map_switch_event()
1640 sched->next_shortname1 = 'A'; in map_switch_event()
1641 if (sched->next_shortname2 < '9') in map_switch_event()
1642 sched->next_shortname2++; in map_switch_event()
1644 sched->next_shortname2 = '0'; in map_switch_event()
1652 .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i, in map_switch_event()
1654 struct thread *curr_thread = sched->curr_thread[cpu.cpu]; in map_switch_event()
1662 if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu)) in map_switch_event()
1665 if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu)) in map_switch_event()
1673 if (sched->curr_thread[cpu.cpu]) { in map_switch_event()
1674 curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]); in map_switch_event()
1684 if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu)) in map_switch_event()
1700 if (sched->map.comp && new_cpu) in map_switch_event()
1716 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in process_sched_switch_event() local
1721 if (sched->curr_pid[this_cpu] != (u32)-1) { in process_sched_switch_event()
1726 if (sched->curr_pid[this_cpu] != prev_pid) in process_sched_switch_event()
1727 sched->nr_context_switch_bugs++; in process_sched_switch_event()
1730 if (sched->tp_handler->switch_event) in process_sched_switch_event()
1731 err = sched->tp_handler->switch_event(sched, evsel, sample, machine); in process_sched_switch_event()
1733 sched->curr_pid[this_cpu] = next_pid; in process_sched_switch_event()
1742 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in process_sched_runtime_event() local
1744 if (sched->tp_handler->runtime_event) in process_sched_runtime_event()
1745 return sched->tp_handler->runtime_event(sched, evsel, sample, machine); in process_sched_runtime_event()
1755 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in perf_sched__process_fork_event() local
1761 if (sched->tp_handler->fork_event) in perf_sched__process_fork_event()
1762 return sched->tp_handler->fork_event(sched, event, machine); in perf_sched__process_fork_event()
1772 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in process_sched_migrate_task_event() local
1774 if (sched->tp_handler->migrate_task_event) in process_sched_migrate_task_event()
1775 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); in process_sched_migrate_task_event()
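
process_sched_wakeup_event(), process_sched_switch_event() and the other wrappers above all recover the enclosing perf_sched from the embedded tool with container_of() and then forward to whichever tp_handler table the active sub-command installed. A stand-alone sketch of that embed-and-recover dispatch pattern, with the types cut down to the bare minimum and renamed to make clear they are illustrative:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct my_sched;                    /* forward declaration */

    struct tool {                       /* the interface the session layer calls */
        int (*wakeup)(struct tool *tool);
    };

    struct sched_handler {              /* per-subcommand callback table */
        int (*wakeup_event)(struct my_sched *sched);
    };

    struct my_sched {                   /* full state, with the tool embedded */
        struct tool tool;
        const struct sched_handler *tp_handler;
        unsigned long nr_events;
    };

    /* Wrapper in the style of process_sched_wakeup_event(): recover the
     * enclosing state from the embedded member, then dispatch. */
    static int process_wakeup(struct tool *tool)
    {
        struct my_sched *sched = container_of(tool, struct my_sched, tool);

        sched->nr_events++;
        if (sched->tp_handler && sched->tp_handler->wakeup_event)
            return sched->tp_handler->wakeup_event(sched);
        return 0;
    }

    static int latency_wakeup(struct my_sched *sched)
    {
        printf("latency handler saw event #%lu\n", sched->nr_events);
        return 0;
    }

    static const struct sched_handler lat_ops = { .wakeup_event = latency_wakeup };

    int main(void)
    {
        struct my_sched sched = {
            .tool       = { .wakeup = process_wakeup },
            .tp_handler = &lat_ops,
        };

        return sched.tool.wakeup(&sched.tool);
    }
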
1832 static int perf_sched__read_events(struct perf_sched *sched) in perf_sched__read_events() argument
1846 .force = sched->force, in perf_sched__read_events()
1850 session = perf_session__new(&data, &sched->tool); in perf_sched__read_events()
1872 sched->nr_events = session->evlist->stats.nr_events[0]; in perf_sched__read_events()
1873 sched->nr_lost_events = session->evlist->stats.total_lost; in perf_sched__read_events()
1874 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST]; in perf_sched__read_events()
1977 static void timehist_header(struct perf_sched *sched) in timehist_header() argument
1979 u32 ncpus = sched->max_cpu.cpu + 1; in timehist_header()
1984 if (sched->show_cpu_visual) { in timehist_header()
1997 if (sched->show_state) in timehist_header()
2007 if (sched->show_cpu_visual) in timehist_header()
2013 if (sched->show_state) in timehist_header()
2023 if (sched->show_cpu_visual) in timehist_header()
2030 if (sched->show_state) in timehist_header()
2048 static void timehist_print_sample(struct perf_sched *sched, in timehist_print_sample() argument
2058 u32 max_cpus = sched->max_cpu.cpu + 1; in timehist_print_sample()
2069 if (sched->show_cpu_visual) { in timehist_print_sample()
2093 if (sched->show_state) in timehist_print_sample()
2096 if (sched->show_next) { in timehist_print_sample()
2101 if (sched->show_wakeups && !sched->show_next) in timehist_print_sample()
2107 if (sched->show_callchain) in timehist_print_sample()
2194 static void save_task_callchain(struct perf_sched *sched, in save_task_callchain() argument
2209 if (!sched->show_callchain || sample->callchain == NULL) in save_task_callchain()
2215 NULL, NULL, sched->max_stack + 2) != 0) { in save_task_callchain()
2339 static void save_idle_callchain(struct perf_sched *sched, in save_idle_callchain() argument
2345 if (!sched->show_callchain || sample->callchain == NULL) in save_idle_callchain()
2355 static struct thread *timehist_get_thread(struct perf_sched *sched, in timehist_get_thread() argument
2376 save_task_callchain(sched, sample, evsel, machine); in timehist_get_thread()
2377 if (sched->idle_hist) { in timehist_get_thread()
2395 save_idle_callchain(sched, itr, sample); in timehist_get_thread()
2402 static bool timehist_skip_sample(struct perf_sched *sched, in timehist_skip_sample() argument
2411 sched->skipped_samples++; in timehist_skip_sample()
2414 if (sched->idle_hist) { in timehist_skip_sample()
2425 static void timehist_print_wakeup_event(struct perf_sched *sched, in timehist_print_wakeup_event() argument
2439 if (timehist_skip_sample(sched, thread, evsel, sample) && in timehist_print_wakeup_event()
2440 timehist_skip_sample(sched, awakened, evsel, sample)) { in timehist_print_wakeup_event()
2446 if (sched->show_cpu_visual) in timehist_print_wakeup_event()
2447 printf(" %*s ", sched->max_cpu.cpu + 1, ""); in timehist_print_wakeup_event()
2474 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in timehist_sched_wakeup_event() local
2492 if (sched->show_wakeups && in timehist_sched_wakeup_event()
2493 !perf_time__skip_sample(&sched->ptime, sample->time)) in timehist_sched_wakeup_event()
2494 timehist_print_wakeup_event(sched, evsel, sample, machine, thread); in timehist_sched_wakeup_event()
2499 static void timehist_print_migration_event(struct perf_sched *sched, in timehist_print_migration_event() argument
2510 if (sched->summary_only) in timehist_print_migration_event()
2513 max_cpus = sched->max_cpu.cpu + 1; in timehist_print_migration_event()
2521 if (timehist_skip_sample(sched, thread, evsel, sample) && in timehist_print_migration_event()
2522 timehist_skip_sample(sched, migrated, evsel, sample)) { in timehist_print_migration_event()
2529 if (sched->show_cpu_visual) { in timehist_print_migration_event()
2558 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in timehist_migrate_task_event() local
2575 timehist_print_migration_event(sched, evsel, sample, machine, thread); in timehist_migrate_task_event()
2586 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in timehist_sched_change_event() local
2587 struct perf_time_interval *ptime = &sched->ptime; in timehist_sched_change_event()
2603 thread = timehist_get_thread(sched, sample, machine, evsel); in timehist_sched_change_event()
2609 if (timehist_skip_sample(sched, thread, evsel, sample)) in timehist_sched_change_event()
2648 if (!sched->idle_hist || thread__tid(thread) == 0) { in timehist_sched_change_event()
2652 if (sched->idle_hist) { in timehist_sched_change_event()
2685 if (!sched->summary_only) in timehist_sched_change_event()
2686 timehist_print_sample(sched, evsel, sample, &al, thread, t, state); in timehist_sched_change_event()
2689 if (sched->hist_time.start == 0 && t >= ptime->start) in timehist_sched_change_event()
2690 sched->hist_time.start = t; in timehist_sched_change_event()
2692 sched->hist_time.end = t; in timehist_sched_change_event()
2777 struct perf_sched *sched; member
2797 if (stats->sched->show_state) in show_thread_runtime()
2860 static void timehist_print_summary(struct perf_sched *sched, in timehist_print_summary() argument
2869 u64 hist_time = sched->hist_time.end - sched->hist_time.start; in timehist_print_summary()
2872 totals.sched = sched; in timehist_print_summary()
2874 if (sched->idle_hist) { in timehist_print_summary()
2878 } else if (sched->show_state) { in timehist_print_summary()
2889 sched->show_state ? "(msec)" : "%"); in timehist_print_summary()
2898 if (sched->skipped_samples && !sched->idle_hist) in timehist_print_summary()
2920 if (sched->idle_hist && sched->show_callchain) { in timehist_print_summary()
2960 printf(" (x %d)\n", sched->max_cpu.cpu); in timehist_print_summary()
2975 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); in perf_timehist__process_sample() local
2981 if (this_cpu.cpu > sched->max_cpu.cpu) in perf_timehist__process_sample()
2982 sched->max_cpu = this_cpu; in perf_timehist__process_sample()
2993 static int timehist_check_attr(struct perf_sched *sched, in timehist_check_attr() argument
3007 if (sched->show_callchain && in timehist_check_attr()
3011 sched->show_callchain = 0; in timehist_check_attr()
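
timehist_check_attr() evidently walks the event list and drops --call-graph output when the recorded events were not sampled with callchains (show_callchain is cleared). A reduced sketch of that sample_type check; the constants come from linux/perf_event.h, but the single-attr walk and the message text are simplifications:

    #include <linux/perf_event.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Disable callchain display if the attr lacks PERF_SAMPLE_CALLCHAIN. */
    static bool callchains_usable(const struct perf_event_attr *attr, bool show_callchain)
    {
        if (show_callchain && !(attr->sample_type & PERF_SAMPLE_CALLCHAIN)) {
            fprintf(stderr, "Samples do not have callchains.\n");
            return false;
        }
        return show_callchain;
    }

    int main(void)
    {
        struct perf_event_attr attr = {
            .sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME,
        };

        printf("show_callchain = %d\n", callchains_usable(&attr, true));
        return 0;
    }
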
3019 static int perf_sched__timehist(struct perf_sched *sched) in perf_sched__timehist() argument
3033 .force = sched->force, in perf_sched__timehist()
3043 sched->tool.sample = perf_timehist__process_sample; in perf_sched__timehist()
3044 sched->tool.mmap = perf_event__process_mmap; in perf_sched__timehist()
3045 sched->tool.comm = perf_event__process_comm; in perf_sched__timehist()
3046 sched->tool.exit = perf_event__process_exit; in perf_sched__timehist()
3047 sched->tool.fork = perf_event__process_fork; in perf_sched__timehist()
3048 sched->tool.lost = process_lost; in perf_sched__timehist()
3049 sched->tool.attr = perf_event__process_attr; in perf_sched__timehist()
3050 sched->tool.tracing_data = perf_event__process_tracing_data; in perf_sched__timehist()
3051 sched->tool.build_id = perf_event__process_build_id; in perf_sched__timehist()
3053 sched->tool.ordered_events = true; in perf_sched__timehist()
3054 sched->tool.ordering_requires_timestamps = true; in perf_sched__timehist()
3056 symbol_conf.use_callchain = sched->show_callchain; in perf_sched__timehist()
3058 session = perf_session__new(&data, &sched->tool); in perf_sched__timehist()
3072 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) { in perf_sched__timehist()
3078 if (timehist_check_attr(sched, evlist) != 0) in perf_sched__timehist()
3097 if (sched->show_migrations && in perf_sched__timehist()
3102 sched->max_cpu.cpu = session->header.env.nr_cpus_online; in perf_sched__timehist()
3103 if (sched->max_cpu.cpu == 0) in perf_sched__timehist()
3104 sched->max_cpu.cpu = 4; in perf_sched__timehist()
3105 if (init_idle_threads(sched->max_cpu.cpu)) in perf_sched__timehist()
3109 if (sched->summary_only) in perf_sched__timehist()
3110 sched->summary = sched->summary_only; in perf_sched__timehist()
3112 if (!sched->summary_only) in perf_sched__timehist()
3113 timehist_header(sched); in perf_sched__timehist()
3121 sched->nr_events = evlist->stats.nr_events[0]; in perf_sched__timehist()
3122 sched->nr_lost_events = evlist->stats.total_lost; in perf_sched__timehist()
3123 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST]; in perf_sched__timehist()
3125 if (sched->summary) in perf_sched__timehist()
3126 timehist_print_summary(sched, session); in perf_sched__timehist()
3136 static void print_bad_events(struct perf_sched *sched) in print_bad_events() argument
3138 if (sched->nr_unordered_timestamps && sched->nr_timestamps) { in print_bad_events()
3140 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, in print_bad_events()
3141 sched->nr_unordered_timestamps, sched->nr_timestamps); in print_bad_events()
3143 if (sched->nr_lost_events && sched->nr_events) { in print_bad_events()
3145 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, in print_bad_events()
3146 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); in print_bad_events()
3148 if (sched->nr_context_switch_bugs && sched->nr_timestamps) { in print_bad_events()
3150 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, in print_bad_events()
3151 sched->nr_context_switch_bugs, sched->nr_timestamps); in print_bad_events()
3152 if (sched->nr_lost_events) in print_bad_events()
3199 static void perf_sched__merge_lat(struct perf_sched *sched) in perf_sched__merge_lat() argument
3204 if (sched->skip_merge) in perf_sched__merge_lat()
3207 while ((node = rb_first_cached(&sched->atom_root))) { in perf_sched__merge_lat()
3208 rb_erase_cached(node, &sched->atom_root); in perf_sched__merge_lat()
3210 __merge_work_atoms(&sched->merged_atom_root, data); in perf_sched__merge_lat()
3214 static int setup_cpus_switch_event(struct perf_sched *sched) in setup_cpus_switch_event() argument
3218 sched->cpu_last_switched = calloc(MAX_CPUS, sizeof(*(sched->cpu_last_switched))); in setup_cpus_switch_event()
3219 if (!sched->cpu_last_switched) in setup_cpus_switch_event()
3222 sched->curr_pid = malloc(MAX_CPUS * sizeof(*(sched->curr_pid))); in setup_cpus_switch_event()
3223 if (!sched->curr_pid) { in setup_cpus_switch_event()
3224 zfree(&sched->cpu_last_switched); in setup_cpus_switch_event()
3229 sched->curr_pid[i] = -1; in setup_cpus_switch_event()
3234 static void free_cpus_switch_event(struct perf_sched *sched) in free_cpus_switch_event() argument
3236 zfree(&sched->curr_pid); in free_cpus_switch_event()
3237 zfree(&sched->cpu_last_switched); in free_cpus_switch_event()
3240 static int perf_sched__lat(struct perf_sched *sched) in perf_sched__lat() argument
3247 if (setup_cpus_switch_event(sched)) in perf_sched__lat()
3250 if (perf_sched__read_events(sched)) in perf_sched__lat()
3253 perf_sched__merge_lat(sched); in perf_sched__lat()
3254 perf_sched__sort_lat(sched); in perf_sched__lat()
3260 next = rb_first_cached(&sched->sorted_atom_root); in perf_sched__lat()
3266 output_lat_thread(sched, work_list); in perf_sched__lat()
3273 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count); in perf_sched__lat()
3277 print_bad_events(sched); in perf_sched__lat()
3283 free_cpus_switch_event(sched); in perf_sched__lat()
3287 static int setup_map_cpus(struct perf_sched *sched) in setup_map_cpus() argument
3289 sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF); in setup_map_cpus()
3291 if (sched->map.comp) { in setup_map_cpus()
3292 sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int)); in setup_map_cpus()
3293 if (!sched->map.comp_cpus) in setup_map_cpus()
3297 if (sched->map.cpus_str) { in setup_map_cpus()
3298 sched->map.cpus = perf_cpu_map__new(sched->map.cpus_str); in setup_map_cpus()
3299 if (!sched->map.cpus) { in setup_map_cpus()
3300 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); in setup_map_cpus()
3301 zfree(&sched->map.comp_cpus); in setup_map_cpus()
3309 static int setup_color_pids(struct perf_sched *sched) in setup_color_pids() argument
3313 if (!sched->map.color_pids_str) in setup_color_pids()
3316 map = thread_map__new_by_tid_str(sched->map.color_pids_str); in setup_color_pids()
3318 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str); in setup_color_pids()
3322 sched->map.color_pids = map; in setup_color_pids()
3326 static int setup_color_cpus(struct perf_sched *sched) in setup_color_cpus() argument
3330 if (!sched->map.color_cpus_str) in setup_color_cpus()
3333 map = perf_cpu_map__new(sched->map.color_cpus_str); in setup_color_cpus()
3335 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str); in setup_color_cpus()
3339 sched->map.color_cpus = map; in setup_color_cpus()
3343 static int perf_sched__map(struct perf_sched *sched) in perf_sched__map() argument
3347 sched->curr_thread = calloc(MAX_CPUS, sizeof(*(sched->curr_thread))); in perf_sched__map()
3348 if (!sched->curr_thread) in perf_sched__map()
3351 if (setup_cpus_switch_event(sched)) in perf_sched__map()
3354 if (setup_map_cpus(sched)) in perf_sched__map()
3357 if (setup_color_pids(sched)) in perf_sched__map()
3360 if (setup_color_cpus(sched)) in perf_sched__map()
3364 if (perf_sched__read_events(sched)) in perf_sched__map()
3368 print_bad_events(sched); in perf_sched__map()
3371 perf_cpu_map__put(sched->map.color_cpus); in perf_sched__map()
3374 perf_thread_map__put(sched->map.color_pids); in perf_sched__map()
3377 zfree(&sched->map.comp_cpus); in perf_sched__map()
3378 perf_cpu_map__put(sched->map.cpus); in perf_sched__map()
3381 free_cpus_switch_event(sched); in perf_sched__map()
3384 zfree(&sched->curr_thread); in perf_sched__map()
3388 static int perf_sched__replay(struct perf_sched *sched) in perf_sched__replay() argument
3393 mutex_init(&sched->start_work_mutex); in perf_sched__replay()
3394 mutex_init(&sched->work_done_wait_mutex); in perf_sched__replay()
3396 ret = setup_cpus_switch_event(sched); in perf_sched__replay()
3400 calibrate_run_measurement_overhead(sched); in perf_sched__replay()
3401 calibrate_sleep_measurement_overhead(sched); in perf_sched__replay()
3403 test_calibrations(sched); in perf_sched__replay()
3405 ret = perf_sched__read_events(sched); in perf_sched__replay()
3409 printf("nr_run_events: %ld\n", sched->nr_run_events); in perf_sched__replay()
3410 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); in perf_sched__replay()
3411 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); in perf_sched__replay()
3413 if (sched->targetless_wakeups) in perf_sched__replay()
3414 printf("target-less wakeups: %ld\n", sched->targetless_wakeups); in perf_sched__replay()
3415 if (sched->multitarget_wakeups) in perf_sched__replay()
3416 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); in perf_sched__replay()
3417 if (sched->nr_run_events_optimized) in perf_sched__replay()
3419 sched->nr_run_events_optimized); in perf_sched__replay()
3421 print_task_traces(sched); in perf_sched__replay()
3422 add_cross_task_wakeups(sched); in perf_sched__replay()
3424 sched->thread_funcs_exit = false; in perf_sched__replay()
3425 create_tasks(sched); in perf_sched__replay()
3427 for (i = 0; i < sched->replay_repeat; i++) in perf_sched__replay()
3428 run_one_test(sched); in perf_sched__replay()
3430 sched->thread_funcs_exit = true; in perf_sched__replay()
3431 destroy_tasks(sched); in perf_sched__replay()
3434 free_cpus_switch_event(sched); in perf_sched__replay()
3437 mutex_destroy(&sched->start_work_mutex); in perf_sched__replay()
3438 mutex_destroy(&sched->work_done_wait_mutex); in perf_sched__replay()
3442 static void setup_sorting(struct perf_sched *sched, const struct option *options, in setup_sorting() argument
3445 char *tmp, *tok, *str = strdup(sched->sort_order); in setup_sorting()
3449 if (sort_dimension__add(tok, &sched->sort_list) < 0) { in setup_sorting()
3457 sort_dimension__add("pid", &sched->cmp_pid); in setup_sorting()
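
setup_sorting() splits the user's -s key[,key2...] string and registers one comparison dimension per token on sched->sort_list; a "pid" dimension is additionally registered on the separate cmp_pid list so latency atoms can be grouped per task. A sketch of that comma-separated parsing with strtok_r; the dimension table is invented for illustration and only the sort_list half is shown:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Invented stand-in for sort_dimension__add(): accept only known keys. */
    static int add_sort_key(const char *tok)
    {
        static const char *known[] = { "runtime", "switch", "avg", "max", "pid" };

        for (size_t i = 0; i < sizeof(known) / sizeof(known[0]); i++) {
            if (!strcmp(tok, known[i])) {
                printf("sorting by '%s'\n", tok);
                return 0;
            }
        }
        return -1;
    }

    static void setup_sorting(const char *sort_order)
    {
        char *str = strdup(sort_order);
        char *tmp, *tok;

        for (tok = strtok_r(str, ", ", &tmp); tok;
             tok = strtok_r(NULL, ", ", &tmp)) {
            if (add_sort_key(tok) < 0) {
                fprintf(stderr, "Unknown --sort key: '%s'\n", tok);
                exit(1);
            }
        }
        free(str);
    }

    int main(void)
    {
        setup_sorting("avg,max");
        return 0;
    }
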
3551 struct perf_sched sched = { in cmd_sched() local
3560 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), in cmd_sched()
3561 .sort_list = LIST_HEAD_INIT(sched.sort_list), in cmd_sched()
3578 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"), in cmd_sched()
3582 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", in cmd_sched()
3584 OPT_INTEGER('C', "CPU", &sched.profile_cpu, in cmd_sched()
3586 OPT_BOOLEAN('p', "pids", &sched.skip_merge, in cmd_sched()
3591 OPT_UINTEGER('r', "repeat", &sched.replay_repeat, in cmd_sched()
3596 OPT_BOOLEAN(0, "compact", &sched.map.comp, in cmd_sched()
3598 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids", in cmd_sched()
3600 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus", in cmd_sched()
3602 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus", in cmd_sched()
3611 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain, in cmd_sched()
3613 OPT_UINTEGER(0, "max-stack", &sched.max_stack, in cmd_sched()
3617 OPT_BOOLEAN('s', "summary", &sched.summary_only, in cmd_sched()
3619 OPT_BOOLEAN('S', "with-summary", &sched.summary, in cmd_sched()
3621 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"), in cmd_sched()
3622 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"), in cmd_sched()
3623 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"), in cmd_sched()
3624 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"), in cmd_sched()
3625 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"), in cmd_sched()
3626 OPT_STRING(0, "time", &sched.time_str, "str", in cmd_sched()
3628 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"), in cmd_sched()
3689 sched.tp_handler = &lat_ops; in cmd_sched()
3695 setup_sorting(&sched, latency_options, latency_usage); in cmd_sched()
3696 return perf_sched__lat(&sched); in cmd_sched()
3703 sched.tp_handler = &map_ops; in cmd_sched()
3704 setup_sorting(&sched, latency_options, latency_usage); in cmd_sched()
3705 return perf_sched__map(&sched); in cmd_sched()
3707 sched.tp_handler = &replay_ops; in cmd_sched()
3713 return perf_sched__replay(&sched); in cmd_sched()
3721 if ((sched.show_wakeups || sched.show_next) && in cmd_sched()
3722 sched.summary_only) { in cmd_sched()
3725 if (sched.show_wakeups) in cmd_sched()
3727 if (sched.show_next) in cmd_sched()
3735 return perf_sched__timehist(&sched); in cmd_sched()