1 // SPDX-License-Identifier: GPL-2.0 2 #include "builtin.h" 3 #include "perf.h" 4 #include "perf-sys.h" 5 6 #include "util/cpumap.h" 7 #include "util/evlist.h" 8 #include "util/evsel.h" 9 #include "util/evsel_fprintf.h" 10 #include "util/symbol.h" 11 #include "util/thread.h" 12 #include "util/header.h" 13 #include "util/session.h" 14 #include "util/tool.h" 15 #include "util/cloexec.h" 16 #include "util/thread_map.h" 17 #include "util/color.h" 18 #include "util/stat.h" 19 #include "util/string2.h" 20 #include "util/callchain.h" 21 #include "util/time-utils.h" 22 23 #include <subcmd/pager.h> 24 #include <subcmd/parse-options.h> 25 #include "util/trace-event.h" 26 27 #include "util/debug.h" 28 #include "util/event.h" 29 30 #include <linux/kernel.h> 31 #include <linux/log2.h> 32 #include <linux/zalloc.h> 33 #include <sys/prctl.h> 34 #include <sys/resource.h> 35 #include <inttypes.h> 36 37 #include <errno.h> 38 #include <semaphore.h> 39 #include <pthread.h> 40 #include <math.h> 41 #include <api/fs/fs.h> 42 #include <perf/cpumap.h> 43 #include <linux/time64.h> 44 #include <linux/err.h> 45 46 #include <linux/ctype.h> 47 48 #define PR_SET_NAME 15 /* Set process name */ 49 #define MAX_CPUS 4096 50 #define COMM_LEN 20 51 #define SYM_LEN 129 52 #define MAX_PID 1024000 53 54 struct sched_atom; 55 56 struct task_desc { 57 unsigned long nr; 58 unsigned long pid; 59 char comm[COMM_LEN]; 60 61 unsigned long nr_events; 62 unsigned long curr_event; 63 struct sched_atom **atoms; 64 65 pthread_t thread; 66 sem_t sleep_sem; 67 68 sem_t ready_for_work; 69 sem_t work_done_sem; 70 71 u64 cpu_usage; 72 }; 73 74 enum sched_event_type { 75 SCHED_EVENT_RUN, 76 SCHED_EVENT_SLEEP, 77 SCHED_EVENT_WAKEUP, 78 SCHED_EVENT_MIGRATION, 79 }; 80 81 struct sched_atom { 82 enum sched_event_type type; 83 int specific_wait; 84 u64 timestamp; 85 u64 duration; 86 unsigned long nr; 87 sem_t *wait_sem; 88 struct task_desc *wakee; 89 }; 90 91 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" 92 93 /* task state bitmask, copied from include/linux/sched.h */ 94 #define TASK_RUNNING 0 95 #define TASK_INTERRUPTIBLE 1 96 #define TASK_UNINTERRUPTIBLE 2 97 #define __TASK_STOPPED 4 98 #define __TASK_TRACED 8 99 /* in tsk->exit_state */ 100 #define EXIT_DEAD 16 101 #define EXIT_ZOMBIE 32 102 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) 103 /* in tsk->state again */ 104 #define TASK_DEAD 64 105 #define TASK_WAKEKILL 128 106 #define TASK_WAKING 256 107 #define TASK_PARKED 512 108 109 enum thread_state { 110 THREAD_SLEEPING = 0, 111 THREAD_WAIT_CPU, 112 THREAD_SCHED_IN, 113 THREAD_IGNORE 114 }; 115 116 struct work_atom { 117 struct list_head list; 118 enum thread_state state; 119 u64 sched_out_time; 120 u64 wake_up_time; 121 u64 sched_in_time; 122 u64 runtime; 123 }; 124 125 struct work_atoms { 126 struct list_head work_list; 127 struct thread *thread; 128 struct rb_node node; 129 u64 max_lat; 130 u64 max_lat_at; 131 u64 total_lat; 132 u64 nb_atoms; 133 u64 total_runtime; 134 int num_merged; 135 }; 136 137 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); 138 139 struct perf_sched; 140 141 struct trace_sched_handler { 142 int (*switch_event)(struct perf_sched *sched, struct evsel *evsel, 143 struct perf_sample *sample, struct machine *machine); 144 145 int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel, 146 struct perf_sample *sample, struct machine *machine); 147 148 int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel, 149 struct perf_sample *sample, struct machine *machine); 150 151 /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */ 152 int (*fork_event)(struct perf_sched *sched, union perf_event *event, 153 struct machine *machine); 154 155 int (*migrate_task_event)(struct perf_sched *sched, 156 struct evsel *evsel, 157 struct perf_sample *sample, 158 struct machine *machine); 159 }; 160 161 #define COLOR_PIDS PERF_COLOR_BLUE 162 #define COLOR_CPUS PERF_COLOR_BG_RED 163 164 struct perf_sched_map { 165 DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS); 166 int *comp_cpus; 167 bool comp; 168 struct perf_thread_map *color_pids; 169 const char *color_pids_str; 170 struct perf_cpu_map *color_cpus; 171 const char *color_cpus_str; 172 struct perf_cpu_map *cpus; 173 const char *cpus_str; 174 }; 175 176 struct perf_sched { 177 struct perf_tool tool; 178 const char *sort_order; 179 unsigned long nr_tasks; 180 struct task_desc **pid_to_task; 181 struct task_desc **tasks; 182 const struct trace_sched_handler *tp_handler; 183 pthread_mutex_t start_work_mutex; 184 pthread_mutex_t work_done_wait_mutex; 185 int profile_cpu; 186 /* 187 * Track the current task - that way we can know whether there's any 188 * weird events, such as a task being switched away that is not current. 189 */ 190 int max_cpu; 191 u32 curr_pid[MAX_CPUS]; 192 struct thread *curr_thread[MAX_CPUS]; 193 char next_shortname1; 194 char next_shortname2; 195 unsigned int replay_repeat; 196 unsigned long nr_run_events; 197 unsigned long nr_sleep_events; 198 unsigned long nr_wakeup_events; 199 unsigned long nr_sleep_corrections; 200 unsigned long nr_run_events_optimized; 201 unsigned long targetless_wakeups; 202 unsigned long multitarget_wakeups; 203 unsigned long nr_runs; 204 unsigned long nr_timestamps; 205 unsigned long nr_unordered_timestamps; 206 unsigned long nr_context_switch_bugs; 207 unsigned long nr_events; 208 unsigned long nr_lost_chunks; 209 unsigned long nr_lost_events; 210 u64 run_measurement_overhead; 211 u64 sleep_measurement_overhead; 212 u64 start_time; 213 u64 cpu_usage; 214 u64 runavg_cpu_usage; 215 u64 parent_cpu_usage; 216 u64 runavg_parent_cpu_usage; 217 u64 sum_runtime; 218 u64 sum_fluct; 219 u64 run_avg; 220 u64 all_runtime; 221 u64 all_count; 222 u64 cpu_last_switched[MAX_CPUS]; 223 struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root; 224 struct list_head sort_list, cmp_pid; 225 bool force; 226 bool skip_merge; 227 struct perf_sched_map map; 228 229 /* options for timehist command */ 230 bool summary; 231 bool summary_only; 232 bool idle_hist; 233 bool show_callchain; 234 unsigned int max_stack; 235 bool show_cpu_visual; 236 bool show_wakeups; 237 bool show_next; 238 bool show_migrations; 239 bool show_state; 240 u64 skipped_samples; 241 const char *time_str; 242 struct perf_time_interval ptime; 243 struct perf_time_interval hist_time; 244 }; 245 246 /* per thread run time data */ 247 struct thread_runtime { 248 u64 last_time; /* time of previous sched in/out event */ 249 u64 dt_run; /* run time */ 250 u64 dt_sleep; /* time between CPU access by sleep (off cpu) */ 251 u64 dt_iowait; /* time between CPU access by iowait (off cpu) */ 252 u64 dt_preempt; /* time between CPU access by preempt (off cpu) */ 253 u64 dt_delay; /* time between wakeup and sched-in */ 254 u64 ready_to_run; /* time of wakeup */ 255 256 struct stats run_stats; 257 u64 total_run_time; 258 u64 total_sleep_time; 259 u64 total_iowait_time; 260 u64 total_preempt_time; 261 u64 total_delay_time; 262 263 int last_state; 264 265 char shortname[3]; 266 bool comm_changed; 267 268 u64 migrations; 269 }; 270 271 /* per event run time data */ 272 struct evsel_runtime { 273 u64 *last_time; /* time this event was last seen per cpu */ 274 u32 ncpu; /* highest cpu slot allocated */ 275 }; 276 277 /* per cpu idle time data */ 278 struct idle_thread_runtime { 279 struct thread_runtime tr; 280 struct thread *last_thread; 281 struct rb_root_cached sorted_root; 282 struct callchain_root callchain; 283 struct callchain_cursor cursor; 284 }; 285 286 /* track idle times per cpu */ 287 static struct thread **idle_threads; 288 static int idle_max_cpu; 289 static char idle_comm[] = "<idle>"; 290 291 static u64 get_nsecs(void) 292 { 293 struct timespec ts; 294 295 clock_gettime(CLOCK_MONOTONIC, &ts); 296 297 return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; 298 } 299 300 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) 301 { 302 u64 T0 = get_nsecs(), T1; 303 304 do { 305 T1 = get_nsecs(); 306 } while (T1 + sched->run_measurement_overhead < T0 + nsecs); 307 } 308 309 static void sleep_nsecs(u64 nsecs) 310 { 311 struct timespec ts; 312 313 ts.tv_nsec = nsecs % 999999999; 314 ts.tv_sec = nsecs / 999999999; 315 316 nanosleep(&ts, NULL); 317 } 318 319 static void calibrate_run_measurement_overhead(struct perf_sched *sched) 320 { 321 u64 T0, T1, delta, min_delta = NSEC_PER_SEC; 322 int i; 323 324 for (i = 0; i < 10; i++) { 325 T0 = get_nsecs(); 326 burn_nsecs(sched, 0); 327 T1 = get_nsecs(); 328 delta = T1-T0; 329 min_delta = min(min_delta, delta); 330 } 331 sched->run_measurement_overhead = min_delta; 332 333 printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); 334 } 335 336 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched) 337 { 338 u64 T0, T1, delta, min_delta = NSEC_PER_SEC; 339 int i; 340 341 for (i = 0; i < 10; i++) { 342 T0 = get_nsecs(); 343 sleep_nsecs(10000); 344 T1 = get_nsecs(); 345 delta = T1-T0; 346 min_delta = min(min_delta, delta); 347 } 348 min_delta -= 10000; 349 sched->sleep_measurement_overhead = min_delta; 350 351 printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); 352 } 353 354 static struct sched_atom * 355 get_new_event(struct task_desc *task, u64 timestamp) 356 { 357 struct sched_atom *event = zalloc(sizeof(*event)); 358 unsigned long idx = task->nr_events; 359 size_t size; 360 361 event->timestamp = timestamp; 362 event->nr = idx; 363 364 task->nr_events++; 365 size = sizeof(struct sched_atom *) * task->nr_events; 366 task->atoms = realloc(task->atoms, size); 367 BUG_ON(!task->atoms); 368 369 task->atoms[idx] = event; 370 371 return event; 372 } 373 374 static struct sched_atom *last_event(struct task_desc *task) 375 { 376 if (!task->nr_events) 377 return NULL; 378 379 return task->atoms[task->nr_events - 1]; 380 } 381 382 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, 383 u64 timestamp, u64 duration) 384 { 385 struct sched_atom *event, *curr_event = last_event(task); 386 387 /* 388 * optimize an existing RUN event by merging this one 389 * to it: 390 */ 391 if (curr_event && curr_event->type == SCHED_EVENT_RUN) { 392 sched->nr_run_events_optimized++; 393 curr_event->duration += duration; 394 return; 395 } 396 397 event = get_new_event(task, timestamp); 398 399 event->type = SCHED_EVENT_RUN; 400 event->duration = duration; 401 402 sched->nr_run_events++; 403 } 404 405 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, 406 u64 timestamp, struct task_desc *wakee) 407 { 408 struct sched_atom *event, *wakee_event; 409 410 event = get_new_event(task, timestamp); 411 event->type = SCHED_EVENT_WAKEUP; 412 event->wakee = wakee; 413 414 wakee_event = last_event(wakee); 415 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) { 416 sched->targetless_wakeups++; 417 return; 418 } 419 if (wakee_event->wait_sem) { 420 sched->multitarget_wakeups++; 421 return; 422 } 423 424 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem)); 425 sem_init(wakee_event->wait_sem, 0, 0); 426 wakee_event->specific_wait = 1; 427 event->wait_sem = wakee_event->wait_sem; 428 429 sched->nr_wakeup_events++; 430 } 431 432 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, 433 u64 timestamp, u64 task_state __maybe_unused) 434 { 435 struct sched_atom *event = get_new_event(task, timestamp); 436 437 event->type = SCHED_EVENT_SLEEP; 438 439 sched->nr_sleep_events++; 440 } 441 442 static struct task_desc *register_pid(struct perf_sched *sched, 443 unsigned long pid, const char *comm) 444 { 445 struct task_desc *task; 446 static int pid_max; 447 448 if (sched->pid_to_task == NULL) { 449 if (sysctl__read_int("kernel/pid_max", &pid_max) < 0) 450 pid_max = MAX_PID; 451 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL); 452 } 453 if (pid >= (unsigned long)pid_max) { 454 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) * 455 sizeof(struct task_desc *))) == NULL); 456 while (pid >= (unsigned long)pid_max) 457 sched->pid_to_task[pid_max++] = NULL; 458 } 459 460 task = sched->pid_to_task[pid]; 461 462 if (task) 463 return task; 464 465 task = zalloc(sizeof(*task)); 466 task->pid = pid; 467 task->nr = sched->nr_tasks; 468 strcpy(task->comm, comm); 469 /* 470 * every task starts in sleeping state - this gets ignored 471 * if there's no wakeup pointing to this sleep state: 472 */ 473 add_sched_event_sleep(sched, task, 0, 0); 474 475 sched->pid_to_task[pid] = task; 476 sched->nr_tasks++; 477 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *)); 478 BUG_ON(!sched->tasks); 479 sched->tasks[task->nr] = task; 480 481 if (verbose > 0) 482 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); 483 484 return task; 485 } 486 487 488 static void print_task_traces(struct perf_sched *sched) 489 { 490 struct task_desc *task; 491 unsigned long i; 492 493 for (i = 0; i < sched->nr_tasks; i++) { 494 task = sched->tasks[i]; 495 printf("task %6ld (%20s:%10ld), nr_events: %ld\n", 496 task->nr, task->comm, task->pid, task->nr_events); 497 } 498 } 499 500 static void add_cross_task_wakeups(struct perf_sched *sched) 501 { 502 struct task_desc *task1, *task2; 503 unsigned long i, j; 504 505 for (i = 0; i < sched->nr_tasks; i++) { 506 task1 = sched->tasks[i]; 507 j = i + 1; 508 if (j == sched->nr_tasks) 509 j = 0; 510 task2 = sched->tasks[j]; 511 add_sched_event_wakeup(sched, task1, 0, task2); 512 } 513 } 514 515 static void perf_sched__process_event(struct perf_sched *sched, 516 struct sched_atom *atom) 517 { 518 int ret = 0; 519 520 switch (atom->type) { 521 case SCHED_EVENT_RUN: 522 burn_nsecs(sched, atom->duration); 523 break; 524 case SCHED_EVENT_SLEEP: 525 if (atom->wait_sem) 526 ret = sem_wait(atom->wait_sem); 527 BUG_ON(ret); 528 break; 529 case SCHED_EVENT_WAKEUP: 530 if (atom->wait_sem) 531 ret = sem_post(atom->wait_sem); 532 BUG_ON(ret); 533 break; 534 case SCHED_EVENT_MIGRATION: 535 break; 536 default: 537 BUG_ON(1); 538 } 539 } 540 541 static u64 get_cpu_usage_nsec_parent(void) 542 { 543 struct rusage ru; 544 u64 sum; 545 int err; 546 547 err = getrusage(RUSAGE_SELF, &ru); 548 BUG_ON(err); 549 550 sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC; 551 sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC; 552 553 return sum; 554 } 555 556 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task) 557 { 558 struct perf_event_attr attr; 559 char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE]; 560 int fd; 561 struct rlimit limit; 562 bool need_privilege = false; 563 564 memset(&attr, 0, sizeof(attr)); 565 566 attr.type = PERF_TYPE_SOFTWARE; 567 attr.config = PERF_COUNT_SW_TASK_CLOCK; 568 569 force_again: 570 fd = sys_perf_event_open(&attr, 0, -1, -1, 571 perf_event_open_cloexec_flag()); 572 573 if (fd < 0) { 574 if (errno == EMFILE) { 575 if (sched->force) { 576 BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1); 577 limit.rlim_cur += sched->nr_tasks - cur_task; 578 if (limit.rlim_cur > limit.rlim_max) { 579 limit.rlim_max = limit.rlim_cur; 580 need_privilege = true; 581 } 582 if (setrlimit(RLIMIT_NOFILE, &limit) == -1) { 583 if (need_privilege && errno == EPERM) 584 strcpy(info, "Need privilege\n"); 585 } else 586 goto force_again; 587 } else 588 strcpy(info, "Have a try with -f option\n"); 589 } 590 pr_err("Error: sys_perf_event_open() syscall returned " 591 "with %d (%s)\n%s", fd, 592 str_error_r(errno, sbuf, sizeof(sbuf)), info); 593 exit(EXIT_FAILURE); 594 } 595 return fd; 596 } 597 598 static u64 get_cpu_usage_nsec_self(int fd) 599 { 600 u64 runtime; 601 int ret; 602 603 ret = read(fd, &runtime, sizeof(runtime)); 604 BUG_ON(ret != sizeof(runtime)); 605 606 return runtime; 607 } 608 609 struct sched_thread_parms { 610 struct task_desc *task; 611 struct perf_sched *sched; 612 int fd; 613 }; 614 615 static void *thread_func(void *ctx) 616 { 617 struct sched_thread_parms *parms = ctx; 618 struct task_desc *this_task = parms->task; 619 struct perf_sched *sched = parms->sched; 620 u64 cpu_usage_0, cpu_usage_1; 621 unsigned long i, ret; 622 char comm2[22]; 623 int fd = parms->fd; 624 625 zfree(&parms); 626 627 sprintf(comm2, ":%s", this_task->comm); 628 prctl(PR_SET_NAME, comm2); 629 if (fd < 0) 630 return NULL; 631 again: 632 ret = sem_post(&this_task->ready_for_work); 633 BUG_ON(ret); 634 ret = pthread_mutex_lock(&sched->start_work_mutex); 635 BUG_ON(ret); 636 ret = pthread_mutex_unlock(&sched->start_work_mutex); 637 BUG_ON(ret); 638 639 cpu_usage_0 = get_cpu_usage_nsec_self(fd); 640 641 for (i = 0; i < this_task->nr_events; i++) { 642 this_task->curr_event = i; 643 perf_sched__process_event(sched, this_task->atoms[i]); 644 } 645 646 cpu_usage_1 = get_cpu_usage_nsec_self(fd); 647 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0; 648 ret = sem_post(&this_task->work_done_sem); 649 BUG_ON(ret); 650 651 ret = pthread_mutex_lock(&sched->work_done_wait_mutex); 652 BUG_ON(ret); 653 ret = pthread_mutex_unlock(&sched->work_done_wait_mutex); 654 BUG_ON(ret); 655 656 goto again; 657 } 658 659 static void create_tasks(struct perf_sched *sched) 660 { 661 struct task_desc *task; 662 pthread_attr_t attr; 663 unsigned long i; 664 int err; 665 666 err = pthread_attr_init(&attr); 667 BUG_ON(err); 668 err = pthread_attr_setstacksize(&attr, 669 (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); 670 BUG_ON(err); 671 err = pthread_mutex_lock(&sched->start_work_mutex); 672 BUG_ON(err); 673 err = pthread_mutex_lock(&sched->work_done_wait_mutex); 674 BUG_ON(err); 675 for (i = 0; i < sched->nr_tasks; i++) { 676 struct sched_thread_parms *parms = malloc(sizeof(*parms)); 677 BUG_ON(parms == NULL); 678 parms->task = task = sched->tasks[i]; 679 parms->sched = sched; 680 parms->fd = self_open_counters(sched, i); 681 sem_init(&task->sleep_sem, 0, 0); 682 sem_init(&task->ready_for_work, 0, 0); 683 sem_init(&task->work_done_sem, 0, 0); 684 task->curr_event = 0; 685 err = pthread_create(&task->thread, &attr, thread_func, parms); 686 BUG_ON(err); 687 } 688 } 689 690 static void wait_for_tasks(struct perf_sched *sched) 691 { 692 u64 cpu_usage_0, cpu_usage_1; 693 struct task_desc *task; 694 unsigned long i, ret; 695 696 sched->start_time = get_nsecs(); 697 sched->cpu_usage = 0; 698 pthread_mutex_unlock(&sched->work_done_wait_mutex); 699 700 for (i = 0; i < sched->nr_tasks; i++) { 701 task = sched->tasks[i]; 702 ret = sem_wait(&task->ready_for_work); 703 BUG_ON(ret); 704 sem_init(&task->ready_for_work, 0, 0); 705 } 706 ret = pthread_mutex_lock(&sched->work_done_wait_mutex); 707 BUG_ON(ret); 708 709 cpu_usage_0 = get_cpu_usage_nsec_parent(); 710 711 pthread_mutex_unlock(&sched->start_work_mutex); 712 713 for (i = 0; i < sched->nr_tasks; i++) { 714 task = sched->tasks[i]; 715 ret = sem_wait(&task->work_done_sem); 716 BUG_ON(ret); 717 sem_init(&task->work_done_sem, 0, 0); 718 sched->cpu_usage += task->cpu_usage; 719 task->cpu_usage = 0; 720 } 721 722 cpu_usage_1 = get_cpu_usage_nsec_parent(); 723 if (!sched->runavg_cpu_usage) 724 sched->runavg_cpu_usage = sched->cpu_usage; 725 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat; 726 727 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; 728 if (!sched->runavg_parent_cpu_usage) 729 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; 730 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) + 731 sched->parent_cpu_usage)/sched->replay_repeat; 732 733 ret = pthread_mutex_lock(&sched->start_work_mutex); 734 BUG_ON(ret); 735 736 for (i = 0; i < sched->nr_tasks; i++) { 737 task = sched->tasks[i]; 738 sem_init(&task->sleep_sem, 0, 0); 739 task->curr_event = 0; 740 } 741 } 742 743 static void run_one_test(struct perf_sched *sched) 744 { 745 u64 T0, T1, delta, avg_delta, fluct; 746 747 T0 = get_nsecs(); 748 wait_for_tasks(sched); 749 T1 = get_nsecs(); 750 751 delta = T1 - T0; 752 sched->sum_runtime += delta; 753 sched->nr_runs++; 754 755 avg_delta = sched->sum_runtime / sched->nr_runs; 756 if (delta < avg_delta) 757 fluct = avg_delta - delta; 758 else 759 fluct = delta - avg_delta; 760 sched->sum_fluct += fluct; 761 if (!sched->run_avg) 762 sched->run_avg = delta; 763 sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat; 764 765 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC); 766 767 printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC); 768 769 printf("cpu: %0.2f / %0.2f", 770 (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC); 771 772 #if 0 773 /* 774 * rusage statistics done by the parent, these are less 775 * accurate than the sched->sum_exec_runtime based statistics: 776 */ 777 printf(" [%0.2f / %0.2f]", 778 (double)sched->parent_cpu_usage / NSEC_PER_MSEC, 779 (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC); 780 #endif 781 782 printf("\n"); 783 784 if (sched->nr_sleep_corrections) 785 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); 786 sched->nr_sleep_corrections = 0; 787 } 788 789 static void test_calibrations(struct perf_sched *sched) 790 { 791 u64 T0, T1; 792 793 T0 = get_nsecs(); 794 burn_nsecs(sched, NSEC_PER_MSEC); 795 T1 = get_nsecs(); 796 797 printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); 798 799 T0 = get_nsecs(); 800 sleep_nsecs(NSEC_PER_MSEC); 801 T1 = get_nsecs(); 802 803 printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); 804 } 805 806 static int 807 replay_wakeup_event(struct perf_sched *sched, 808 struct evsel *evsel, struct perf_sample *sample, 809 struct machine *machine __maybe_unused) 810 { 811 const char *comm = perf_evsel__strval(evsel, sample, "comm"); 812 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 813 struct task_desc *waker, *wakee; 814 815 if (verbose > 0) { 816 printf("sched_wakeup event %p\n", evsel); 817 818 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); 819 } 820 821 waker = register_pid(sched, sample->tid, "<unknown>"); 822 wakee = register_pid(sched, pid, comm); 823 824 add_sched_event_wakeup(sched, waker, sample->time, wakee); 825 return 0; 826 } 827 828 static int replay_switch_event(struct perf_sched *sched, 829 struct evsel *evsel, 830 struct perf_sample *sample, 831 struct machine *machine __maybe_unused) 832 { 833 const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), 834 *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); 835 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), 836 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 837 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); 838 struct task_desc *prev, __maybe_unused *next; 839 u64 timestamp0, timestamp = sample->time; 840 int cpu = sample->cpu; 841 s64 delta; 842 843 if (verbose > 0) 844 printf("sched_switch event %p\n", evsel); 845 846 if (cpu >= MAX_CPUS || cpu < 0) 847 return 0; 848 849 timestamp0 = sched->cpu_last_switched[cpu]; 850 if (timestamp0) 851 delta = timestamp - timestamp0; 852 else 853 delta = 0; 854 855 if (delta < 0) { 856 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 857 return -1; 858 } 859 860 pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", 861 prev_comm, prev_pid, next_comm, next_pid, delta); 862 863 prev = register_pid(sched, prev_pid, prev_comm); 864 next = register_pid(sched, next_pid, next_comm); 865 866 sched->cpu_last_switched[cpu] = timestamp; 867 868 add_sched_event_run(sched, prev, timestamp, delta); 869 add_sched_event_sleep(sched, prev, timestamp, prev_state); 870 871 return 0; 872 } 873 874 static int replay_fork_event(struct perf_sched *sched, 875 union perf_event *event, 876 struct machine *machine) 877 { 878 struct thread *child, *parent; 879 880 child = machine__findnew_thread(machine, event->fork.pid, 881 event->fork.tid); 882 parent = machine__findnew_thread(machine, event->fork.ppid, 883 event->fork.ptid); 884 885 if (child == NULL || parent == NULL) { 886 pr_debug("thread does not exist on fork event: child %p, parent %p\n", 887 child, parent); 888 goto out_put; 889 } 890 891 if (verbose > 0) { 892 printf("fork event\n"); 893 printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid); 894 printf("... child: %s/%d\n", thread__comm_str(child), child->tid); 895 } 896 897 register_pid(sched, parent->tid, thread__comm_str(parent)); 898 register_pid(sched, child->tid, thread__comm_str(child)); 899 out_put: 900 thread__put(child); 901 thread__put(parent); 902 return 0; 903 } 904 905 struct sort_dimension { 906 const char *name; 907 sort_fn_t cmp; 908 struct list_head list; 909 }; 910 911 /* 912 * handle runtime stats saved per thread 913 */ 914 static struct thread_runtime *thread__init_runtime(struct thread *thread) 915 { 916 struct thread_runtime *r; 917 918 r = zalloc(sizeof(struct thread_runtime)); 919 if (!r) 920 return NULL; 921 922 init_stats(&r->run_stats); 923 thread__set_priv(thread, r); 924 925 return r; 926 } 927 928 static struct thread_runtime *thread__get_runtime(struct thread *thread) 929 { 930 struct thread_runtime *tr; 931 932 tr = thread__priv(thread); 933 if (tr == NULL) { 934 tr = thread__init_runtime(thread); 935 if (tr == NULL) 936 pr_debug("Failed to malloc memory for runtime data.\n"); 937 } 938 939 return tr; 940 } 941 942 static int 943 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) 944 { 945 struct sort_dimension *sort; 946 int ret = 0; 947 948 BUG_ON(list_empty(list)); 949 950 list_for_each_entry(sort, list, list) { 951 ret = sort->cmp(l, r); 952 if (ret) 953 return ret; 954 } 955 956 return ret; 957 } 958 959 static struct work_atoms * 960 thread_atoms_search(struct rb_root_cached *root, struct thread *thread, 961 struct list_head *sort_list) 962 { 963 struct rb_node *node = root->rb_root.rb_node; 964 struct work_atoms key = { .thread = thread }; 965 966 while (node) { 967 struct work_atoms *atoms; 968 int cmp; 969 970 atoms = container_of(node, struct work_atoms, node); 971 972 cmp = thread_lat_cmp(sort_list, &key, atoms); 973 if (cmp > 0) 974 node = node->rb_left; 975 else if (cmp < 0) 976 node = node->rb_right; 977 else { 978 BUG_ON(thread != atoms->thread); 979 return atoms; 980 } 981 } 982 return NULL; 983 } 984 985 static void 986 __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data, 987 struct list_head *sort_list) 988 { 989 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 990 bool leftmost = true; 991 992 while (*new) { 993 struct work_atoms *this; 994 int cmp; 995 996 this = container_of(*new, struct work_atoms, node); 997 parent = *new; 998 999 cmp = thread_lat_cmp(sort_list, data, this); 1000 1001 if (cmp > 0) 1002 new = &((*new)->rb_left); 1003 else { 1004 new = &((*new)->rb_right); 1005 leftmost = false; 1006 } 1007 } 1008 1009 rb_link_node(&data->node, parent, new); 1010 rb_insert_color_cached(&data->node, root, leftmost); 1011 } 1012 1013 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) 1014 { 1015 struct work_atoms *atoms = zalloc(sizeof(*atoms)); 1016 if (!atoms) { 1017 pr_err("No memory at %s\n", __func__); 1018 return -1; 1019 } 1020 1021 atoms->thread = thread__get(thread); 1022 INIT_LIST_HEAD(&atoms->work_list); 1023 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); 1024 return 0; 1025 } 1026 1027 static char sched_out_state(u64 prev_state) 1028 { 1029 const char *str = TASK_STATE_TO_CHAR_STR; 1030 1031 return str[prev_state]; 1032 } 1033 1034 static int 1035 add_sched_out_event(struct work_atoms *atoms, 1036 char run_state, 1037 u64 timestamp) 1038 { 1039 struct work_atom *atom = zalloc(sizeof(*atom)); 1040 if (!atom) { 1041 pr_err("Non memory at %s", __func__); 1042 return -1; 1043 } 1044 1045 atom->sched_out_time = timestamp; 1046 1047 if (run_state == 'R') { 1048 atom->state = THREAD_WAIT_CPU; 1049 atom->wake_up_time = atom->sched_out_time; 1050 } 1051 1052 list_add_tail(&atom->list, &atoms->work_list); 1053 return 0; 1054 } 1055 1056 static void 1057 add_runtime_event(struct work_atoms *atoms, u64 delta, 1058 u64 timestamp __maybe_unused) 1059 { 1060 struct work_atom *atom; 1061 1062 BUG_ON(list_empty(&atoms->work_list)); 1063 1064 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1065 1066 atom->runtime += delta; 1067 atoms->total_runtime += delta; 1068 } 1069 1070 static void 1071 add_sched_in_event(struct work_atoms *atoms, u64 timestamp) 1072 { 1073 struct work_atom *atom; 1074 u64 delta; 1075 1076 if (list_empty(&atoms->work_list)) 1077 return; 1078 1079 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1080 1081 if (atom->state != THREAD_WAIT_CPU) 1082 return; 1083 1084 if (timestamp < atom->wake_up_time) { 1085 atom->state = THREAD_IGNORE; 1086 return; 1087 } 1088 1089 atom->state = THREAD_SCHED_IN; 1090 atom->sched_in_time = timestamp; 1091 1092 delta = atom->sched_in_time - atom->wake_up_time; 1093 atoms->total_lat += delta; 1094 if (delta > atoms->max_lat) { 1095 atoms->max_lat = delta; 1096 atoms->max_lat_at = timestamp; 1097 } 1098 atoms->nb_atoms++; 1099 } 1100 1101 static int latency_switch_event(struct perf_sched *sched, 1102 struct evsel *evsel, 1103 struct perf_sample *sample, 1104 struct machine *machine) 1105 { 1106 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), 1107 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1108 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); 1109 struct work_atoms *out_events, *in_events; 1110 struct thread *sched_out, *sched_in; 1111 u64 timestamp0, timestamp = sample->time; 1112 int cpu = sample->cpu, err = -1; 1113 s64 delta; 1114 1115 BUG_ON(cpu >= MAX_CPUS || cpu < 0); 1116 1117 timestamp0 = sched->cpu_last_switched[cpu]; 1118 sched->cpu_last_switched[cpu] = timestamp; 1119 if (timestamp0) 1120 delta = timestamp - timestamp0; 1121 else 1122 delta = 0; 1123 1124 if (delta < 0) { 1125 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 1126 return -1; 1127 } 1128 1129 sched_out = machine__findnew_thread(machine, -1, prev_pid); 1130 sched_in = machine__findnew_thread(machine, -1, next_pid); 1131 if (sched_out == NULL || sched_in == NULL) 1132 goto out_put; 1133 1134 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); 1135 if (!out_events) { 1136 if (thread_atoms_insert(sched, sched_out)) 1137 goto out_put; 1138 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); 1139 if (!out_events) { 1140 pr_err("out-event: Internal tree error"); 1141 goto out_put; 1142 } 1143 } 1144 if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp)) 1145 return -1; 1146 1147 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); 1148 if (!in_events) { 1149 if (thread_atoms_insert(sched, sched_in)) 1150 goto out_put; 1151 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); 1152 if (!in_events) { 1153 pr_err("in-event: Internal tree error"); 1154 goto out_put; 1155 } 1156 /* 1157 * Take came in we have not heard about yet, 1158 * add in an initial atom in runnable state: 1159 */ 1160 if (add_sched_out_event(in_events, 'R', timestamp)) 1161 goto out_put; 1162 } 1163 add_sched_in_event(in_events, timestamp); 1164 err = 0; 1165 out_put: 1166 thread__put(sched_out); 1167 thread__put(sched_in); 1168 return err; 1169 } 1170 1171 static int latency_runtime_event(struct perf_sched *sched, 1172 struct evsel *evsel, 1173 struct perf_sample *sample, 1174 struct machine *machine) 1175 { 1176 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 1177 const u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); 1178 struct thread *thread = machine__findnew_thread(machine, -1, pid); 1179 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); 1180 u64 timestamp = sample->time; 1181 int cpu = sample->cpu, err = -1; 1182 1183 if (thread == NULL) 1184 return -1; 1185 1186 BUG_ON(cpu >= MAX_CPUS || cpu < 0); 1187 if (!atoms) { 1188 if (thread_atoms_insert(sched, thread)) 1189 goto out_put; 1190 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); 1191 if (!atoms) { 1192 pr_err("in-event: Internal tree error"); 1193 goto out_put; 1194 } 1195 if (add_sched_out_event(atoms, 'R', timestamp)) 1196 goto out_put; 1197 } 1198 1199 add_runtime_event(atoms, runtime, timestamp); 1200 err = 0; 1201 out_put: 1202 thread__put(thread); 1203 return err; 1204 } 1205 1206 static int latency_wakeup_event(struct perf_sched *sched, 1207 struct evsel *evsel, 1208 struct perf_sample *sample, 1209 struct machine *machine) 1210 { 1211 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 1212 struct work_atoms *atoms; 1213 struct work_atom *atom; 1214 struct thread *wakee; 1215 u64 timestamp = sample->time; 1216 int err = -1; 1217 1218 wakee = machine__findnew_thread(machine, -1, pid); 1219 if (wakee == NULL) 1220 return -1; 1221 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); 1222 if (!atoms) { 1223 if (thread_atoms_insert(sched, wakee)) 1224 goto out_put; 1225 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); 1226 if (!atoms) { 1227 pr_err("wakeup-event: Internal tree error"); 1228 goto out_put; 1229 } 1230 if (add_sched_out_event(atoms, 'S', timestamp)) 1231 goto out_put; 1232 } 1233 1234 BUG_ON(list_empty(&atoms->work_list)); 1235 1236 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1237 1238 /* 1239 * As we do not guarantee the wakeup event happens when 1240 * task is out of run queue, also may happen when task is 1241 * on run queue and wakeup only change ->state to TASK_RUNNING, 1242 * then we should not set the ->wake_up_time when wake up a 1243 * task which is on run queue. 1244 * 1245 * You WILL be missing events if you've recorded only 1246 * one CPU, or are only looking at only one, so don't 1247 * skip in this case. 1248 */ 1249 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) 1250 goto out_ok; 1251 1252 sched->nr_timestamps++; 1253 if (atom->sched_out_time > timestamp) { 1254 sched->nr_unordered_timestamps++; 1255 goto out_ok; 1256 } 1257 1258 atom->state = THREAD_WAIT_CPU; 1259 atom->wake_up_time = timestamp; 1260 out_ok: 1261 err = 0; 1262 out_put: 1263 thread__put(wakee); 1264 return err; 1265 } 1266 1267 static int latency_migrate_task_event(struct perf_sched *sched, 1268 struct evsel *evsel, 1269 struct perf_sample *sample, 1270 struct machine *machine) 1271 { 1272 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 1273 u64 timestamp = sample->time; 1274 struct work_atoms *atoms; 1275 struct work_atom *atom; 1276 struct thread *migrant; 1277 int err = -1; 1278 1279 /* 1280 * Only need to worry about migration when profiling one CPU. 1281 */ 1282 if (sched->profile_cpu == -1) 1283 return 0; 1284 1285 migrant = machine__findnew_thread(machine, -1, pid); 1286 if (migrant == NULL) 1287 return -1; 1288 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); 1289 if (!atoms) { 1290 if (thread_atoms_insert(sched, migrant)) 1291 goto out_put; 1292 register_pid(sched, migrant->tid, thread__comm_str(migrant)); 1293 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); 1294 if (!atoms) { 1295 pr_err("migration-event: Internal tree error"); 1296 goto out_put; 1297 } 1298 if (add_sched_out_event(atoms, 'R', timestamp)) 1299 goto out_put; 1300 } 1301 1302 BUG_ON(list_empty(&atoms->work_list)); 1303 1304 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1305 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; 1306 1307 sched->nr_timestamps++; 1308 1309 if (atom->sched_out_time > timestamp) 1310 sched->nr_unordered_timestamps++; 1311 err = 0; 1312 out_put: 1313 thread__put(migrant); 1314 return err; 1315 } 1316 1317 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) 1318 { 1319 int i; 1320 int ret; 1321 u64 avg; 1322 char max_lat_at[32]; 1323 1324 if (!work_list->nb_atoms) 1325 return; 1326 /* 1327 * Ignore idle threads: 1328 */ 1329 if (!strcmp(thread__comm_str(work_list->thread), "swapper")) 1330 return; 1331 1332 sched->all_runtime += work_list->total_runtime; 1333 sched->all_count += work_list->nb_atoms; 1334 1335 if (work_list->num_merged > 1) 1336 ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged); 1337 else 1338 ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid); 1339 1340 for (i = 0; i < 24 - ret; i++) 1341 printf(" "); 1342 1343 avg = work_list->total_lat / work_list->nb_atoms; 1344 timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at)); 1345 1346 printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n", 1347 (double)work_list->total_runtime / NSEC_PER_MSEC, 1348 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC, 1349 (double)work_list->max_lat / NSEC_PER_MSEC, 1350 max_lat_at); 1351 } 1352 1353 static int pid_cmp(struct work_atoms *l, struct work_atoms *r) 1354 { 1355 if (l->thread == r->thread) 1356 return 0; 1357 if (l->thread->tid < r->thread->tid) 1358 return -1; 1359 if (l->thread->tid > r->thread->tid) 1360 return 1; 1361 return (int)(l->thread - r->thread); 1362 } 1363 1364 static int avg_cmp(struct work_atoms *l, struct work_atoms *r) 1365 { 1366 u64 avgl, avgr; 1367 1368 if (!l->nb_atoms) 1369 return -1; 1370 1371 if (!r->nb_atoms) 1372 return 1; 1373 1374 avgl = l->total_lat / l->nb_atoms; 1375 avgr = r->total_lat / r->nb_atoms; 1376 1377 if (avgl < avgr) 1378 return -1; 1379 if (avgl > avgr) 1380 return 1; 1381 1382 return 0; 1383 } 1384 1385 static int max_cmp(struct work_atoms *l, struct work_atoms *r) 1386 { 1387 if (l->max_lat < r->max_lat) 1388 return -1; 1389 if (l->max_lat > r->max_lat) 1390 return 1; 1391 1392 return 0; 1393 } 1394 1395 static int switch_cmp(struct work_atoms *l, struct work_atoms *r) 1396 { 1397 if (l->nb_atoms < r->nb_atoms) 1398 return -1; 1399 if (l->nb_atoms > r->nb_atoms) 1400 return 1; 1401 1402 return 0; 1403 } 1404 1405 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) 1406 { 1407 if (l->total_runtime < r->total_runtime) 1408 return -1; 1409 if (l->total_runtime > r->total_runtime) 1410 return 1; 1411 1412 return 0; 1413 } 1414 1415 static int sort_dimension__add(const char *tok, struct list_head *list) 1416 { 1417 size_t i; 1418 static struct sort_dimension avg_sort_dimension = { 1419 .name = "avg", 1420 .cmp = avg_cmp, 1421 }; 1422 static struct sort_dimension max_sort_dimension = { 1423 .name = "max", 1424 .cmp = max_cmp, 1425 }; 1426 static struct sort_dimension pid_sort_dimension = { 1427 .name = "pid", 1428 .cmp = pid_cmp, 1429 }; 1430 static struct sort_dimension runtime_sort_dimension = { 1431 .name = "runtime", 1432 .cmp = runtime_cmp, 1433 }; 1434 static struct sort_dimension switch_sort_dimension = { 1435 .name = "switch", 1436 .cmp = switch_cmp, 1437 }; 1438 struct sort_dimension *available_sorts[] = { 1439 &pid_sort_dimension, 1440 &avg_sort_dimension, 1441 &max_sort_dimension, 1442 &switch_sort_dimension, 1443 &runtime_sort_dimension, 1444 }; 1445 1446 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) { 1447 if (!strcmp(available_sorts[i]->name, tok)) { 1448 list_add_tail(&available_sorts[i]->list, list); 1449 1450 return 0; 1451 } 1452 } 1453 1454 return -1; 1455 } 1456 1457 static void perf_sched__sort_lat(struct perf_sched *sched) 1458 { 1459 struct rb_node *node; 1460 struct rb_root_cached *root = &sched->atom_root; 1461 again: 1462 for (;;) { 1463 struct work_atoms *data; 1464 node = rb_first_cached(root); 1465 if (!node) 1466 break; 1467 1468 rb_erase_cached(node, root); 1469 data = rb_entry(node, struct work_atoms, node); 1470 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); 1471 } 1472 if (root == &sched->atom_root) { 1473 root = &sched->merged_atom_root; 1474 goto again; 1475 } 1476 } 1477 1478 static int process_sched_wakeup_event(struct perf_tool *tool, 1479 struct evsel *evsel, 1480 struct perf_sample *sample, 1481 struct machine *machine) 1482 { 1483 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1484 1485 if (sched->tp_handler->wakeup_event) 1486 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); 1487 1488 return 0; 1489 } 1490 1491 union map_priv { 1492 void *ptr; 1493 bool color; 1494 }; 1495 1496 static bool thread__has_color(struct thread *thread) 1497 { 1498 union map_priv priv = { 1499 .ptr = thread__priv(thread), 1500 }; 1501 1502 return priv.color; 1503 } 1504 1505 static struct thread* 1506 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid) 1507 { 1508 struct thread *thread = machine__findnew_thread(machine, pid, tid); 1509 union map_priv priv = { 1510 .color = false, 1511 }; 1512 1513 if (!sched->map.color_pids || !thread || thread__priv(thread)) 1514 return thread; 1515 1516 if (thread_map__has(sched->map.color_pids, tid)) 1517 priv.color = true; 1518 1519 thread__set_priv(thread, priv.ptr); 1520 return thread; 1521 } 1522 1523 static int map_switch_event(struct perf_sched *sched, struct evsel *evsel, 1524 struct perf_sample *sample, struct machine *machine) 1525 { 1526 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1527 struct thread *sched_in; 1528 struct thread_runtime *tr; 1529 int new_shortname; 1530 u64 timestamp0, timestamp = sample->time; 1531 s64 delta; 1532 int i, this_cpu = sample->cpu; 1533 int cpus_nr; 1534 bool new_cpu = false; 1535 const char *color = PERF_COLOR_NORMAL; 1536 char stimestamp[32]; 1537 1538 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); 1539 1540 if (this_cpu > sched->max_cpu) 1541 sched->max_cpu = this_cpu; 1542 1543 if (sched->map.comp) { 1544 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); 1545 if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) { 1546 sched->map.comp_cpus[cpus_nr++] = this_cpu; 1547 new_cpu = true; 1548 } 1549 } else 1550 cpus_nr = sched->max_cpu; 1551 1552 timestamp0 = sched->cpu_last_switched[this_cpu]; 1553 sched->cpu_last_switched[this_cpu] = timestamp; 1554 if (timestamp0) 1555 delta = timestamp - timestamp0; 1556 else 1557 delta = 0; 1558 1559 if (delta < 0) { 1560 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 1561 return -1; 1562 } 1563 1564 sched_in = map__findnew_thread(sched, machine, -1, next_pid); 1565 if (sched_in == NULL) 1566 return -1; 1567 1568 tr = thread__get_runtime(sched_in); 1569 if (tr == NULL) { 1570 thread__put(sched_in); 1571 return -1; 1572 } 1573 1574 sched->curr_thread[this_cpu] = thread__get(sched_in); 1575 1576 printf(" "); 1577 1578 new_shortname = 0; 1579 if (!tr->shortname[0]) { 1580 if (!strcmp(thread__comm_str(sched_in), "swapper")) { 1581 /* 1582 * Don't allocate a letter-number for swapper:0 1583 * as a shortname. Instead, we use '.' for it. 1584 */ 1585 tr->shortname[0] = '.'; 1586 tr->shortname[1] = ' '; 1587 } else { 1588 tr->shortname[0] = sched->next_shortname1; 1589 tr->shortname[1] = sched->next_shortname2; 1590 1591 if (sched->next_shortname1 < 'Z') { 1592 sched->next_shortname1++; 1593 } else { 1594 sched->next_shortname1 = 'A'; 1595 if (sched->next_shortname2 < '9') 1596 sched->next_shortname2++; 1597 else 1598 sched->next_shortname2 = '0'; 1599 } 1600 } 1601 new_shortname = 1; 1602 } 1603 1604 for (i = 0; i < cpus_nr; i++) { 1605 int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i; 1606 struct thread *curr_thread = sched->curr_thread[cpu]; 1607 struct thread_runtime *curr_tr; 1608 const char *pid_color = color; 1609 const char *cpu_color = color; 1610 1611 if (curr_thread && thread__has_color(curr_thread)) 1612 pid_color = COLOR_PIDS; 1613 1614 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu)) 1615 continue; 1616 1617 if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu)) 1618 cpu_color = COLOR_CPUS; 1619 1620 if (cpu != this_cpu) 1621 color_fprintf(stdout, color, " "); 1622 else 1623 color_fprintf(stdout, cpu_color, "*"); 1624 1625 if (sched->curr_thread[cpu]) { 1626 curr_tr = thread__get_runtime(sched->curr_thread[cpu]); 1627 if (curr_tr == NULL) { 1628 thread__put(sched_in); 1629 return -1; 1630 } 1631 color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname); 1632 } else 1633 color_fprintf(stdout, color, " "); 1634 } 1635 1636 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu)) 1637 goto out; 1638 1639 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp)); 1640 color_fprintf(stdout, color, " %12s secs ", stimestamp); 1641 if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) { 1642 const char *pid_color = color; 1643 1644 if (thread__has_color(sched_in)) 1645 pid_color = COLOR_PIDS; 1646 1647 color_fprintf(stdout, pid_color, "%s => %s:%d", 1648 tr->shortname, thread__comm_str(sched_in), sched_in->tid); 1649 tr->comm_changed = false; 1650 } 1651 1652 if (sched->map.comp && new_cpu) 1653 color_fprintf(stdout, color, " (CPU %d)", this_cpu); 1654 1655 out: 1656 color_fprintf(stdout, color, "\n"); 1657 1658 thread__put(sched_in); 1659 1660 return 0; 1661 } 1662 1663 static int process_sched_switch_event(struct perf_tool *tool, 1664 struct evsel *evsel, 1665 struct perf_sample *sample, 1666 struct machine *machine) 1667 { 1668 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1669 int this_cpu = sample->cpu, err = 0; 1670 u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), 1671 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1672 1673 if (sched->curr_pid[this_cpu] != (u32)-1) { 1674 /* 1675 * Are we trying to switch away a PID that is 1676 * not current? 1677 */ 1678 if (sched->curr_pid[this_cpu] != prev_pid) 1679 sched->nr_context_switch_bugs++; 1680 } 1681 1682 if (sched->tp_handler->switch_event) 1683 err = sched->tp_handler->switch_event(sched, evsel, sample, machine); 1684 1685 sched->curr_pid[this_cpu] = next_pid; 1686 return err; 1687 } 1688 1689 static int process_sched_runtime_event(struct perf_tool *tool, 1690 struct evsel *evsel, 1691 struct perf_sample *sample, 1692 struct machine *machine) 1693 { 1694 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1695 1696 if (sched->tp_handler->runtime_event) 1697 return sched->tp_handler->runtime_event(sched, evsel, sample, machine); 1698 1699 return 0; 1700 } 1701 1702 static int perf_sched__process_fork_event(struct perf_tool *tool, 1703 union perf_event *event, 1704 struct perf_sample *sample, 1705 struct machine *machine) 1706 { 1707 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1708 1709 /* run the fork event through the perf machineruy */ 1710 perf_event__process_fork(tool, event, sample, machine); 1711 1712 /* and then run additional processing needed for this command */ 1713 if (sched->tp_handler->fork_event) 1714 return sched->tp_handler->fork_event(sched, event, machine); 1715 1716 return 0; 1717 } 1718 1719 static int process_sched_migrate_task_event(struct perf_tool *tool, 1720 struct evsel *evsel, 1721 struct perf_sample *sample, 1722 struct machine *machine) 1723 { 1724 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1725 1726 if (sched->tp_handler->migrate_task_event) 1727 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); 1728 1729 return 0; 1730 } 1731 1732 typedef int (*tracepoint_handler)(struct perf_tool *tool, 1733 struct evsel *evsel, 1734 struct perf_sample *sample, 1735 struct machine *machine); 1736 1737 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, 1738 union perf_event *event __maybe_unused, 1739 struct perf_sample *sample, 1740 struct evsel *evsel, 1741 struct machine *machine) 1742 { 1743 int err = 0; 1744 1745 if (evsel->handler != NULL) { 1746 tracepoint_handler f = evsel->handler; 1747 err = f(tool, evsel, sample, machine); 1748 } 1749 1750 return err; 1751 } 1752 1753 static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused, 1754 union perf_event *event, 1755 struct perf_sample *sample, 1756 struct machine *machine) 1757 { 1758 struct thread *thread; 1759 struct thread_runtime *tr; 1760 int err; 1761 1762 err = perf_event__process_comm(tool, event, sample, machine); 1763 if (err) 1764 return err; 1765 1766 thread = machine__find_thread(machine, sample->pid, sample->tid); 1767 if (!thread) { 1768 pr_err("Internal error: can't find thread\n"); 1769 return -1; 1770 } 1771 1772 tr = thread__get_runtime(thread); 1773 if (tr == NULL) { 1774 thread__put(thread); 1775 return -1; 1776 } 1777 1778 tr->comm_changed = true; 1779 thread__put(thread); 1780 1781 return 0; 1782 } 1783 1784 static int perf_sched__read_events(struct perf_sched *sched) 1785 { 1786 const struct evsel_str_handler handlers[] = { 1787 { "sched:sched_switch", process_sched_switch_event, }, 1788 { "sched:sched_stat_runtime", process_sched_runtime_event, }, 1789 { "sched:sched_wakeup", process_sched_wakeup_event, }, 1790 { "sched:sched_wakeup_new", process_sched_wakeup_event, }, 1791 { "sched:sched_migrate_task", process_sched_migrate_task_event, }, 1792 }; 1793 struct perf_session *session; 1794 struct perf_data data = { 1795 .path = input_name, 1796 .mode = PERF_DATA_MODE_READ, 1797 .force = sched->force, 1798 }; 1799 int rc = -1; 1800 1801 session = perf_session__new(&data, false, &sched->tool); 1802 if (IS_ERR(session)) { 1803 pr_debug("Error creating perf session"); 1804 return PTR_ERR(session); 1805 } 1806 1807 symbol__init(&session->header.env); 1808 1809 if (perf_session__set_tracepoints_handlers(session, handlers)) 1810 goto out_delete; 1811 1812 if (perf_session__has_traces(session, "record -R")) { 1813 int err = perf_session__process_events(session); 1814 if (err) { 1815 pr_err("Failed to process events, error %d", err); 1816 goto out_delete; 1817 } 1818 1819 sched->nr_events = session->evlist->stats.nr_events[0]; 1820 sched->nr_lost_events = session->evlist->stats.total_lost; 1821 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST]; 1822 } 1823 1824 rc = 0; 1825 out_delete: 1826 perf_session__delete(session); 1827 return rc; 1828 } 1829 1830 /* 1831 * scheduling times are printed as msec.usec 1832 */ 1833 static inline void print_sched_time(unsigned long long nsecs, int width) 1834 { 1835 unsigned long msecs; 1836 unsigned long usecs; 1837 1838 msecs = nsecs / NSEC_PER_MSEC; 1839 nsecs -= msecs * NSEC_PER_MSEC; 1840 usecs = nsecs / NSEC_PER_USEC; 1841 printf("%*lu.%03lu ", width, msecs, usecs); 1842 } 1843 1844 /* 1845 * returns runtime data for event, allocating memory for it the 1846 * first time it is used. 1847 */ 1848 static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel) 1849 { 1850 struct evsel_runtime *r = evsel->priv; 1851 1852 if (r == NULL) { 1853 r = zalloc(sizeof(struct evsel_runtime)); 1854 evsel->priv = r; 1855 } 1856 1857 return r; 1858 } 1859 1860 /* 1861 * save last time event was seen per cpu 1862 */ 1863 static void perf_evsel__save_time(struct evsel *evsel, 1864 u64 timestamp, u32 cpu) 1865 { 1866 struct evsel_runtime *r = perf_evsel__get_runtime(evsel); 1867 1868 if (r == NULL) 1869 return; 1870 1871 if ((cpu >= r->ncpu) || (r->last_time == NULL)) { 1872 int i, n = __roundup_pow_of_two(cpu+1); 1873 void *p = r->last_time; 1874 1875 p = realloc(r->last_time, n * sizeof(u64)); 1876 if (!p) 1877 return; 1878 1879 r->last_time = p; 1880 for (i = r->ncpu; i < n; ++i) 1881 r->last_time[i] = (u64) 0; 1882 1883 r->ncpu = n; 1884 } 1885 1886 r->last_time[cpu] = timestamp; 1887 } 1888 1889 /* returns last time this event was seen on the given cpu */ 1890 static u64 perf_evsel__get_time(struct evsel *evsel, u32 cpu) 1891 { 1892 struct evsel_runtime *r = perf_evsel__get_runtime(evsel); 1893 1894 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu)) 1895 return 0; 1896 1897 return r->last_time[cpu]; 1898 } 1899 1900 static int comm_width = 30; 1901 1902 static char *timehist_get_commstr(struct thread *thread) 1903 { 1904 static char str[32]; 1905 const char *comm = thread__comm_str(thread); 1906 pid_t tid = thread->tid; 1907 pid_t pid = thread->pid_; 1908 int n; 1909 1910 if (pid == 0) 1911 n = scnprintf(str, sizeof(str), "%s", comm); 1912 1913 else if (tid != pid) 1914 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid); 1915 1916 else 1917 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid); 1918 1919 if (n > comm_width) 1920 comm_width = n; 1921 1922 return str; 1923 } 1924 1925 static void timehist_header(struct perf_sched *sched) 1926 { 1927 u32 ncpus = sched->max_cpu + 1; 1928 u32 i, j; 1929 1930 printf("%15s %6s ", "time", "cpu"); 1931 1932 if (sched->show_cpu_visual) { 1933 printf(" "); 1934 for (i = 0, j = 0; i < ncpus; ++i) { 1935 printf("%x", j++); 1936 if (j > 15) 1937 j = 0; 1938 } 1939 printf(" "); 1940 } 1941 1942 printf(" %-*s %9s %9s %9s", comm_width, 1943 "task name", "wait time", "sch delay", "run time"); 1944 1945 if (sched->show_state) 1946 printf(" %s", "state"); 1947 1948 printf("\n"); 1949 1950 /* 1951 * units row 1952 */ 1953 printf("%15s %-6s ", "", ""); 1954 1955 if (sched->show_cpu_visual) 1956 printf(" %*s ", ncpus, ""); 1957 1958 printf(" %-*s %9s %9s %9s", comm_width, 1959 "[tid/pid]", "(msec)", "(msec)", "(msec)"); 1960 1961 if (sched->show_state) 1962 printf(" %5s", ""); 1963 1964 printf("\n"); 1965 1966 /* 1967 * separator 1968 */ 1969 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line); 1970 1971 if (sched->show_cpu_visual) 1972 printf(" %.*s ", ncpus, graph_dotted_line); 1973 1974 printf(" %.*s %.9s %.9s %.9s", comm_width, 1975 graph_dotted_line, graph_dotted_line, graph_dotted_line, 1976 graph_dotted_line); 1977 1978 if (sched->show_state) 1979 printf(" %.5s", graph_dotted_line); 1980 1981 printf("\n"); 1982 } 1983 1984 static char task_state_char(struct thread *thread, int state) 1985 { 1986 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1987 unsigned bit = state ? ffs(state) : 0; 1988 1989 /* 'I' for idle */ 1990 if (thread->tid == 0) 1991 return 'I'; 1992 1993 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; 1994 } 1995 1996 static void timehist_print_sample(struct perf_sched *sched, 1997 struct evsel *evsel, 1998 struct perf_sample *sample, 1999 struct addr_location *al, 2000 struct thread *thread, 2001 u64 t, int state) 2002 { 2003 struct thread_runtime *tr = thread__priv(thread); 2004 const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); 2005 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 2006 u32 max_cpus = sched->max_cpu + 1; 2007 char tstr[64]; 2008 char nstr[30]; 2009 u64 wait_time; 2010 2011 timestamp__scnprintf_usec(t, tstr, sizeof(tstr)); 2012 printf("%15s [%04d] ", tstr, sample->cpu); 2013 2014 if (sched->show_cpu_visual) { 2015 u32 i; 2016 char c; 2017 2018 printf(" "); 2019 for (i = 0; i < max_cpus; ++i) { 2020 /* flag idle times with 'i'; others are sched events */ 2021 if (i == sample->cpu) 2022 c = (thread->tid == 0) ? 'i' : 's'; 2023 else 2024 c = ' '; 2025 printf("%c", c); 2026 } 2027 printf(" "); 2028 } 2029 2030 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2031 2032 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt; 2033 print_sched_time(wait_time, 6); 2034 2035 print_sched_time(tr->dt_delay, 6); 2036 print_sched_time(tr->dt_run, 6); 2037 2038 if (sched->show_state) 2039 printf(" %5c ", task_state_char(thread, state)); 2040 2041 if (sched->show_next) { 2042 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid); 2043 printf(" %-*s", comm_width, nstr); 2044 } 2045 2046 if (sched->show_wakeups && !sched->show_next) 2047 printf(" %-*s", comm_width, ""); 2048 2049 if (thread->tid == 0) 2050 goto out; 2051 2052 if (sched->show_callchain) 2053 printf(" "); 2054 2055 sample__fprintf_sym(sample, al, 0, 2056 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE | 2057 EVSEL__PRINT_CALLCHAIN_ARROW | 2058 EVSEL__PRINT_SKIP_IGNORED, 2059 &callchain_cursor, symbol_conf.bt_stop_list, stdout); 2060 2061 out: 2062 printf("\n"); 2063 } 2064 2065 /* 2066 * Explanation of delta-time stats: 2067 * 2068 * t = time of current schedule out event 2069 * tprev = time of previous sched out event 2070 * also time of schedule-in event for current task 2071 * last_time = time of last sched change event for current task 2072 * (i.e, time process was last scheduled out) 2073 * ready_to_run = time of wakeup for current task 2074 * 2075 * -----|------------|------------|------------|------ 2076 * last ready tprev t 2077 * time to run 2078 * 2079 * |-------- dt_wait --------| 2080 * |- dt_delay -|-- dt_run --| 2081 * 2082 * dt_run = run time of current task 2083 * dt_wait = time between last schedule out event for task and tprev 2084 * represents time spent off the cpu 2085 * dt_delay = time between wakeup and schedule-in of task 2086 */ 2087 2088 static void timehist_update_runtime_stats(struct thread_runtime *r, 2089 u64 t, u64 tprev) 2090 { 2091 r->dt_delay = 0; 2092 r->dt_sleep = 0; 2093 r->dt_iowait = 0; 2094 r->dt_preempt = 0; 2095 r->dt_run = 0; 2096 2097 if (tprev) { 2098 r->dt_run = t - tprev; 2099 if (r->ready_to_run) { 2100 if (r->ready_to_run > tprev) 2101 pr_debug("time travel: wakeup time for task > previous sched_switch event\n"); 2102 else 2103 r->dt_delay = tprev - r->ready_to_run; 2104 } 2105 2106 if (r->last_time > tprev) 2107 pr_debug("time travel: last sched out time for task > previous sched_switch event\n"); 2108 else if (r->last_time) { 2109 u64 dt_wait = tprev - r->last_time; 2110 2111 if (r->last_state == TASK_RUNNING) 2112 r->dt_preempt = dt_wait; 2113 else if (r->last_state == TASK_UNINTERRUPTIBLE) 2114 r->dt_iowait = dt_wait; 2115 else 2116 r->dt_sleep = dt_wait; 2117 } 2118 } 2119 2120 update_stats(&r->run_stats, r->dt_run); 2121 2122 r->total_run_time += r->dt_run; 2123 r->total_delay_time += r->dt_delay; 2124 r->total_sleep_time += r->dt_sleep; 2125 r->total_iowait_time += r->dt_iowait; 2126 r->total_preempt_time += r->dt_preempt; 2127 } 2128 2129 static bool is_idle_sample(struct perf_sample *sample, 2130 struct evsel *evsel) 2131 { 2132 /* pid 0 == swapper == idle task */ 2133 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0) 2134 return perf_evsel__intval(evsel, sample, "prev_pid") == 0; 2135 2136 return sample->pid == 0; 2137 } 2138 2139 static void save_task_callchain(struct perf_sched *sched, 2140 struct perf_sample *sample, 2141 struct evsel *evsel, 2142 struct machine *machine) 2143 { 2144 struct callchain_cursor *cursor = &callchain_cursor; 2145 struct thread *thread; 2146 2147 /* want main thread for process - has maps */ 2148 thread = machine__findnew_thread(machine, sample->pid, sample->pid); 2149 if (thread == NULL) { 2150 pr_debug("Failed to get thread for pid %d.\n", sample->pid); 2151 return; 2152 } 2153 2154 if (!sched->show_callchain || sample->callchain == NULL) 2155 return; 2156 2157 if (thread__resolve_callchain(thread, cursor, evsel, sample, 2158 NULL, NULL, sched->max_stack + 2) != 0) { 2159 if (verbose > 0) 2160 pr_err("Failed to resolve callchain. Skipping\n"); 2161 2162 return; 2163 } 2164 2165 callchain_cursor_commit(cursor); 2166 2167 while (true) { 2168 struct callchain_cursor_node *node; 2169 struct symbol *sym; 2170 2171 node = callchain_cursor_current(cursor); 2172 if (node == NULL) 2173 break; 2174 2175 sym = node->sym; 2176 if (sym) { 2177 if (!strcmp(sym->name, "schedule") || 2178 !strcmp(sym->name, "__schedule") || 2179 !strcmp(sym->name, "preempt_schedule")) 2180 sym->ignore = 1; 2181 } 2182 2183 callchain_cursor_advance(cursor); 2184 } 2185 } 2186 2187 static int init_idle_thread(struct thread *thread) 2188 { 2189 struct idle_thread_runtime *itr; 2190 2191 thread__set_comm(thread, idle_comm, 0); 2192 2193 itr = zalloc(sizeof(*itr)); 2194 if (itr == NULL) 2195 return -ENOMEM; 2196 2197 init_stats(&itr->tr.run_stats); 2198 callchain_init(&itr->callchain); 2199 callchain_cursor_reset(&itr->cursor); 2200 thread__set_priv(thread, itr); 2201 2202 return 0; 2203 } 2204 2205 /* 2206 * Track idle stats per cpu by maintaining a local thread 2207 * struct for the idle task on each cpu. 2208 */ 2209 static int init_idle_threads(int ncpu) 2210 { 2211 int i, ret; 2212 2213 idle_threads = zalloc(ncpu * sizeof(struct thread *)); 2214 if (!idle_threads) 2215 return -ENOMEM; 2216 2217 idle_max_cpu = ncpu; 2218 2219 /* allocate the actual thread struct if needed */ 2220 for (i = 0; i < ncpu; ++i) { 2221 idle_threads[i] = thread__new(0, 0); 2222 if (idle_threads[i] == NULL) 2223 return -ENOMEM; 2224 2225 ret = init_idle_thread(idle_threads[i]); 2226 if (ret < 0) 2227 return ret; 2228 } 2229 2230 return 0; 2231 } 2232 2233 static void free_idle_threads(void) 2234 { 2235 int i; 2236 2237 if (idle_threads == NULL) 2238 return; 2239 2240 for (i = 0; i < idle_max_cpu; ++i) { 2241 if ((idle_threads[i])) 2242 thread__delete(idle_threads[i]); 2243 } 2244 2245 free(idle_threads); 2246 } 2247 2248 static struct thread *get_idle_thread(int cpu) 2249 { 2250 /* 2251 * expand/allocate array of pointers to local thread 2252 * structs if needed 2253 */ 2254 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) { 2255 int i, j = __roundup_pow_of_two(cpu+1); 2256 void *p; 2257 2258 p = realloc(idle_threads, j * sizeof(struct thread *)); 2259 if (!p) 2260 return NULL; 2261 2262 idle_threads = (struct thread **) p; 2263 for (i = idle_max_cpu; i < j; ++i) 2264 idle_threads[i] = NULL; 2265 2266 idle_max_cpu = j; 2267 } 2268 2269 /* allocate a new thread struct if needed */ 2270 if (idle_threads[cpu] == NULL) { 2271 idle_threads[cpu] = thread__new(0, 0); 2272 2273 if (idle_threads[cpu]) { 2274 if (init_idle_thread(idle_threads[cpu]) < 0) 2275 return NULL; 2276 } 2277 } 2278 2279 return idle_threads[cpu]; 2280 } 2281 2282 static void save_idle_callchain(struct perf_sched *sched, 2283 struct idle_thread_runtime *itr, 2284 struct perf_sample *sample) 2285 { 2286 if (!sched->show_callchain || sample->callchain == NULL) 2287 return; 2288 2289 callchain_cursor__copy(&itr->cursor, &callchain_cursor); 2290 } 2291 2292 static struct thread *timehist_get_thread(struct perf_sched *sched, 2293 struct perf_sample *sample, 2294 struct machine *machine, 2295 struct evsel *evsel) 2296 { 2297 struct thread *thread; 2298 2299 if (is_idle_sample(sample, evsel)) { 2300 thread = get_idle_thread(sample->cpu); 2301 if (thread == NULL) 2302 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu); 2303 2304 } else { 2305 /* there were samples with tid 0 but non-zero pid */ 2306 thread = machine__findnew_thread(machine, sample->pid, 2307 sample->tid ?: sample->pid); 2308 if (thread == NULL) { 2309 pr_debug("Failed to get thread for tid %d. skipping sample.\n", 2310 sample->tid); 2311 } 2312 2313 save_task_callchain(sched, sample, evsel, machine); 2314 if (sched->idle_hist) { 2315 struct thread *idle; 2316 struct idle_thread_runtime *itr; 2317 2318 idle = get_idle_thread(sample->cpu); 2319 if (idle == NULL) { 2320 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu); 2321 return NULL; 2322 } 2323 2324 itr = thread__priv(idle); 2325 if (itr == NULL) 2326 return NULL; 2327 2328 itr->last_thread = thread; 2329 2330 /* copy task callchain when entering to idle */ 2331 if (perf_evsel__intval(evsel, sample, "next_pid") == 0) 2332 save_idle_callchain(sched, itr, sample); 2333 } 2334 } 2335 2336 return thread; 2337 } 2338 2339 static bool timehist_skip_sample(struct perf_sched *sched, 2340 struct thread *thread, 2341 struct evsel *evsel, 2342 struct perf_sample *sample) 2343 { 2344 bool rc = false; 2345 2346 if (thread__is_filtered(thread)) { 2347 rc = true; 2348 sched->skipped_samples++; 2349 } 2350 2351 if (sched->idle_hist) { 2352 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch")) 2353 rc = true; 2354 else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 && 2355 perf_evsel__intval(evsel, sample, "next_pid") != 0) 2356 rc = true; 2357 } 2358 2359 return rc; 2360 } 2361 2362 static void timehist_print_wakeup_event(struct perf_sched *sched, 2363 struct evsel *evsel, 2364 struct perf_sample *sample, 2365 struct machine *machine, 2366 struct thread *awakened) 2367 { 2368 struct thread *thread; 2369 char tstr[64]; 2370 2371 thread = machine__findnew_thread(machine, sample->pid, sample->tid); 2372 if (thread == NULL) 2373 return; 2374 2375 /* show wakeup unless both awakee and awaker are filtered */ 2376 if (timehist_skip_sample(sched, thread, evsel, sample) && 2377 timehist_skip_sample(sched, awakened, evsel, sample)) { 2378 return; 2379 } 2380 2381 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2382 printf("%15s [%04d] ", tstr, sample->cpu); 2383 if (sched->show_cpu_visual) 2384 printf(" %*s ", sched->max_cpu + 1, ""); 2385 2386 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2387 2388 /* dt spacer */ 2389 printf(" %9s %9s %9s ", "", "", ""); 2390 2391 printf("awakened: %s", timehist_get_commstr(awakened)); 2392 2393 printf("\n"); 2394 } 2395 2396 static int timehist_sched_wakeup_event(struct perf_tool *tool, 2397 union perf_event *event __maybe_unused, 2398 struct evsel *evsel, 2399 struct perf_sample *sample, 2400 struct machine *machine) 2401 { 2402 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2403 struct thread *thread; 2404 struct thread_runtime *tr = NULL; 2405 /* want pid of awakened task not pid in sample */ 2406 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 2407 2408 thread = machine__findnew_thread(machine, 0, pid); 2409 if (thread == NULL) 2410 return -1; 2411 2412 tr = thread__get_runtime(thread); 2413 if (tr == NULL) 2414 return -1; 2415 2416 if (tr->ready_to_run == 0) 2417 tr->ready_to_run = sample->time; 2418 2419 /* show wakeups if requested */ 2420 if (sched->show_wakeups && 2421 !perf_time__skip_sample(&sched->ptime, sample->time)) 2422 timehist_print_wakeup_event(sched, evsel, sample, machine, thread); 2423 2424 return 0; 2425 } 2426 2427 static void timehist_print_migration_event(struct perf_sched *sched, 2428 struct evsel *evsel, 2429 struct perf_sample *sample, 2430 struct machine *machine, 2431 struct thread *migrated) 2432 { 2433 struct thread *thread; 2434 char tstr[64]; 2435 u32 max_cpus = sched->max_cpu + 1; 2436 u32 ocpu, dcpu; 2437 2438 if (sched->summary_only) 2439 return; 2440 2441 max_cpus = sched->max_cpu + 1; 2442 ocpu = perf_evsel__intval(evsel, sample, "orig_cpu"); 2443 dcpu = perf_evsel__intval(evsel, sample, "dest_cpu"); 2444 2445 thread = machine__findnew_thread(machine, sample->pid, sample->tid); 2446 if (thread == NULL) 2447 return; 2448 2449 if (timehist_skip_sample(sched, thread, evsel, sample) && 2450 timehist_skip_sample(sched, migrated, evsel, sample)) { 2451 return; 2452 } 2453 2454 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2455 printf("%15s [%04d] ", tstr, sample->cpu); 2456 2457 if (sched->show_cpu_visual) { 2458 u32 i; 2459 char c; 2460 2461 printf(" "); 2462 for (i = 0; i < max_cpus; ++i) { 2463 c = (i == sample->cpu) ? 'm' : ' '; 2464 printf("%c", c); 2465 } 2466 printf(" "); 2467 } 2468 2469 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2470 2471 /* dt spacer */ 2472 printf(" %9s %9s %9s ", "", "", ""); 2473 2474 printf("migrated: %s", timehist_get_commstr(migrated)); 2475 printf(" cpu %d => %d", ocpu, dcpu); 2476 2477 printf("\n"); 2478 } 2479 2480 static int timehist_migrate_task_event(struct perf_tool *tool, 2481 union perf_event *event __maybe_unused, 2482 struct evsel *evsel, 2483 struct perf_sample *sample, 2484 struct machine *machine) 2485 { 2486 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2487 struct thread *thread; 2488 struct thread_runtime *tr = NULL; 2489 /* want pid of migrated task not pid in sample */ 2490 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 2491 2492 thread = machine__findnew_thread(machine, 0, pid); 2493 if (thread == NULL) 2494 return -1; 2495 2496 tr = thread__get_runtime(thread); 2497 if (tr == NULL) 2498 return -1; 2499 2500 tr->migrations++; 2501 2502 /* show migrations if requested */ 2503 timehist_print_migration_event(sched, evsel, sample, machine, thread); 2504 2505 return 0; 2506 } 2507 2508 static int timehist_sched_change_event(struct perf_tool *tool, 2509 union perf_event *event, 2510 struct evsel *evsel, 2511 struct perf_sample *sample, 2512 struct machine *machine) 2513 { 2514 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2515 struct perf_time_interval *ptime = &sched->ptime; 2516 struct addr_location al; 2517 struct thread *thread; 2518 struct thread_runtime *tr = NULL; 2519 u64 tprev, t = sample->time; 2520 int rc = 0; 2521 int state = perf_evsel__intval(evsel, sample, "prev_state"); 2522 2523 2524 if (machine__resolve(machine, &al, sample) < 0) { 2525 pr_err("problem processing %d event. skipping it\n", 2526 event->header.type); 2527 rc = -1; 2528 goto out; 2529 } 2530 2531 thread = timehist_get_thread(sched, sample, machine, evsel); 2532 if (thread == NULL) { 2533 rc = -1; 2534 goto out; 2535 } 2536 2537 if (timehist_skip_sample(sched, thread, evsel, sample)) 2538 goto out; 2539 2540 tr = thread__get_runtime(thread); 2541 if (tr == NULL) { 2542 rc = -1; 2543 goto out; 2544 } 2545 2546 tprev = perf_evsel__get_time(evsel, sample->cpu); 2547 2548 /* 2549 * If start time given: 2550 * - sample time is under window user cares about - skip sample 2551 * - tprev is under window user cares about - reset to start of window 2552 */ 2553 if (ptime->start && ptime->start > t) 2554 goto out; 2555 2556 if (tprev && ptime->start > tprev) 2557 tprev = ptime->start; 2558 2559 /* 2560 * If end time given: 2561 * - previous sched event is out of window - we are done 2562 * - sample time is beyond window user cares about - reset it 2563 * to close out stats for time window interest 2564 */ 2565 if (ptime->end) { 2566 if (tprev > ptime->end) 2567 goto out; 2568 2569 if (t > ptime->end) 2570 t = ptime->end; 2571 } 2572 2573 if (!sched->idle_hist || thread->tid == 0) { 2574 timehist_update_runtime_stats(tr, t, tprev); 2575 2576 if (sched->idle_hist) { 2577 struct idle_thread_runtime *itr = (void *)tr; 2578 struct thread_runtime *last_tr; 2579 2580 BUG_ON(thread->tid != 0); 2581 2582 if (itr->last_thread == NULL) 2583 goto out; 2584 2585 /* add current idle time as last thread's runtime */ 2586 last_tr = thread__get_runtime(itr->last_thread); 2587 if (last_tr == NULL) 2588 goto out; 2589 2590 timehist_update_runtime_stats(last_tr, t, tprev); 2591 /* 2592 * remove delta time of last thread as it's not updated 2593 * and otherwise it will show an invalid value next 2594 * time. we only care total run time and run stat. 2595 */ 2596 last_tr->dt_run = 0; 2597 last_tr->dt_delay = 0; 2598 last_tr->dt_sleep = 0; 2599 last_tr->dt_iowait = 0; 2600 last_tr->dt_preempt = 0; 2601 2602 if (itr->cursor.nr) 2603 callchain_append(&itr->callchain, &itr->cursor, t - tprev); 2604 2605 itr->last_thread = NULL; 2606 } 2607 } 2608 2609 if (!sched->summary_only) 2610 timehist_print_sample(sched, evsel, sample, &al, thread, t, state); 2611 2612 out: 2613 if (sched->hist_time.start == 0 && t >= ptime->start) 2614 sched->hist_time.start = t; 2615 if (ptime->end == 0 || t <= ptime->end) 2616 sched->hist_time.end = t; 2617 2618 if (tr) { 2619 /* time of this sched_switch event becomes last time task seen */ 2620 tr->last_time = sample->time; 2621 2622 /* last state is used to determine where to account wait time */ 2623 tr->last_state = state; 2624 2625 /* sched out event for task so reset ready to run time */ 2626 tr->ready_to_run = 0; 2627 } 2628 2629 perf_evsel__save_time(evsel, sample->time, sample->cpu); 2630 2631 return rc; 2632 } 2633 2634 static int timehist_sched_switch_event(struct perf_tool *tool, 2635 union perf_event *event, 2636 struct evsel *evsel, 2637 struct perf_sample *sample, 2638 struct machine *machine __maybe_unused) 2639 { 2640 return timehist_sched_change_event(tool, event, evsel, sample, machine); 2641 } 2642 2643 static int process_lost(struct perf_tool *tool __maybe_unused, 2644 union perf_event *event, 2645 struct perf_sample *sample, 2646 struct machine *machine __maybe_unused) 2647 { 2648 char tstr[64]; 2649 2650 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2651 printf("%15s ", tstr); 2652 printf("lost %" PRI_lu64 " events on cpu %d\n", event->lost.lost, sample->cpu); 2653 2654 return 0; 2655 } 2656 2657 2658 static void print_thread_runtime(struct thread *t, 2659 struct thread_runtime *r) 2660 { 2661 double mean = avg_stats(&r->run_stats); 2662 float stddev; 2663 2664 printf("%*s %5d %9" PRIu64 " ", 2665 comm_width, timehist_get_commstr(t), t->ppid, 2666 (u64) r->run_stats.n); 2667 2668 print_sched_time(r->total_run_time, 8); 2669 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean); 2670 print_sched_time(r->run_stats.min, 6); 2671 printf(" "); 2672 print_sched_time((u64) mean, 6); 2673 printf(" "); 2674 print_sched_time(r->run_stats.max, 6); 2675 printf(" "); 2676 printf("%5.2f", stddev); 2677 printf(" %5" PRIu64, r->migrations); 2678 printf("\n"); 2679 } 2680 2681 static void print_thread_waittime(struct thread *t, 2682 struct thread_runtime *r) 2683 { 2684 printf("%*s %5d %9" PRIu64 " ", 2685 comm_width, timehist_get_commstr(t), t->ppid, 2686 (u64) r->run_stats.n); 2687 2688 print_sched_time(r->total_run_time, 8); 2689 print_sched_time(r->total_sleep_time, 6); 2690 printf(" "); 2691 print_sched_time(r->total_iowait_time, 6); 2692 printf(" "); 2693 print_sched_time(r->total_preempt_time, 6); 2694 printf(" "); 2695 print_sched_time(r->total_delay_time, 6); 2696 printf("\n"); 2697 } 2698 2699 struct total_run_stats { 2700 struct perf_sched *sched; 2701 u64 sched_count; 2702 u64 task_count; 2703 u64 total_run_time; 2704 }; 2705 2706 static int __show_thread_runtime(struct thread *t, void *priv) 2707 { 2708 struct total_run_stats *stats = priv; 2709 struct thread_runtime *r; 2710 2711 if (thread__is_filtered(t)) 2712 return 0; 2713 2714 r = thread__priv(t); 2715 if (r && r->run_stats.n) { 2716 stats->task_count++; 2717 stats->sched_count += r->run_stats.n; 2718 stats->total_run_time += r->total_run_time; 2719 2720 if (stats->sched->show_state) 2721 print_thread_waittime(t, r); 2722 else 2723 print_thread_runtime(t, r); 2724 } 2725 2726 return 0; 2727 } 2728 2729 static int show_thread_runtime(struct thread *t, void *priv) 2730 { 2731 if (t->dead) 2732 return 0; 2733 2734 return __show_thread_runtime(t, priv); 2735 } 2736 2737 static int show_deadthread_runtime(struct thread *t, void *priv) 2738 { 2739 if (!t->dead) 2740 return 0; 2741 2742 return __show_thread_runtime(t, priv); 2743 } 2744 2745 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node) 2746 { 2747 const char *sep = " <- "; 2748 struct callchain_list *chain; 2749 size_t ret = 0; 2750 char bf[1024]; 2751 bool first; 2752 2753 if (node == NULL) 2754 return 0; 2755 2756 ret = callchain__fprintf_folded(fp, node->parent); 2757 first = (ret == 0); 2758 2759 list_for_each_entry(chain, &node->val, list) { 2760 if (chain->ip >= PERF_CONTEXT_MAX) 2761 continue; 2762 if (chain->ms.sym && chain->ms.sym->ignore) 2763 continue; 2764 ret += fprintf(fp, "%s%s", first ? "" : sep, 2765 callchain_list__sym_name(chain, bf, sizeof(bf), 2766 false)); 2767 first = false; 2768 } 2769 2770 return ret; 2771 } 2772 2773 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root) 2774 { 2775 size_t ret = 0; 2776 FILE *fp = stdout; 2777 struct callchain_node *chain; 2778 struct rb_node *rb_node = rb_first_cached(root); 2779 2780 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains"); 2781 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line, 2782 graph_dotted_line); 2783 2784 while (rb_node) { 2785 chain = rb_entry(rb_node, struct callchain_node, rb_node); 2786 rb_node = rb_next(rb_node); 2787 2788 ret += fprintf(fp, " "); 2789 print_sched_time(chain->hit, 12); 2790 ret += 16; /* print_sched_time returns 2nd arg + 4 */ 2791 ret += fprintf(fp, " %8d ", chain->count); 2792 ret += callchain__fprintf_folded(fp, chain); 2793 ret += fprintf(fp, "\n"); 2794 } 2795 2796 return ret; 2797 } 2798 2799 static void timehist_print_summary(struct perf_sched *sched, 2800 struct perf_session *session) 2801 { 2802 struct machine *m = &session->machines.host; 2803 struct total_run_stats totals; 2804 u64 task_count; 2805 struct thread *t; 2806 struct thread_runtime *r; 2807 int i; 2808 u64 hist_time = sched->hist_time.end - sched->hist_time.start; 2809 2810 memset(&totals, 0, sizeof(totals)); 2811 totals.sched = sched; 2812 2813 if (sched->idle_hist) { 2814 printf("\nIdle-time summary\n"); 2815 printf("%*s parent sched-out ", comm_width, "comm"); 2816 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n"); 2817 } else if (sched->show_state) { 2818 printf("\nWait-time summary\n"); 2819 printf("%*s parent sched-in ", comm_width, "comm"); 2820 printf(" run-time sleep iowait preempt delay\n"); 2821 } else { 2822 printf("\nRuntime summary\n"); 2823 printf("%*s parent sched-in ", comm_width, "comm"); 2824 printf(" run-time min-run avg-run max-run stddev migrations\n"); 2825 } 2826 printf("%*s (count) ", comm_width, ""); 2827 printf(" (msec) (msec) (msec) (msec) %s\n", 2828 sched->show_state ? "(msec)" : "%"); 2829 printf("%.117s\n", graph_dotted_line); 2830 2831 machine__for_each_thread(m, show_thread_runtime, &totals); 2832 task_count = totals.task_count; 2833 if (!task_count) 2834 printf("<no still running tasks>\n"); 2835 2836 printf("\nTerminated tasks:\n"); 2837 machine__for_each_thread(m, show_deadthread_runtime, &totals); 2838 if (task_count == totals.task_count) 2839 printf("<no terminated tasks>\n"); 2840 2841 /* CPU idle stats not tracked when samples were skipped */ 2842 if (sched->skipped_samples && !sched->idle_hist) 2843 return; 2844 2845 printf("\nIdle stats:\n"); 2846 for (i = 0; i < idle_max_cpu; ++i) { 2847 t = idle_threads[i]; 2848 if (!t) 2849 continue; 2850 2851 r = thread__priv(t); 2852 if (r && r->run_stats.n) { 2853 totals.sched_count += r->run_stats.n; 2854 printf(" CPU %2d idle for ", i); 2855 print_sched_time(r->total_run_time, 6); 2856 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time); 2857 } else 2858 printf(" CPU %2d idle entire time window\n", i); 2859 } 2860 2861 if (sched->idle_hist && sched->show_callchain) { 2862 callchain_param.mode = CHAIN_FOLDED; 2863 callchain_param.value = CCVAL_PERIOD; 2864 2865 callchain_register_param(&callchain_param); 2866 2867 printf("\nIdle stats by callchain:\n"); 2868 for (i = 0; i < idle_max_cpu; ++i) { 2869 struct idle_thread_runtime *itr; 2870 2871 t = idle_threads[i]; 2872 if (!t) 2873 continue; 2874 2875 itr = thread__priv(t); 2876 if (itr == NULL) 2877 continue; 2878 2879 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain, 2880 0, &callchain_param); 2881 2882 printf(" CPU %2d:", i); 2883 print_sched_time(itr->tr.total_run_time, 6); 2884 printf(" msec\n"); 2885 timehist_print_idlehist_callchain(&itr->sorted_root); 2886 printf("\n"); 2887 } 2888 } 2889 2890 printf("\n" 2891 " Total number of unique tasks: %" PRIu64 "\n" 2892 "Total number of context switches: %" PRIu64 "\n", 2893 totals.task_count, totals.sched_count); 2894 2895 printf(" Total run time (msec): "); 2896 print_sched_time(totals.total_run_time, 2); 2897 printf("\n"); 2898 2899 printf(" Total scheduling time (msec): "); 2900 print_sched_time(hist_time, 2); 2901 printf(" (x %d)\n", sched->max_cpu); 2902 } 2903 2904 typedef int (*sched_handler)(struct perf_tool *tool, 2905 union perf_event *event, 2906 struct evsel *evsel, 2907 struct perf_sample *sample, 2908 struct machine *machine); 2909 2910 static int perf_timehist__process_sample(struct perf_tool *tool, 2911 union perf_event *event, 2912 struct perf_sample *sample, 2913 struct evsel *evsel, 2914 struct machine *machine) 2915 { 2916 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2917 int err = 0; 2918 int this_cpu = sample->cpu; 2919 2920 if (this_cpu > sched->max_cpu) 2921 sched->max_cpu = this_cpu; 2922 2923 if (evsel->handler != NULL) { 2924 sched_handler f = evsel->handler; 2925 2926 err = f(tool, event, evsel, sample, machine); 2927 } 2928 2929 return err; 2930 } 2931 2932 static int timehist_check_attr(struct perf_sched *sched, 2933 struct evlist *evlist) 2934 { 2935 struct evsel *evsel; 2936 struct evsel_runtime *er; 2937 2938 list_for_each_entry(evsel, &evlist->core.entries, core.node) { 2939 er = perf_evsel__get_runtime(evsel); 2940 if (er == NULL) { 2941 pr_err("Failed to allocate memory for evsel runtime data\n"); 2942 return -1; 2943 } 2944 2945 if (sched->show_callchain && !evsel__has_callchain(evsel)) { 2946 pr_info("Samples do not have callchains.\n"); 2947 sched->show_callchain = 0; 2948 symbol_conf.use_callchain = 0; 2949 } 2950 } 2951 2952 return 0; 2953 } 2954 2955 static int perf_sched__timehist(struct perf_sched *sched) 2956 { 2957 const struct evsel_str_handler handlers[] = { 2958 { "sched:sched_switch", timehist_sched_switch_event, }, 2959 { "sched:sched_wakeup", timehist_sched_wakeup_event, }, 2960 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, }, 2961 }; 2962 const struct evsel_str_handler migrate_handlers[] = { 2963 { "sched:sched_migrate_task", timehist_migrate_task_event, }, 2964 }; 2965 struct perf_data data = { 2966 .path = input_name, 2967 .mode = PERF_DATA_MODE_READ, 2968 .force = sched->force, 2969 }; 2970 2971 struct perf_session *session; 2972 struct evlist *evlist; 2973 int err = -1; 2974 2975 /* 2976 * event handlers for timehist option 2977 */ 2978 sched->tool.sample = perf_timehist__process_sample; 2979 sched->tool.mmap = perf_event__process_mmap; 2980 sched->tool.comm = perf_event__process_comm; 2981 sched->tool.exit = perf_event__process_exit; 2982 sched->tool.fork = perf_event__process_fork; 2983 sched->tool.lost = process_lost; 2984 sched->tool.attr = perf_event__process_attr; 2985 sched->tool.tracing_data = perf_event__process_tracing_data; 2986 sched->tool.build_id = perf_event__process_build_id; 2987 2988 sched->tool.ordered_events = true; 2989 sched->tool.ordering_requires_timestamps = true; 2990 2991 symbol_conf.use_callchain = sched->show_callchain; 2992 2993 session = perf_session__new(&data, false, &sched->tool); 2994 if (IS_ERR(session)) 2995 return PTR_ERR(session); 2996 2997 evlist = session->evlist; 2998 2999 symbol__init(&session->header.env); 3000 3001 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) { 3002 pr_err("Invalid time string\n"); 3003 return -EINVAL; 3004 } 3005 3006 if (timehist_check_attr(sched, evlist) != 0) 3007 goto out; 3008 3009 setup_pager(); 3010 3011 /* setup per-evsel handlers */ 3012 if (perf_session__set_tracepoints_handlers(session, handlers)) 3013 goto out; 3014 3015 /* sched_switch event at a minimum needs to exist */ 3016 if (!perf_evlist__find_tracepoint_by_name(session->evlist, 3017 "sched:sched_switch")) { 3018 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n"); 3019 goto out; 3020 } 3021 3022 if (sched->show_migrations && 3023 perf_session__set_tracepoints_handlers(session, migrate_handlers)) 3024 goto out; 3025 3026 /* pre-allocate struct for per-CPU idle stats */ 3027 sched->max_cpu = session->header.env.nr_cpus_online; 3028 if (sched->max_cpu == 0) 3029 sched->max_cpu = 4; 3030 if (init_idle_threads(sched->max_cpu)) 3031 goto out; 3032 3033 /* summary_only implies summary option, but don't overwrite summary if set */ 3034 if (sched->summary_only) 3035 sched->summary = sched->summary_only; 3036 3037 if (!sched->summary_only) 3038 timehist_header(sched); 3039 3040 err = perf_session__process_events(session); 3041 if (err) { 3042 pr_err("Failed to process events, error %d", err); 3043 goto out; 3044 } 3045 3046 sched->nr_events = evlist->stats.nr_events[0]; 3047 sched->nr_lost_events = evlist->stats.total_lost; 3048 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST]; 3049 3050 if (sched->summary) 3051 timehist_print_summary(sched, session); 3052 3053 out: 3054 free_idle_threads(); 3055 perf_session__delete(session); 3056 3057 return err; 3058 } 3059 3060 3061 static void print_bad_events(struct perf_sched *sched) 3062 { 3063 if (sched->nr_unordered_timestamps && sched->nr_timestamps) { 3064 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", 3065 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, 3066 sched->nr_unordered_timestamps, sched->nr_timestamps); 3067 } 3068 if (sched->nr_lost_events && sched->nr_events) { 3069 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", 3070 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, 3071 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); 3072 } 3073 if (sched->nr_context_switch_bugs && sched->nr_timestamps) { 3074 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", 3075 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, 3076 sched->nr_context_switch_bugs, sched->nr_timestamps); 3077 if (sched->nr_lost_events) 3078 printf(" (due to lost events?)"); 3079 printf("\n"); 3080 } 3081 } 3082 3083 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data) 3084 { 3085 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 3086 struct work_atoms *this; 3087 const char *comm = thread__comm_str(data->thread), *this_comm; 3088 bool leftmost = true; 3089 3090 while (*new) { 3091 int cmp; 3092 3093 this = container_of(*new, struct work_atoms, node); 3094 parent = *new; 3095 3096 this_comm = thread__comm_str(this->thread); 3097 cmp = strcmp(comm, this_comm); 3098 if (cmp > 0) { 3099 new = &((*new)->rb_left); 3100 } else if (cmp < 0) { 3101 new = &((*new)->rb_right); 3102 leftmost = false; 3103 } else { 3104 this->num_merged++; 3105 this->total_runtime += data->total_runtime; 3106 this->nb_atoms += data->nb_atoms; 3107 this->total_lat += data->total_lat; 3108 list_splice(&data->work_list, &this->work_list); 3109 if (this->max_lat < data->max_lat) { 3110 this->max_lat = data->max_lat; 3111 this->max_lat_at = data->max_lat_at; 3112 } 3113 zfree(&data); 3114 return; 3115 } 3116 } 3117 3118 data->num_merged++; 3119 rb_link_node(&data->node, parent, new); 3120 rb_insert_color_cached(&data->node, root, leftmost); 3121 } 3122 3123 static void perf_sched__merge_lat(struct perf_sched *sched) 3124 { 3125 struct work_atoms *data; 3126 struct rb_node *node; 3127 3128 if (sched->skip_merge) 3129 return; 3130 3131 while ((node = rb_first_cached(&sched->atom_root))) { 3132 rb_erase_cached(node, &sched->atom_root); 3133 data = rb_entry(node, struct work_atoms, node); 3134 __merge_work_atoms(&sched->merged_atom_root, data); 3135 } 3136 } 3137 3138 static int perf_sched__lat(struct perf_sched *sched) 3139 { 3140 struct rb_node *next; 3141 3142 setup_pager(); 3143 3144 if (perf_sched__read_events(sched)) 3145 return -1; 3146 3147 perf_sched__merge_lat(sched); 3148 perf_sched__sort_lat(sched); 3149 3150 printf("\n -----------------------------------------------------------------------------------------------------------------\n"); 3151 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); 3152 printf(" -----------------------------------------------------------------------------------------------------------------\n"); 3153 3154 next = rb_first_cached(&sched->sorted_atom_root); 3155 3156 while (next) { 3157 struct work_atoms *work_list; 3158 3159 work_list = rb_entry(next, struct work_atoms, node); 3160 output_lat_thread(sched, work_list); 3161 next = rb_next(next); 3162 thread__zput(work_list->thread); 3163 } 3164 3165 printf(" -----------------------------------------------------------------------------------------------------------------\n"); 3166 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", 3167 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count); 3168 3169 printf(" ---------------------------------------------------\n"); 3170 3171 print_bad_events(sched); 3172 printf("\n"); 3173 3174 return 0; 3175 } 3176 3177 static int setup_map_cpus(struct perf_sched *sched) 3178 { 3179 struct perf_cpu_map *map; 3180 3181 sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); 3182 3183 if (sched->map.comp) { 3184 sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int)); 3185 if (!sched->map.comp_cpus) 3186 return -1; 3187 } 3188 3189 if (!sched->map.cpus_str) 3190 return 0; 3191 3192 map = perf_cpu_map__new(sched->map.cpus_str); 3193 if (!map) { 3194 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); 3195 return -1; 3196 } 3197 3198 sched->map.cpus = map; 3199 return 0; 3200 } 3201 3202 static int setup_color_pids(struct perf_sched *sched) 3203 { 3204 struct perf_thread_map *map; 3205 3206 if (!sched->map.color_pids_str) 3207 return 0; 3208 3209 map = thread_map__new_by_tid_str(sched->map.color_pids_str); 3210 if (!map) { 3211 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str); 3212 return -1; 3213 } 3214 3215 sched->map.color_pids = map; 3216 return 0; 3217 } 3218 3219 static int setup_color_cpus(struct perf_sched *sched) 3220 { 3221 struct perf_cpu_map *map; 3222 3223 if (!sched->map.color_cpus_str) 3224 return 0; 3225 3226 map = perf_cpu_map__new(sched->map.color_cpus_str); 3227 if (!map) { 3228 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str); 3229 return -1; 3230 } 3231 3232 sched->map.color_cpus = map; 3233 return 0; 3234 } 3235 3236 static int perf_sched__map(struct perf_sched *sched) 3237 { 3238 if (setup_map_cpus(sched)) 3239 return -1; 3240 3241 if (setup_color_pids(sched)) 3242 return -1; 3243 3244 if (setup_color_cpus(sched)) 3245 return -1; 3246 3247 setup_pager(); 3248 if (perf_sched__read_events(sched)) 3249 return -1; 3250 print_bad_events(sched); 3251 return 0; 3252 } 3253 3254 static int perf_sched__replay(struct perf_sched *sched) 3255 { 3256 unsigned long i; 3257 3258 calibrate_run_measurement_overhead(sched); 3259 calibrate_sleep_measurement_overhead(sched); 3260 3261 test_calibrations(sched); 3262 3263 if (perf_sched__read_events(sched)) 3264 return -1; 3265 3266 printf("nr_run_events: %ld\n", sched->nr_run_events); 3267 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); 3268 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); 3269 3270 if (sched->targetless_wakeups) 3271 printf("target-less wakeups: %ld\n", sched->targetless_wakeups); 3272 if (sched->multitarget_wakeups) 3273 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); 3274 if (sched->nr_run_events_optimized) 3275 printf("run atoms optimized: %ld\n", 3276 sched->nr_run_events_optimized); 3277 3278 print_task_traces(sched); 3279 add_cross_task_wakeups(sched); 3280 3281 create_tasks(sched); 3282 printf("------------------------------------------------------------\n"); 3283 for (i = 0; i < sched->replay_repeat; i++) 3284 run_one_test(sched); 3285 3286 return 0; 3287 } 3288 3289 static void setup_sorting(struct perf_sched *sched, const struct option *options, 3290 const char * const usage_msg[]) 3291 { 3292 char *tmp, *tok, *str = strdup(sched->sort_order); 3293 3294 for (tok = strtok_r(str, ", ", &tmp); 3295 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3296 if (sort_dimension__add(tok, &sched->sort_list) < 0) { 3297 usage_with_options_msg(usage_msg, options, 3298 "Unknown --sort key: `%s'", tok); 3299 } 3300 } 3301 3302 free(str); 3303 3304 sort_dimension__add("pid", &sched->cmp_pid); 3305 } 3306 3307 static int __cmd_record(int argc, const char **argv) 3308 { 3309 unsigned int rec_argc, i, j; 3310 const char **rec_argv; 3311 const char * const record_args[] = { 3312 "record", 3313 "-a", 3314 "-R", 3315 "-m", "1024", 3316 "-c", "1", 3317 "-e", "sched:sched_switch", 3318 "-e", "sched:sched_stat_wait", 3319 "-e", "sched:sched_stat_sleep", 3320 "-e", "sched:sched_stat_iowait", 3321 "-e", "sched:sched_stat_runtime", 3322 "-e", "sched:sched_process_fork", 3323 "-e", "sched:sched_wakeup", 3324 "-e", "sched:sched_wakeup_new", 3325 "-e", "sched:sched_migrate_task", 3326 }; 3327 3328 rec_argc = ARRAY_SIZE(record_args) + argc - 1; 3329 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3330 3331 if (rec_argv == NULL) 3332 return -ENOMEM; 3333 3334 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3335 rec_argv[i] = strdup(record_args[i]); 3336 3337 for (j = 1; j < (unsigned int)argc; j++, i++) 3338 rec_argv[i] = argv[j]; 3339 3340 BUG_ON(i != rec_argc); 3341 3342 return cmd_record(i, rec_argv); 3343 } 3344 3345 int cmd_sched(int argc, const char **argv) 3346 { 3347 static const char default_sort_order[] = "avg, max, switch, runtime"; 3348 struct perf_sched sched = { 3349 .tool = { 3350 .sample = perf_sched__process_tracepoint_sample, 3351 .comm = perf_sched__process_comm, 3352 .namespaces = perf_event__process_namespaces, 3353 .lost = perf_event__process_lost, 3354 .fork = perf_sched__process_fork_event, 3355 .ordered_events = true, 3356 }, 3357 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), 3358 .sort_list = LIST_HEAD_INIT(sched.sort_list), 3359 .start_work_mutex = PTHREAD_MUTEX_INITIALIZER, 3360 .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER, 3361 .sort_order = default_sort_order, 3362 .replay_repeat = 10, 3363 .profile_cpu = -1, 3364 .next_shortname1 = 'A', 3365 .next_shortname2 = '0', 3366 .skip_merge = 0, 3367 .show_callchain = 1, 3368 .max_stack = 5, 3369 }; 3370 const struct option sched_options[] = { 3371 OPT_STRING('i', "input", &input_name, "file", 3372 "input file name"), 3373 OPT_INCR('v', "verbose", &verbose, 3374 "be more verbose (show symbol address, etc)"), 3375 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 3376 "dump raw trace in ASCII"), 3377 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"), 3378 OPT_END() 3379 }; 3380 const struct option latency_options[] = { 3381 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", 3382 "sort by key(s): runtime, switch, avg, max"), 3383 OPT_INTEGER('C', "CPU", &sched.profile_cpu, 3384 "CPU to profile on"), 3385 OPT_BOOLEAN('p', "pids", &sched.skip_merge, 3386 "latency stats per pid instead of per comm"), 3387 OPT_PARENT(sched_options) 3388 }; 3389 const struct option replay_options[] = { 3390 OPT_UINTEGER('r', "repeat", &sched.replay_repeat, 3391 "repeat the workload replay N times (-1: infinite)"), 3392 OPT_PARENT(sched_options) 3393 }; 3394 const struct option map_options[] = { 3395 OPT_BOOLEAN(0, "compact", &sched.map.comp, 3396 "map output in compact mode"), 3397 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids", 3398 "highlight given pids in map"), 3399 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus", 3400 "highlight given CPUs in map"), 3401 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus", 3402 "display given CPUs in map"), 3403 OPT_PARENT(sched_options) 3404 }; 3405 const struct option timehist_options[] = { 3406 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 3407 "file", "vmlinux pathname"), 3408 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 3409 "file", "kallsyms pathname"), 3410 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain, 3411 "Display call chains if present (default on)"), 3412 OPT_UINTEGER(0, "max-stack", &sched.max_stack, 3413 "Maximum number of functions to display backtrace."), 3414 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 3415 "Look for files with symbols relative to this directory"), 3416 OPT_BOOLEAN('s', "summary", &sched.summary_only, 3417 "Show only syscall summary with statistics"), 3418 OPT_BOOLEAN('S', "with-summary", &sched.summary, 3419 "Show all syscalls and summary with statistics"), 3420 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"), 3421 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"), 3422 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"), 3423 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"), 3424 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"), 3425 OPT_STRING(0, "time", &sched.time_str, "str", 3426 "Time span for analysis (start,stop)"), 3427 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"), 3428 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]", 3429 "analyze events only for given process id(s)"), 3430 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]", 3431 "analyze events only for given thread id(s)"), 3432 OPT_PARENT(sched_options) 3433 }; 3434 3435 const char * const latency_usage[] = { 3436 "perf sched latency [<options>]", 3437 NULL 3438 }; 3439 const char * const replay_usage[] = { 3440 "perf sched replay [<options>]", 3441 NULL 3442 }; 3443 const char * const map_usage[] = { 3444 "perf sched map [<options>]", 3445 NULL 3446 }; 3447 const char * const timehist_usage[] = { 3448 "perf sched timehist [<options>]", 3449 NULL 3450 }; 3451 const char *const sched_subcommands[] = { "record", "latency", "map", 3452 "replay", "script", 3453 "timehist", NULL }; 3454 const char *sched_usage[] = { 3455 NULL, 3456 NULL 3457 }; 3458 struct trace_sched_handler lat_ops = { 3459 .wakeup_event = latency_wakeup_event, 3460 .switch_event = latency_switch_event, 3461 .runtime_event = latency_runtime_event, 3462 .migrate_task_event = latency_migrate_task_event, 3463 }; 3464 struct trace_sched_handler map_ops = { 3465 .switch_event = map_switch_event, 3466 }; 3467 struct trace_sched_handler replay_ops = { 3468 .wakeup_event = replay_wakeup_event, 3469 .switch_event = replay_switch_event, 3470 .fork_event = replay_fork_event, 3471 }; 3472 unsigned int i; 3473 3474 for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++) 3475 sched.curr_pid[i] = -1; 3476 3477 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands, 3478 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); 3479 if (!argc) 3480 usage_with_options(sched_usage, sched_options); 3481 3482 /* 3483 * Aliased to 'perf script' for now: 3484 */ 3485 if (!strcmp(argv[0], "script")) 3486 return cmd_script(argc, argv); 3487 3488 if (!strncmp(argv[0], "rec", 3)) { 3489 return __cmd_record(argc, argv); 3490 } else if (!strncmp(argv[0], "lat", 3)) { 3491 sched.tp_handler = &lat_ops; 3492 if (argc > 1) { 3493 argc = parse_options(argc, argv, latency_options, latency_usage, 0); 3494 if (argc) 3495 usage_with_options(latency_usage, latency_options); 3496 } 3497 setup_sorting(&sched, latency_options, latency_usage); 3498 return perf_sched__lat(&sched); 3499 } else if (!strcmp(argv[0], "map")) { 3500 if (argc) { 3501 argc = parse_options(argc, argv, map_options, map_usage, 0); 3502 if (argc) 3503 usage_with_options(map_usage, map_options); 3504 } 3505 sched.tp_handler = &map_ops; 3506 setup_sorting(&sched, latency_options, latency_usage); 3507 return perf_sched__map(&sched); 3508 } else if (!strncmp(argv[0], "rep", 3)) { 3509 sched.tp_handler = &replay_ops; 3510 if (argc) { 3511 argc = parse_options(argc, argv, replay_options, replay_usage, 0); 3512 if (argc) 3513 usage_with_options(replay_usage, replay_options); 3514 } 3515 return perf_sched__replay(&sched); 3516 } else if (!strcmp(argv[0], "timehist")) { 3517 if (argc) { 3518 argc = parse_options(argc, argv, timehist_options, 3519 timehist_usage, 0); 3520 if (argc) 3521 usage_with_options(timehist_usage, timehist_options); 3522 } 3523 if ((sched.show_wakeups || sched.show_next) && 3524 sched.summary_only) { 3525 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n"); 3526 parse_options_usage(timehist_usage, timehist_options, "s", true); 3527 if (sched.show_wakeups) 3528 parse_options_usage(NULL, timehist_options, "w", true); 3529 if (sched.show_next) 3530 parse_options_usage(NULL, timehist_options, "n", true); 3531 return -EINVAL; 3532 } 3533 3534 return perf_sched__timehist(&sched); 3535 } else { 3536 usage_with_options(sched_usage, sched_options); 3537 } 3538 3539 return 0; 3540 } 3541