1 // SPDX-License-Identifier: GPL-2.0 2 #include "builtin.h" 3 #include "perf.h" 4 5 #include "util/evlist.h" 6 #include "util/cache.h" 7 #include "util/evsel.h" 8 #include "util/symbol.h" 9 #include "util/thread.h" 10 #include "util/header.h" 11 #include "util/session.h" 12 #include "util/tool.h" 13 #include "util/cloexec.h" 14 #include "util/thread_map.h" 15 #include "util/color.h" 16 #include "util/stat.h" 17 #include "util/string2.h" 18 #include "util/callchain.h" 19 #include "util/time-utils.h" 20 21 #include <subcmd/parse-options.h> 22 #include "util/trace-event.h" 23 24 #include "util/debug.h" 25 26 #include <linux/kernel.h> 27 #include <linux/log2.h> 28 #include <linux/zalloc.h> 29 #include <sys/prctl.h> 30 #include <sys/resource.h> 31 #include <inttypes.h> 32 33 #include <errno.h> 34 #include <semaphore.h> 35 #include <pthread.h> 36 #include <math.h> 37 #include <api/fs/fs.h> 38 #include <linux/time64.h> 39 40 #include <linux/ctype.h> 41 42 #define PR_SET_NAME 15 /* Set process name */ 43 #define MAX_CPUS 4096 44 #define COMM_LEN 20 45 #define SYM_LEN 129 46 #define MAX_PID 1024000 47 48 struct sched_atom; 49 50 struct task_desc { 51 unsigned long nr; 52 unsigned long pid; 53 char comm[COMM_LEN]; 54 55 unsigned long nr_events; 56 unsigned long curr_event; 57 struct sched_atom **atoms; 58 59 pthread_t thread; 60 sem_t sleep_sem; 61 62 sem_t ready_for_work; 63 sem_t work_done_sem; 64 65 u64 cpu_usage; 66 }; 67 68 enum sched_event_type { 69 SCHED_EVENT_RUN, 70 SCHED_EVENT_SLEEP, 71 SCHED_EVENT_WAKEUP, 72 SCHED_EVENT_MIGRATION, 73 }; 74 75 struct sched_atom { 76 enum sched_event_type type; 77 int specific_wait; 78 u64 timestamp; 79 u64 duration; 80 unsigned long nr; 81 sem_t *wait_sem; 82 struct task_desc *wakee; 83 }; 84 85 #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP" 86 87 /* task state bitmask, copied from include/linux/sched.h */ 88 #define TASK_RUNNING 0 89 #define TASK_INTERRUPTIBLE 1 90 #define TASK_UNINTERRUPTIBLE 2 91 #define __TASK_STOPPED 4 92 #define __TASK_TRACED 8 93 /* in tsk->exit_state */ 94 #define EXIT_DEAD 16 95 #define EXIT_ZOMBIE 32 96 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD) 97 /* in tsk->state again */ 98 #define TASK_DEAD 64 99 #define TASK_WAKEKILL 128 100 #define TASK_WAKING 256 101 #define TASK_PARKED 512 102 103 enum thread_state { 104 THREAD_SLEEPING = 0, 105 THREAD_WAIT_CPU, 106 THREAD_SCHED_IN, 107 THREAD_IGNORE 108 }; 109 110 struct work_atom { 111 struct list_head list; 112 enum thread_state state; 113 u64 sched_out_time; 114 u64 wake_up_time; 115 u64 sched_in_time; 116 u64 runtime; 117 }; 118 119 struct work_atoms { 120 struct list_head work_list; 121 struct thread *thread; 122 struct rb_node node; 123 u64 max_lat; 124 u64 max_lat_at; 125 u64 total_lat; 126 u64 nb_atoms; 127 u64 total_runtime; 128 int num_merged; 129 }; 130 131 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); 132 133 struct perf_sched; 134 135 struct trace_sched_handler { 136 int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel, 137 struct perf_sample *sample, struct machine *machine); 138 139 int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel, 140 struct perf_sample *sample, struct machine *machine); 141 142 int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel, 143 struct perf_sample *sample, struct machine *machine); 144 145 /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */ 146 int (*fork_event)(struct perf_sched *sched, union perf_event *event, 147 struct machine *machine); 148 149 int (*migrate_task_event)(struct perf_sched *sched, 150 struct perf_evsel *evsel, 151 struct perf_sample *sample, 152 struct machine *machine); 153 }; 154 155 #define COLOR_PIDS PERF_COLOR_BLUE 156 #define COLOR_CPUS PERF_COLOR_BG_RED 157 158 struct perf_sched_map { 159 DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS); 160 int *comp_cpus; 161 bool comp; 162 struct thread_map *color_pids; 163 const char *color_pids_str; 164 struct cpu_map *color_cpus; 165 const char *color_cpus_str; 166 struct cpu_map *cpus; 167 const char *cpus_str; 168 }; 169 170 struct perf_sched { 171 struct perf_tool tool; 172 const char *sort_order; 173 unsigned long nr_tasks; 174 struct task_desc **pid_to_task; 175 struct task_desc **tasks; 176 const struct trace_sched_handler *tp_handler; 177 pthread_mutex_t start_work_mutex; 178 pthread_mutex_t work_done_wait_mutex; 179 int profile_cpu; 180 /* 181 * Track the current task - that way we can know whether there's any 182 * weird events, such as a task being switched away that is not current. 183 */ 184 int max_cpu; 185 u32 curr_pid[MAX_CPUS]; 186 struct thread *curr_thread[MAX_CPUS]; 187 char next_shortname1; 188 char next_shortname2; 189 unsigned int replay_repeat; 190 unsigned long nr_run_events; 191 unsigned long nr_sleep_events; 192 unsigned long nr_wakeup_events; 193 unsigned long nr_sleep_corrections; 194 unsigned long nr_run_events_optimized; 195 unsigned long targetless_wakeups; 196 unsigned long multitarget_wakeups; 197 unsigned long nr_runs; 198 unsigned long nr_timestamps; 199 unsigned long nr_unordered_timestamps; 200 unsigned long nr_context_switch_bugs; 201 unsigned long nr_events; 202 unsigned long nr_lost_chunks; 203 unsigned long nr_lost_events; 204 u64 run_measurement_overhead; 205 u64 sleep_measurement_overhead; 206 u64 start_time; 207 u64 cpu_usage; 208 u64 runavg_cpu_usage; 209 u64 parent_cpu_usage; 210 u64 runavg_parent_cpu_usage; 211 u64 sum_runtime; 212 u64 sum_fluct; 213 u64 run_avg; 214 u64 all_runtime; 215 u64 all_count; 216 u64 cpu_last_switched[MAX_CPUS]; 217 struct rb_root_cached atom_root, sorted_atom_root, merged_atom_root; 218 struct list_head sort_list, cmp_pid; 219 bool force; 220 bool skip_merge; 221 struct perf_sched_map map; 222 223 /* options for timehist command */ 224 bool summary; 225 bool summary_only; 226 bool idle_hist; 227 bool show_callchain; 228 unsigned int max_stack; 229 bool show_cpu_visual; 230 bool show_wakeups; 231 bool show_next; 232 bool show_migrations; 233 bool show_state; 234 u64 skipped_samples; 235 const char *time_str; 236 struct perf_time_interval ptime; 237 struct perf_time_interval hist_time; 238 }; 239 240 /* per thread run time data */ 241 struct thread_runtime { 242 u64 last_time; /* time of previous sched in/out event */ 243 u64 dt_run; /* run time */ 244 u64 dt_sleep; /* time between CPU access by sleep (off cpu) */ 245 u64 dt_iowait; /* time between CPU access by iowait (off cpu) */ 246 u64 dt_preempt; /* time between CPU access by preempt (off cpu) */ 247 u64 dt_delay; /* time between wakeup and sched-in */ 248 u64 ready_to_run; /* time of wakeup */ 249 250 struct stats run_stats; 251 u64 total_run_time; 252 u64 total_sleep_time; 253 u64 total_iowait_time; 254 u64 total_preempt_time; 255 u64 total_delay_time; 256 257 int last_state; 258 259 char shortname[3]; 260 bool comm_changed; 261 262 u64 migrations; 263 }; 264 265 /* per event run time data */ 266 struct evsel_runtime { 267 u64 *last_time; /* time this event was last seen per cpu */ 268 u32 ncpu; /* highest cpu slot allocated */ 269 }; 270 271 /* per cpu idle time data */ 272 struct idle_thread_runtime { 273 struct thread_runtime tr; 274 struct thread *last_thread; 275 struct rb_root_cached sorted_root; 276 struct callchain_root callchain; 277 struct callchain_cursor cursor; 278 }; 279 280 /* track idle times per cpu */ 281 static struct thread **idle_threads; 282 static int idle_max_cpu; 283 static char idle_comm[] = "<idle>"; 284 285 static u64 get_nsecs(void) 286 { 287 struct timespec ts; 288 289 clock_gettime(CLOCK_MONOTONIC, &ts); 290 291 return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; 292 } 293 294 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) 295 { 296 u64 T0 = get_nsecs(), T1; 297 298 do { 299 T1 = get_nsecs(); 300 } while (T1 + sched->run_measurement_overhead < T0 + nsecs); 301 } 302 303 static void sleep_nsecs(u64 nsecs) 304 { 305 struct timespec ts; 306 307 ts.tv_nsec = nsecs % 999999999; 308 ts.tv_sec = nsecs / 999999999; 309 310 nanosleep(&ts, NULL); 311 } 312 313 static void calibrate_run_measurement_overhead(struct perf_sched *sched) 314 { 315 u64 T0, T1, delta, min_delta = NSEC_PER_SEC; 316 int i; 317 318 for (i = 0; i < 10; i++) { 319 T0 = get_nsecs(); 320 burn_nsecs(sched, 0); 321 T1 = get_nsecs(); 322 delta = T1-T0; 323 min_delta = min(min_delta, delta); 324 } 325 sched->run_measurement_overhead = min_delta; 326 327 printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); 328 } 329 330 static void calibrate_sleep_measurement_overhead(struct perf_sched *sched) 331 { 332 u64 T0, T1, delta, min_delta = NSEC_PER_SEC; 333 int i; 334 335 for (i = 0; i < 10; i++) { 336 T0 = get_nsecs(); 337 sleep_nsecs(10000); 338 T1 = get_nsecs(); 339 delta = T1-T0; 340 min_delta = min(min_delta, delta); 341 } 342 min_delta -= 10000; 343 sched->sleep_measurement_overhead = min_delta; 344 345 printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); 346 } 347 348 static struct sched_atom * 349 get_new_event(struct task_desc *task, u64 timestamp) 350 { 351 struct sched_atom *event = zalloc(sizeof(*event)); 352 unsigned long idx = task->nr_events; 353 size_t size; 354 355 event->timestamp = timestamp; 356 event->nr = idx; 357 358 task->nr_events++; 359 size = sizeof(struct sched_atom *) * task->nr_events; 360 task->atoms = realloc(task->atoms, size); 361 BUG_ON(!task->atoms); 362 363 task->atoms[idx] = event; 364 365 return event; 366 } 367 368 static struct sched_atom *last_event(struct task_desc *task) 369 { 370 if (!task->nr_events) 371 return NULL; 372 373 return task->atoms[task->nr_events - 1]; 374 } 375 376 static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, 377 u64 timestamp, u64 duration) 378 { 379 struct sched_atom *event, *curr_event = last_event(task); 380 381 /* 382 * optimize an existing RUN event by merging this one 383 * to it: 384 */ 385 if (curr_event && curr_event->type == SCHED_EVENT_RUN) { 386 sched->nr_run_events_optimized++; 387 curr_event->duration += duration; 388 return; 389 } 390 391 event = get_new_event(task, timestamp); 392 393 event->type = SCHED_EVENT_RUN; 394 event->duration = duration; 395 396 sched->nr_run_events++; 397 } 398 399 static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, 400 u64 timestamp, struct task_desc *wakee) 401 { 402 struct sched_atom *event, *wakee_event; 403 404 event = get_new_event(task, timestamp); 405 event->type = SCHED_EVENT_WAKEUP; 406 event->wakee = wakee; 407 408 wakee_event = last_event(wakee); 409 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) { 410 sched->targetless_wakeups++; 411 return; 412 } 413 if (wakee_event->wait_sem) { 414 sched->multitarget_wakeups++; 415 return; 416 } 417 418 wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem)); 419 sem_init(wakee_event->wait_sem, 0, 0); 420 wakee_event->specific_wait = 1; 421 event->wait_sem = wakee_event->wait_sem; 422 423 sched->nr_wakeup_events++; 424 } 425 426 static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, 427 u64 timestamp, u64 task_state __maybe_unused) 428 { 429 struct sched_atom *event = get_new_event(task, timestamp); 430 431 event->type = SCHED_EVENT_SLEEP; 432 433 sched->nr_sleep_events++; 434 } 435 436 static struct task_desc *register_pid(struct perf_sched *sched, 437 unsigned long pid, const char *comm) 438 { 439 struct task_desc *task; 440 static int pid_max; 441 442 if (sched->pid_to_task == NULL) { 443 if (sysctl__read_int("kernel/pid_max", &pid_max) < 0) 444 pid_max = MAX_PID; 445 BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL); 446 } 447 if (pid >= (unsigned long)pid_max) { 448 BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) * 449 sizeof(struct task_desc *))) == NULL); 450 while (pid >= (unsigned long)pid_max) 451 sched->pid_to_task[pid_max++] = NULL; 452 } 453 454 task = sched->pid_to_task[pid]; 455 456 if (task) 457 return task; 458 459 task = zalloc(sizeof(*task)); 460 task->pid = pid; 461 task->nr = sched->nr_tasks; 462 strcpy(task->comm, comm); 463 /* 464 * every task starts in sleeping state - this gets ignored 465 * if there's no wakeup pointing to this sleep state: 466 */ 467 add_sched_event_sleep(sched, task, 0, 0); 468 469 sched->pid_to_task[pid] = task; 470 sched->nr_tasks++; 471 sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *)); 472 BUG_ON(!sched->tasks); 473 sched->tasks[task->nr] = task; 474 475 if (verbose > 0) 476 printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); 477 478 return task; 479 } 480 481 482 static void print_task_traces(struct perf_sched *sched) 483 { 484 struct task_desc *task; 485 unsigned long i; 486 487 for (i = 0; i < sched->nr_tasks; i++) { 488 task = sched->tasks[i]; 489 printf("task %6ld (%20s:%10ld), nr_events: %ld\n", 490 task->nr, task->comm, task->pid, task->nr_events); 491 } 492 } 493 494 static void add_cross_task_wakeups(struct perf_sched *sched) 495 { 496 struct task_desc *task1, *task2; 497 unsigned long i, j; 498 499 for (i = 0; i < sched->nr_tasks; i++) { 500 task1 = sched->tasks[i]; 501 j = i + 1; 502 if (j == sched->nr_tasks) 503 j = 0; 504 task2 = sched->tasks[j]; 505 add_sched_event_wakeup(sched, task1, 0, task2); 506 } 507 } 508 509 static void perf_sched__process_event(struct perf_sched *sched, 510 struct sched_atom *atom) 511 { 512 int ret = 0; 513 514 switch (atom->type) { 515 case SCHED_EVENT_RUN: 516 burn_nsecs(sched, atom->duration); 517 break; 518 case SCHED_EVENT_SLEEP: 519 if (atom->wait_sem) 520 ret = sem_wait(atom->wait_sem); 521 BUG_ON(ret); 522 break; 523 case SCHED_EVENT_WAKEUP: 524 if (atom->wait_sem) 525 ret = sem_post(atom->wait_sem); 526 BUG_ON(ret); 527 break; 528 case SCHED_EVENT_MIGRATION: 529 break; 530 default: 531 BUG_ON(1); 532 } 533 } 534 535 static u64 get_cpu_usage_nsec_parent(void) 536 { 537 struct rusage ru; 538 u64 sum; 539 int err; 540 541 err = getrusage(RUSAGE_SELF, &ru); 542 BUG_ON(err); 543 544 sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC; 545 sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC; 546 547 return sum; 548 } 549 550 static int self_open_counters(struct perf_sched *sched, unsigned long cur_task) 551 { 552 struct perf_event_attr attr; 553 char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE]; 554 int fd; 555 struct rlimit limit; 556 bool need_privilege = false; 557 558 memset(&attr, 0, sizeof(attr)); 559 560 attr.type = PERF_TYPE_SOFTWARE; 561 attr.config = PERF_COUNT_SW_TASK_CLOCK; 562 563 force_again: 564 fd = sys_perf_event_open(&attr, 0, -1, -1, 565 perf_event_open_cloexec_flag()); 566 567 if (fd < 0) { 568 if (errno == EMFILE) { 569 if (sched->force) { 570 BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1); 571 limit.rlim_cur += sched->nr_tasks - cur_task; 572 if (limit.rlim_cur > limit.rlim_max) { 573 limit.rlim_max = limit.rlim_cur; 574 need_privilege = true; 575 } 576 if (setrlimit(RLIMIT_NOFILE, &limit) == -1) { 577 if (need_privilege && errno == EPERM) 578 strcpy(info, "Need privilege\n"); 579 } else 580 goto force_again; 581 } else 582 strcpy(info, "Have a try with -f option\n"); 583 } 584 pr_err("Error: sys_perf_event_open() syscall returned " 585 "with %d (%s)\n%s", fd, 586 str_error_r(errno, sbuf, sizeof(sbuf)), info); 587 exit(EXIT_FAILURE); 588 } 589 return fd; 590 } 591 592 static u64 get_cpu_usage_nsec_self(int fd) 593 { 594 u64 runtime; 595 int ret; 596 597 ret = read(fd, &runtime, sizeof(runtime)); 598 BUG_ON(ret != sizeof(runtime)); 599 600 return runtime; 601 } 602 603 struct sched_thread_parms { 604 struct task_desc *task; 605 struct perf_sched *sched; 606 int fd; 607 }; 608 609 static void *thread_func(void *ctx) 610 { 611 struct sched_thread_parms *parms = ctx; 612 struct task_desc *this_task = parms->task; 613 struct perf_sched *sched = parms->sched; 614 u64 cpu_usage_0, cpu_usage_1; 615 unsigned long i, ret; 616 char comm2[22]; 617 int fd = parms->fd; 618 619 zfree(&parms); 620 621 sprintf(comm2, ":%s", this_task->comm); 622 prctl(PR_SET_NAME, comm2); 623 if (fd < 0) 624 return NULL; 625 again: 626 ret = sem_post(&this_task->ready_for_work); 627 BUG_ON(ret); 628 ret = pthread_mutex_lock(&sched->start_work_mutex); 629 BUG_ON(ret); 630 ret = pthread_mutex_unlock(&sched->start_work_mutex); 631 BUG_ON(ret); 632 633 cpu_usage_0 = get_cpu_usage_nsec_self(fd); 634 635 for (i = 0; i < this_task->nr_events; i++) { 636 this_task->curr_event = i; 637 perf_sched__process_event(sched, this_task->atoms[i]); 638 } 639 640 cpu_usage_1 = get_cpu_usage_nsec_self(fd); 641 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0; 642 ret = sem_post(&this_task->work_done_sem); 643 BUG_ON(ret); 644 645 ret = pthread_mutex_lock(&sched->work_done_wait_mutex); 646 BUG_ON(ret); 647 ret = pthread_mutex_unlock(&sched->work_done_wait_mutex); 648 BUG_ON(ret); 649 650 goto again; 651 } 652 653 static void create_tasks(struct perf_sched *sched) 654 { 655 struct task_desc *task; 656 pthread_attr_t attr; 657 unsigned long i; 658 int err; 659 660 err = pthread_attr_init(&attr); 661 BUG_ON(err); 662 err = pthread_attr_setstacksize(&attr, 663 (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); 664 BUG_ON(err); 665 err = pthread_mutex_lock(&sched->start_work_mutex); 666 BUG_ON(err); 667 err = pthread_mutex_lock(&sched->work_done_wait_mutex); 668 BUG_ON(err); 669 for (i = 0; i < sched->nr_tasks; i++) { 670 struct sched_thread_parms *parms = malloc(sizeof(*parms)); 671 BUG_ON(parms == NULL); 672 parms->task = task = sched->tasks[i]; 673 parms->sched = sched; 674 parms->fd = self_open_counters(sched, i); 675 sem_init(&task->sleep_sem, 0, 0); 676 sem_init(&task->ready_for_work, 0, 0); 677 sem_init(&task->work_done_sem, 0, 0); 678 task->curr_event = 0; 679 err = pthread_create(&task->thread, &attr, thread_func, parms); 680 BUG_ON(err); 681 } 682 } 683 684 static void wait_for_tasks(struct perf_sched *sched) 685 { 686 u64 cpu_usage_0, cpu_usage_1; 687 struct task_desc *task; 688 unsigned long i, ret; 689 690 sched->start_time = get_nsecs(); 691 sched->cpu_usage = 0; 692 pthread_mutex_unlock(&sched->work_done_wait_mutex); 693 694 for (i = 0; i < sched->nr_tasks; i++) { 695 task = sched->tasks[i]; 696 ret = sem_wait(&task->ready_for_work); 697 BUG_ON(ret); 698 sem_init(&task->ready_for_work, 0, 0); 699 } 700 ret = pthread_mutex_lock(&sched->work_done_wait_mutex); 701 BUG_ON(ret); 702 703 cpu_usage_0 = get_cpu_usage_nsec_parent(); 704 705 pthread_mutex_unlock(&sched->start_work_mutex); 706 707 for (i = 0; i < sched->nr_tasks; i++) { 708 task = sched->tasks[i]; 709 ret = sem_wait(&task->work_done_sem); 710 BUG_ON(ret); 711 sem_init(&task->work_done_sem, 0, 0); 712 sched->cpu_usage += task->cpu_usage; 713 task->cpu_usage = 0; 714 } 715 716 cpu_usage_1 = get_cpu_usage_nsec_parent(); 717 if (!sched->runavg_cpu_usage) 718 sched->runavg_cpu_usage = sched->cpu_usage; 719 sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat; 720 721 sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; 722 if (!sched->runavg_parent_cpu_usage) 723 sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; 724 sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) + 725 sched->parent_cpu_usage)/sched->replay_repeat; 726 727 ret = pthread_mutex_lock(&sched->start_work_mutex); 728 BUG_ON(ret); 729 730 for (i = 0; i < sched->nr_tasks; i++) { 731 task = sched->tasks[i]; 732 sem_init(&task->sleep_sem, 0, 0); 733 task->curr_event = 0; 734 } 735 } 736 737 static void run_one_test(struct perf_sched *sched) 738 { 739 u64 T0, T1, delta, avg_delta, fluct; 740 741 T0 = get_nsecs(); 742 wait_for_tasks(sched); 743 T1 = get_nsecs(); 744 745 delta = T1 - T0; 746 sched->sum_runtime += delta; 747 sched->nr_runs++; 748 749 avg_delta = sched->sum_runtime / sched->nr_runs; 750 if (delta < avg_delta) 751 fluct = avg_delta - delta; 752 else 753 fluct = delta - avg_delta; 754 sched->sum_fluct += fluct; 755 if (!sched->run_avg) 756 sched->run_avg = delta; 757 sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat; 758 759 printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC); 760 761 printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC); 762 763 printf("cpu: %0.2f / %0.2f", 764 (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC); 765 766 #if 0 767 /* 768 * rusage statistics done by the parent, these are less 769 * accurate than the sched->sum_exec_runtime based statistics: 770 */ 771 printf(" [%0.2f / %0.2f]", 772 (double)sched->parent_cpu_usage / NSEC_PER_MSEC, 773 (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC); 774 #endif 775 776 printf("\n"); 777 778 if (sched->nr_sleep_corrections) 779 printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); 780 sched->nr_sleep_corrections = 0; 781 } 782 783 static void test_calibrations(struct perf_sched *sched) 784 { 785 u64 T0, T1; 786 787 T0 = get_nsecs(); 788 burn_nsecs(sched, NSEC_PER_MSEC); 789 T1 = get_nsecs(); 790 791 printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); 792 793 T0 = get_nsecs(); 794 sleep_nsecs(NSEC_PER_MSEC); 795 T1 = get_nsecs(); 796 797 printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); 798 } 799 800 static int 801 replay_wakeup_event(struct perf_sched *sched, 802 struct perf_evsel *evsel, struct perf_sample *sample, 803 struct machine *machine __maybe_unused) 804 { 805 const char *comm = perf_evsel__strval(evsel, sample, "comm"); 806 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 807 struct task_desc *waker, *wakee; 808 809 if (verbose > 0) { 810 printf("sched_wakeup event %p\n", evsel); 811 812 printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); 813 } 814 815 waker = register_pid(sched, sample->tid, "<unknown>"); 816 wakee = register_pid(sched, pid, comm); 817 818 add_sched_event_wakeup(sched, waker, sample->time, wakee); 819 return 0; 820 } 821 822 static int replay_switch_event(struct perf_sched *sched, 823 struct perf_evsel *evsel, 824 struct perf_sample *sample, 825 struct machine *machine __maybe_unused) 826 { 827 const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), 828 *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); 829 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), 830 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 831 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); 832 struct task_desc *prev, __maybe_unused *next; 833 u64 timestamp0, timestamp = sample->time; 834 int cpu = sample->cpu; 835 s64 delta; 836 837 if (verbose > 0) 838 printf("sched_switch event %p\n", evsel); 839 840 if (cpu >= MAX_CPUS || cpu < 0) 841 return 0; 842 843 timestamp0 = sched->cpu_last_switched[cpu]; 844 if (timestamp0) 845 delta = timestamp - timestamp0; 846 else 847 delta = 0; 848 849 if (delta < 0) { 850 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 851 return -1; 852 } 853 854 pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", 855 prev_comm, prev_pid, next_comm, next_pid, delta); 856 857 prev = register_pid(sched, prev_pid, prev_comm); 858 next = register_pid(sched, next_pid, next_comm); 859 860 sched->cpu_last_switched[cpu] = timestamp; 861 862 add_sched_event_run(sched, prev, timestamp, delta); 863 add_sched_event_sleep(sched, prev, timestamp, prev_state); 864 865 return 0; 866 } 867 868 static int replay_fork_event(struct perf_sched *sched, 869 union perf_event *event, 870 struct machine *machine) 871 { 872 struct thread *child, *parent; 873 874 child = machine__findnew_thread(machine, event->fork.pid, 875 event->fork.tid); 876 parent = machine__findnew_thread(machine, event->fork.ppid, 877 event->fork.ptid); 878 879 if (child == NULL || parent == NULL) { 880 pr_debug("thread does not exist on fork event: child %p, parent %p\n", 881 child, parent); 882 goto out_put; 883 } 884 885 if (verbose > 0) { 886 printf("fork event\n"); 887 printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid); 888 printf("... child: %s/%d\n", thread__comm_str(child), child->tid); 889 } 890 891 register_pid(sched, parent->tid, thread__comm_str(parent)); 892 register_pid(sched, child->tid, thread__comm_str(child)); 893 out_put: 894 thread__put(child); 895 thread__put(parent); 896 return 0; 897 } 898 899 struct sort_dimension { 900 const char *name; 901 sort_fn_t cmp; 902 struct list_head list; 903 }; 904 905 /* 906 * handle runtime stats saved per thread 907 */ 908 static struct thread_runtime *thread__init_runtime(struct thread *thread) 909 { 910 struct thread_runtime *r; 911 912 r = zalloc(sizeof(struct thread_runtime)); 913 if (!r) 914 return NULL; 915 916 init_stats(&r->run_stats); 917 thread__set_priv(thread, r); 918 919 return r; 920 } 921 922 static struct thread_runtime *thread__get_runtime(struct thread *thread) 923 { 924 struct thread_runtime *tr; 925 926 tr = thread__priv(thread); 927 if (tr == NULL) { 928 tr = thread__init_runtime(thread); 929 if (tr == NULL) 930 pr_debug("Failed to malloc memory for runtime data.\n"); 931 } 932 933 return tr; 934 } 935 936 static int 937 thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) 938 { 939 struct sort_dimension *sort; 940 int ret = 0; 941 942 BUG_ON(list_empty(list)); 943 944 list_for_each_entry(sort, list, list) { 945 ret = sort->cmp(l, r); 946 if (ret) 947 return ret; 948 } 949 950 return ret; 951 } 952 953 static struct work_atoms * 954 thread_atoms_search(struct rb_root_cached *root, struct thread *thread, 955 struct list_head *sort_list) 956 { 957 struct rb_node *node = root->rb_root.rb_node; 958 struct work_atoms key = { .thread = thread }; 959 960 while (node) { 961 struct work_atoms *atoms; 962 int cmp; 963 964 atoms = container_of(node, struct work_atoms, node); 965 966 cmp = thread_lat_cmp(sort_list, &key, atoms); 967 if (cmp > 0) 968 node = node->rb_left; 969 else if (cmp < 0) 970 node = node->rb_right; 971 else { 972 BUG_ON(thread != atoms->thread); 973 return atoms; 974 } 975 } 976 return NULL; 977 } 978 979 static void 980 __thread_latency_insert(struct rb_root_cached *root, struct work_atoms *data, 981 struct list_head *sort_list) 982 { 983 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 984 bool leftmost = true; 985 986 while (*new) { 987 struct work_atoms *this; 988 int cmp; 989 990 this = container_of(*new, struct work_atoms, node); 991 parent = *new; 992 993 cmp = thread_lat_cmp(sort_list, data, this); 994 995 if (cmp > 0) 996 new = &((*new)->rb_left); 997 else { 998 new = &((*new)->rb_right); 999 leftmost = false; 1000 } 1001 } 1002 1003 rb_link_node(&data->node, parent, new); 1004 rb_insert_color_cached(&data->node, root, leftmost); 1005 } 1006 1007 static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) 1008 { 1009 struct work_atoms *atoms = zalloc(sizeof(*atoms)); 1010 if (!atoms) { 1011 pr_err("No memory at %s\n", __func__); 1012 return -1; 1013 } 1014 1015 atoms->thread = thread__get(thread); 1016 INIT_LIST_HEAD(&atoms->work_list); 1017 __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); 1018 return 0; 1019 } 1020 1021 static char sched_out_state(u64 prev_state) 1022 { 1023 const char *str = TASK_STATE_TO_CHAR_STR; 1024 1025 return str[prev_state]; 1026 } 1027 1028 static int 1029 add_sched_out_event(struct work_atoms *atoms, 1030 char run_state, 1031 u64 timestamp) 1032 { 1033 struct work_atom *atom = zalloc(sizeof(*atom)); 1034 if (!atom) { 1035 pr_err("Non memory at %s", __func__); 1036 return -1; 1037 } 1038 1039 atom->sched_out_time = timestamp; 1040 1041 if (run_state == 'R') { 1042 atom->state = THREAD_WAIT_CPU; 1043 atom->wake_up_time = atom->sched_out_time; 1044 } 1045 1046 list_add_tail(&atom->list, &atoms->work_list); 1047 return 0; 1048 } 1049 1050 static void 1051 add_runtime_event(struct work_atoms *atoms, u64 delta, 1052 u64 timestamp __maybe_unused) 1053 { 1054 struct work_atom *atom; 1055 1056 BUG_ON(list_empty(&atoms->work_list)); 1057 1058 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1059 1060 atom->runtime += delta; 1061 atoms->total_runtime += delta; 1062 } 1063 1064 static void 1065 add_sched_in_event(struct work_atoms *atoms, u64 timestamp) 1066 { 1067 struct work_atom *atom; 1068 u64 delta; 1069 1070 if (list_empty(&atoms->work_list)) 1071 return; 1072 1073 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1074 1075 if (atom->state != THREAD_WAIT_CPU) 1076 return; 1077 1078 if (timestamp < atom->wake_up_time) { 1079 atom->state = THREAD_IGNORE; 1080 return; 1081 } 1082 1083 atom->state = THREAD_SCHED_IN; 1084 atom->sched_in_time = timestamp; 1085 1086 delta = atom->sched_in_time - atom->wake_up_time; 1087 atoms->total_lat += delta; 1088 if (delta > atoms->max_lat) { 1089 atoms->max_lat = delta; 1090 atoms->max_lat_at = timestamp; 1091 } 1092 atoms->nb_atoms++; 1093 } 1094 1095 static int latency_switch_event(struct perf_sched *sched, 1096 struct perf_evsel *evsel, 1097 struct perf_sample *sample, 1098 struct machine *machine) 1099 { 1100 const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), 1101 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1102 const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); 1103 struct work_atoms *out_events, *in_events; 1104 struct thread *sched_out, *sched_in; 1105 u64 timestamp0, timestamp = sample->time; 1106 int cpu = sample->cpu, err = -1; 1107 s64 delta; 1108 1109 BUG_ON(cpu >= MAX_CPUS || cpu < 0); 1110 1111 timestamp0 = sched->cpu_last_switched[cpu]; 1112 sched->cpu_last_switched[cpu] = timestamp; 1113 if (timestamp0) 1114 delta = timestamp - timestamp0; 1115 else 1116 delta = 0; 1117 1118 if (delta < 0) { 1119 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 1120 return -1; 1121 } 1122 1123 sched_out = machine__findnew_thread(machine, -1, prev_pid); 1124 sched_in = machine__findnew_thread(machine, -1, next_pid); 1125 if (sched_out == NULL || sched_in == NULL) 1126 goto out_put; 1127 1128 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); 1129 if (!out_events) { 1130 if (thread_atoms_insert(sched, sched_out)) 1131 goto out_put; 1132 out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); 1133 if (!out_events) { 1134 pr_err("out-event: Internal tree error"); 1135 goto out_put; 1136 } 1137 } 1138 if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp)) 1139 return -1; 1140 1141 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); 1142 if (!in_events) { 1143 if (thread_atoms_insert(sched, sched_in)) 1144 goto out_put; 1145 in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); 1146 if (!in_events) { 1147 pr_err("in-event: Internal tree error"); 1148 goto out_put; 1149 } 1150 /* 1151 * Take came in we have not heard about yet, 1152 * add in an initial atom in runnable state: 1153 */ 1154 if (add_sched_out_event(in_events, 'R', timestamp)) 1155 goto out_put; 1156 } 1157 add_sched_in_event(in_events, timestamp); 1158 err = 0; 1159 out_put: 1160 thread__put(sched_out); 1161 thread__put(sched_in); 1162 return err; 1163 } 1164 1165 static int latency_runtime_event(struct perf_sched *sched, 1166 struct perf_evsel *evsel, 1167 struct perf_sample *sample, 1168 struct machine *machine) 1169 { 1170 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 1171 const u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); 1172 struct thread *thread = machine__findnew_thread(machine, -1, pid); 1173 struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); 1174 u64 timestamp = sample->time; 1175 int cpu = sample->cpu, err = -1; 1176 1177 if (thread == NULL) 1178 return -1; 1179 1180 BUG_ON(cpu >= MAX_CPUS || cpu < 0); 1181 if (!atoms) { 1182 if (thread_atoms_insert(sched, thread)) 1183 goto out_put; 1184 atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); 1185 if (!atoms) { 1186 pr_err("in-event: Internal tree error"); 1187 goto out_put; 1188 } 1189 if (add_sched_out_event(atoms, 'R', timestamp)) 1190 goto out_put; 1191 } 1192 1193 add_runtime_event(atoms, runtime, timestamp); 1194 err = 0; 1195 out_put: 1196 thread__put(thread); 1197 return err; 1198 } 1199 1200 static int latency_wakeup_event(struct perf_sched *sched, 1201 struct perf_evsel *evsel, 1202 struct perf_sample *sample, 1203 struct machine *machine) 1204 { 1205 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 1206 struct work_atoms *atoms; 1207 struct work_atom *atom; 1208 struct thread *wakee; 1209 u64 timestamp = sample->time; 1210 int err = -1; 1211 1212 wakee = machine__findnew_thread(machine, -1, pid); 1213 if (wakee == NULL) 1214 return -1; 1215 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); 1216 if (!atoms) { 1217 if (thread_atoms_insert(sched, wakee)) 1218 goto out_put; 1219 atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); 1220 if (!atoms) { 1221 pr_err("wakeup-event: Internal tree error"); 1222 goto out_put; 1223 } 1224 if (add_sched_out_event(atoms, 'S', timestamp)) 1225 goto out_put; 1226 } 1227 1228 BUG_ON(list_empty(&atoms->work_list)); 1229 1230 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1231 1232 /* 1233 * As we do not guarantee the wakeup event happens when 1234 * task is out of run queue, also may happen when task is 1235 * on run queue and wakeup only change ->state to TASK_RUNNING, 1236 * then we should not set the ->wake_up_time when wake up a 1237 * task which is on run queue. 1238 * 1239 * You WILL be missing events if you've recorded only 1240 * one CPU, or are only looking at only one, so don't 1241 * skip in this case. 1242 */ 1243 if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) 1244 goto out_ok; 1245 1246 sched->nr_timestamps++; 1247 if (atom->sched_out_time > timestamp) { 1248 sched->nr_unordered_timestamps++; 1249 goto out_ok; 1250 } 1251 1252 atom->state = THREAD_WAIT_CPU; 1253 atom->wake_up_time = timestamp; 1254 out_ok: 1255 err = 0; 1256 out_put: 1257 thread__put(wakee); 1258 return err; 1259 } 1260 1261 static int latency_migrate_task_event(struct perf_sched *sched, 1262 struct perf_evsel *evsel, 1263 struct perf_sample *sample, 1264 struct machine *machine) 1265 { 1266 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 1267 u64 timestamp = sample->time; 1268 struct work_atoms *atoms; 1269 struct work_atom *atom; 1270 struct thread *migrant; 1271 int err = -1; 1272 1273 /* 1274 * Only need to worry about migration when profiling one CPU. 1275 */ 1276 if (sched->profile_cpu == -1) 1277 return 0; 1278 1279 migrant = machine__findnew_thread(machine, -1, pid); 1280 if (migrant == NULL) 1281 return -1; 1282 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); 1283 if (!atoms) { 1284 if (thread_atoms_insert(sched, migrant)) 1285 goto out_put; 1286 register_pid(sched, migrant->tid, thread__comm_str(migrant)); 1287 atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); 1288 if (!atoms) { 1289 pr_err("migration-event: Internal tree error"); 1290 goto out_put; 1291 } 1292 if (add_sched_out_event(atoms, 'R', timestamp)) 1293 goto out_put; 1294 } 1295 1296 BUG_ON(list_empty(&atoms->work_list)); 1297 1298 atom = list_entry(atoms->work_list.prev, struct work_atom, list); 1299 atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; 1300 1301 sched->nr_timestamps++; 1302 1303 if (atom->sched_out_time > timestamp) 1304 sched->nr_unordered_timestamps++; 1305 err = 0; 1306 out_put: 1307 thread__put(migrant); 1308 return err; 1309 } 1310 1311 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) 1312 { 1313 int i; 1314 int ret; 1315 u64 avg; 1316 char max_lat_at[32]; 1317 1318 if (!work_list->nb_atoms) 1319 return; 1320 /* 1321 * Ignore idle threads: 1322 */ 1323 if (!strcmp(thread__comm_str(work_list->thread), "swapper")) 1324 return; 1325 1326 sched->all_runtime += work_list->total_runtime; 1327 sched->all_count += work_list->nb_atoms; 1328 1329 if (work_list->num_merged > 1) 1330 ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged); 1331 else 1332 ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid); 1333 1334 for (i = 0; i < 24 - ret; i++) 1335 printf(" "); 1336 1337 avg = work_list->total_lat / work_list->nb_atoms; 1338 timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at)); 1339 1340 printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n", 1341 (double)work_list->total_runtime / NSEC_PER_MSEC, 1342 work_list->nb_atoms, (double)avg / NSEC_PER_MSEC, 1343 (double)work_list->max_lat / NSEC_PER_MSEC, 1344 max_lat_at); 1345 } 1346 1347 static int pid_cmp(struct work_atoms *l, struct work_atoms *r) 1348 { 1349 if (l->thread == r->thread) 1350 return 0; 1351 if (l->thread->tid < r->thread->tid) 1352 return -1; 1353 if (l->thread->tid > r->thread->tid) 1354 return 1; 1355 return (int)(l->thread - r->thread); 1356 } 1357 1358 static int avg_cmp(struct work_atoms *l, struct work_atoms *r) 1359 { 1360 u64 avgl, avgr; 1361 1362 if (!l->nb_atoms) 1363 return -1; 1364 1365 if (!r->nb_atoms) 1366 return 1; 1367 1368 avgl = l->total_lat / l->nb_atoms; 1369 avgr = r->total_lat / r->nb_atoms; 1370 1371 if (avgl < avgr) 1372 return -1; 1373 if (avgl > avgr) 1374 return 1; 1375 1376 return 0; 1377 } 1378 1379 static int max_cmp(struct work_atoms *l, struct work_atoms *r) 1380 { 1381 if (l->max_lat < r->max_lat) 1382 return -1; 1383 if (l->max_lat > r->max_lat) 1384 return 1; 1385 1386 return 0; 1387 } 1388 1389 static int switch_cmp(struct work_atoms *l, struct work_atoms *r) 1390 { 1391 if (l->nb_atoms < r->nb_atoms) 1392 return -1; 1393 if (l->nb_atoms > r->nb_atoms) 1394 return 1; 1395 1396 return 0; 1397 } 1398 1399 static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) 1400 { 1401 if (l->total_runtime < r->total_runtime) 1402 return -1; 1403 if (l->total_runtime > r->total_runtime) 1404 return 1; 1405 1406 return 0; 1407 } 1408 1409 static int sort_dimension__add(const char *tok, struct list_head *list) 1410 { 1411 size_t i; 1412 static struct sort_dimension avg_sort_dimension = { 1413 .name = "avg", 1414 .cmp = avg_cmp, 1415 }; 1416 static struct sort_dimension max_sort_dimension = { 1417 .name = "max", 1418 .cmp = max_cmp, 1419 }; 1420 static struct sort_dimension pid_sort_dimension = { 1421 .name = "pid", 1422 .cmp = pid_cmp, 1423 }; 1424 static struct sort_dimension runtime_sort_dimension = { 1425 .name = "runtime", 1426 .cmp = runtime_cmp, 1427 }; 1428 static struct sort_dimension switch_sort_dimension = { 1429 .name = "switch", 1430 .cmp = switch_cmp, 1431 }; 1432 struct sort_dimension *available_sorts[] = { 1433 &pid_sort_dimension, 1434 &avg_sort_dimension, 1435 &max_sort_dimension, 1436 &switch_sort_dimension, 1437 &runtime_sort_dimension, 1438 }; 1439 1440 for (i = 0; i < ARRAY_SIZE(available_sorts); i++) { 1441 if (!strcmp(available_sorts[i]->name, tok)) { 1442 list_add_tail(&available_sorts[i]->list, list); 1443 1444 return 0; 1445 } 1446 } 1447 1448 return -1; 1449 } 1450 1451 static void perf_sched__sort_lat(struct perf_sched *sched) 1452 { 1453 struct rb_node *node; 1454 struct rb_root_cached *root = &sched->atom_root; 1455 again: 1456 for (;;) { 1457 struct work_atoms *data; 1458 node = rb_first_cached(root); 1459 if (!node) 1460 break; 1461 1462 rb_erase_cached(node, root); 1463 data = rb_entry(node, struct work_atoms, node); 1464 __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); 1465 } 1466 if (root == &sched->atom_root) { 1467 root = &sched->merged_atom_root; 1468 goto again; 1469 } 1470 } 1471 1472 static int process_sched_wakeup_event(struct perf_tool *tool, 1473 struct perf_evsel *evsel, 1474 struct perf_sample *sample, 1475 struct machine *machine) 1476 { 1477 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1478 1479 if (sched->tp_handler->wakeup_event) 1480 return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); 1481 1482 return 0; 1483 } 1484 1485 union map_priv { 1486 void *ptr; 1487 bool color; 1488 }; 1489 1490 static bool thread__has_color(struct thread *thread) 1491 { 1492 union map_priv priv = { 1493 .ptr = thread__priv(thread), 1494 }; 1495 1496 return priv.color; 1497 } 1498 1499 static struct thread* 1500 map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid) 1501 { 1502 struct thread *thread = machine__findnew_thread(machine, pid, tid); 1503 union map_priv priv = { 1504 .color = false, 1505 }; 1506 1507 if (!sched->map.color_pids || !thread || thread__priv(thread)) 1508 return thread; 1509 1510 if (thread_map__has(sched->map.color_pids, tid)) 1511 priv.color = true; 1512 1513 thread__set_priv(thread, priv.ptr); 1514 return thread; 1515 } 1516 1517 static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, 1518 struct perf_sample *sample, struct machine *machine) 1519 { 1520 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1521 struct thread *sched_in; 1522 struct thread_runtime *tr; 1523 int new_shortname; 1524 u64 timestamp0, timestamp = sample->time; 1525 s64 delta; 1526 int i, this_cpu = sample->cpu; 1527 int cpus_nr; 1528 bool new_cpu = false; 1529 const char *color = PERF_COLOR_NORMAL; 1530 char stimestamp[32]; 1531 1532 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); 1533 1534 if (this_cpu > sched->max_cpu) 1535 sched->max_cpu = this_cpu; 1536 1537 if (sched->map.comp) { 1538 cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); 1539 if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) { 1540 sched->map.comp_cpus[cpus_nr++] = this_cpu; 1541 new_cpu = true; 1542 } 1543 } else 1544 cpus_nr = sched->max_cpu; 1545 1546 timestamp0 = sched->cpu_last_switched[this_cpu]; 1547 sched->cpu_last_switched[this_cpu] = timestamp; 1548 if (timestamp0) 1549 delta = timestamp - timestamp0; 1550 else 1551 delta = 0; 1552 1553 if (delta < 0) { 1554 pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); 1555 return -1; 1556 } 1557 1558 sched_in = map__findnew_thread(sched, machine, -1, next_pid); 1559 if (sched_in == NULL) 1560 return -1; 1561 1562 tr = thread__get_runtime(sched_in); 1563 if (tr == NULL) { 1564 thread__put(sched_in); 1565 return -1; 1566 } 1567 1568 sched->curr_thread[this_cpu] = thread__get(sched_in); 1569 1570 printf(" "); 1571 1572 new_shortname = 0; 1573 if (!tr->shortname[0]) { 1574 if (!strcmp(thread__comm_str(sched_in), "swapper")) { 1575 /* 1576 * Don't allocate a letter-number for swapper:0 1577 * as a shortname. Instead, we use '.' for it. 1578 */ 1579 tr->shortname[0] = '.'; 1580 tr->shortname[1] = ' '; 1581 } else { 1582 tr->shortname[0] = sched->next_shortname1; 1583 tr->shortname[1] = sched->next_shortname2; 1584 1585 if (sched->next_shortname1 < 'Z') { 1586 sched->next_shortname1++; 1587 } else { 1588 sched->next_shortname1 = 'A'; 1589 if (sched->next_shortname2 < '9') 1590 sched->next_shortname2++; 1591 else 1592 sched->next_shortname2 = '0'; 1593 } 1594 } 1595 new_shortname = 1; 1596 } 1597 1598 for (i = 0; i < cpus_nr; i++) { 1599 int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i; 1600 struct thread *curr_thread = sched->curr_thread[cpu]; 1601 struct thread_runtime *curr_tr; 1602 const char *pid_color = color; 1603 const char *cpu_color = color; 1604 1605 if (curr_thread && thread__has_color(curr_thread)) 1606 pid_color = COLOR_PIDS; 1607 1608 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu)) 1609 continue; 1610 1611 if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu)) 1612 cpu_color = COLOR_CPUS; 1613 1614 if (cpu != this_cpu) 1615 color_fprintf(stdout, color, " "); 1616 else 1617 color_fprintf(stdout, cpu_color, "*"); 1618 1619 if (sched->curr_thread[cpu]) { 1620 curr_tr = thread__get_runtime(sched->curr_thread[cpu]); 1621 if (curr_tr == NULL) { 1622 thread__put(sched_in); 1623 return -1; 1624 } 1625 color_fprintf(stdout, pid_color, "%2s ", curr_tr->shortname); 1626 } else 1627 color_fprintf(stdout, color, " "); 1628 } 1629 1630 if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu)) 1631 goto out; 1632 1633 timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp)); 1634 color_fprintf(stdout, color, " %12s secs ", stimestamp); 1635 if (new_shortname || tr->comm_changed || (verbose > 0 && sched_in->tid)) { 1636 const char *pid_color = color; 1637 1638 if (thread__has_color(sched_in)) 1639 pid_color = COLOR_PIDS; 1640 1641 color_fprintf(stdout, pid_color, "%s => %s:%d", 1642 tr->shortname, thread__comm_str(sched_in), sched_in->tid); 1643 tr->comm_changed = false; 1644 } 1645 1646 if (sched->map.comp && new_cpu) 1647 color_fprintf(stdout, color, " (CPU %d)", this_cpu); 1648 1649 out: 1650 color_fprintf(stdout, color, "\n"); 1651 1652 thread__put(sched_in); 1653 1654 return 0; 1655 } 1656 1657 static int process_sched_switch_event(struct perf_tool *tool, 1658 struct perf_evsel *evsel, 1659 struct perf_sample *sample, 1660 struct machine *machine) 1661 { 1662 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1663 int this_cpu = sample->cpu, err = 0; 1664 u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), 1665 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 1666 1667 if (sched->curr_pid[this_cpu] != (u32)-1) { 1668 /* 1669 * Are we trying to switch away a PID that is 1670 * not current? 1671 */ 1672 if (sched->curr_pid[this_cpu] != prev_pid) 1673 sched->nr_context_switch_bugs++; 1674 } 1675 1676 if (sched->tp_handler->switch_event) 1677 err = sched->tp_handler->switch_event(sched, evsel, sample, machine); 1678 1679 sched->curr_pid[this_cpu] = next_pid; 1680 return err; 1681 } 1682 1683 static int process_sched_runtime_event(struct perf_tool *tool, 1684 struct perf_evsel *evsel, 1685 struct perf_sample *sample, 1686 struct machine *machine) 1687 { 1688 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1689 1690 if (sched->tp_handler->runtime_event) 1691 return sched->tp_handler->runtime_event(sched, evsel, sample, machine); 1692 1693 return 0; 1694 } 1695 1696 static int perf_sched__process_fork_event(struct perf_tool *tool, 1697 union perf_event *event, 1698 struct perf_sample *sample, 1699 struct machine *machine) 1700 { 1701 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1702 1703 /* run the fork event through the perf machineruy */ 1704 perf_event__process_fork(tool, event, sample, machine); 1705 1706 /* and then run additional processing needed for this command */ 1707 if (sched->tp_handler->fork_event) 1708 return sched->tp_handler->fork_event(sched, event, machine); 1709 1710 return 0; 1711 } 1712 1713 static int process_sched_migrate_task_event(struct perf_tool *tool, 1714 struct perf_evsel *evsel, 1715 struct perf_sample *sample, 1716 struct machine *machine) 1717 { 1718 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 1719 1720 if (sched->tp_handler->migrate_task_event) 1721 return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); 1722 1723 return 0; 1724 } 1725 1726 typedef int (*tracepoint_handler)(struct perf_tool *tool, 1727 struct perf_evsel *evsel, 1728 struct perf_sample *sample, 1729 struct machine *machine); 1730 1731 static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, 1732 union perf_event *event __maybe_unused, 1733 struct perf_sample *sample, 1734 struct perf_evsel *evsel, 1735 struct machine *machine) 1736 { 1737 int err = 0; 1738 1739 if (evsel->handler != NULL) { 1740 tracepoint_handler f = evsel->handler; 1741 err = f(tool, evsel, sample, machine); 1742 } 1743 1744 return err; 1745 } 1746 1747 static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused, 1748 union perf_event *event, 1749 struct perf_sample *sample, 1750 struct machine *machine) 1751 { 1752 struct thread *thread; 1753 struct thread_runtime *tr; 1754 int err; 1755 1756 err = perf_event__process_comm(tool, event, sample, machine); 1757 if (err) 1758 return err; 1759 1760 thread = machine__find_thread(machine, sample->pid, sample->tid); 1761 if (!thread) { 1762 pr_err("Internal error: can't find thread\n"); 1763 return -1; 1764 } 1765 1766 tr = thread__get_runtime(thread); 1767 if (tr == NULL) { 1768 thread__put(thread); 1769 return -1; 1770 } 1771 1772 tr->comm_changed = true; 1773 thread__put(thread); 1774 1775 return 0; 1776 } 1777 1778 static int perf_sched__read_events(struct perf_sched *sched) 1779 { 1780 const struct perf_evsel_str_handler handlers[] = { 1781 { "sched:sched_switch", process_sched_switch_event, }, 1782 { "sched:sched_stat_runtime", process_sched_runtime_event, }, 1783 { "sched:sched_wakeup", process_sched_wakeup_event, }, 1784 { "sched:sched_wakeup_new", process_sched_wakeup_event, }, 1785 { "sched:sched_migrate_task", process_sched_migrate_task_event, }, 1786 }; 1787 struct perf_session *session; 1788 struct perf_data data = { 1789 .path = input_name, 1790 .mode = PERF_DATA_MODE_READ, 1791 .force = sched->force, 1792 }; 1793 int rc = -1; 1794 1795 session = perf_session__new(&data, false, &sched->tool); 1796 if (session == NULL) { 1797 pr_debug("No Memory for session\n"); 1798 return -1; 1799 } 1800 1801 symbol__init(&session->header.env); 1802 1803 if (perf_session__set_tracepoints_handlers(session, handlers)) 1804 goto out_delete; 1805 1806 if (perf_session__has_traces(session, "record -R")) { 1807 int err = perf_session__process_events(session); 1808 if (err) { 1809 pr_err("Failed to process events, error %d", err); 1810 goto out_delete; 1811 } 1812 1813 sched->nr_events = session->evlist->stats.nr_events[0]; 1814 sched->nr_lost_events = session->evlist->stats.total_lost; 1815 sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST]; 1816 } 1817 1818 rc = 0; 1819 out_delete: 1820 perf_session__delete(session); 1821 return rc; 1822 } 1823 1824 /* 1825 * scheduling times are printed as msec.usec 1826 */ 1827 static inline void print_sched_time(unsigned long long nsecs, int width) 1828 { 1829 unsigned long msecs; 1830 unsigned long usecs; 1831 1832 msecs = nsecs / NSEC_PER_MSEC; 1833 nsecs -= msecs * NSEC_PER_MSEC; 1834 usecs = nsecs / NSEC_PER_USEC; 1835 printf("%*lu.%03lu ", width, msecs, usecs); 1836 } 1837 1838 /* 1839 * returns runtime data for event, allocating memory for it the 1840 * first time it is used. 1841 */ 1842 static struct evsel_runtime *perf_evsel__get_runtime(struct perf_evsel *evsel) 1843 { 1844 struct evsel_runtime *r = evsel->priv; 1845 1846 if (r == NULL) { 1847 r = zalloc(sizeof(struct evsel_runtime)); 1848 evsel->priv = r; 1849 } 1850 1851 return r; 1852 } 1853 1854 /* 1855 * save last time event was seen per cpu 1856 */ 1857 static void perf_evsel__save_time(struct perf_evsel *evsel, 1858 u64 timestamp, u32 cpu) 1859 { 1860 struct evsel_runtime *r = perf_evsel__get_runtime(evsel); 1861 1862 if (r == NULL) 1863 return; 1864 1865 if ((cpu >= r->ncpu) || (r->last_time == NULL)) { 1866 int i, n = __roundup_pow_of_two(cpu+1); 1867 void *p = r->last_time; 1868 1869 p = realloc(r->last_time, n * sizeof(u64)); 1870 if (!p) 1871 return; 1872 1873 r->last_time = p; 1874 for (i = r->ncpu; i < n; ++i) 1875 r->last_time[i] = (u64) 0; 1876 1877 r->ncpu = n; 1878 } 1879 1880 r->last_time[cpu] = timestamp; 1881 } 1882 1883 /* returns last time this event was seen on the given cpu */ 1884 static u64 perf_evsel__get_time(struct perf_evsel *evsel, u32 cpu) 1885 { 1886 struct evsel_runtime *r = perf_evsel__get_runtime(evsel); 1887 1888 if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu)) 1889 return 0; 1890 1891 return r->last_time[cpu]; 1892 } 1893 1894 static int comm_width = 30; 1895 1896 static char *timehist_get_commstr(struct thread *thread) 1897 { 1898 static char str[32]; 1899 const char *comm = thread__comm_str(thread); 1900 pid_t tid = thread->tid; 1901 pid_t pid = thread->pid_; 1902 int n; 1903 1904 if (pid == 0) 1905 n = scnprintf(str, sizeof(str), "%s", comm); 1906 1907 else if (tid != pid) 1908 n = scnprintf(str, sizeof(str), "%s[%d/%d]", comm, tid, pid); 1909 1910 else 1911 n = scnprintf(str, sizeof(str), "%s[%d]", comm, tid); 1912 1913 if (n > comm_width) 1914 comm_width = n; 1915 1916 return str; 1917 } 1918 1919 static void timehist_header(struct perf_sched *sched) 1920 { 1921 u32 ncpus = sched->max_cpu + 1; 1922 u32 i, j; 1923 1924 printf("%15s %6s ", "time", "cpu"); 1925 1926 if (sched->show_cpu_visual) { 1927 printf(" "); 1928 for (i = 0, j = 0; i < ncpus; ++i) { 1929 printf("%x", j++); 1930 if (j > 15) 1931 j = 0; 1932 } 1933 printf(" "); 1934 } 1935 1936 printf(" %-*s %9s %9s %9s", comm_width, 1937 "task name", "wait time", "sch delay", "run time"); 1938 1939 if (sched->show_state) 1940 printf(" %s", "state"); 1941 1942 printf("\n"); 1943 1944 /* 1945 * units row 1946 */ 1947 printf("%15s %-6s ", "", ""); 1948 1949 if (sched->show_cpu_visual) 1950 printf(" %*s ", ncpus, ""); 1951 1952 printf(" %-*s %9s %9s %9s", comm_width, 1953 "[tid/pid]", "(msec)", "(msec)", "(msec)"); 1954 1955 if (sched->show_state) 1956 printf(" %5s", ""); 1957 1958 printf("\n"); 1959 1960 /* 1961 * separator 1962 */ 1963 printf("%.15s %.6s ", graph_dotted_line, graph_dotted_line); 1964 1965 if (sched->show_cpu_visual) 1966 printf(" %.*s ", ncpus, graph_dotted_line); 1967 1968 printf(" %.*s %.9s %.9s %.9s", comm_width, 1969 graph_dotted_line, graph_dotted_line, graph_dotted_line, 1970 graph_dotted_line); 1971 1972 if (sched->show_state) 1973 printf(" %.5s", graph_dotted_line); 1974 1975 printf("\n"); 1976 } 1977 1978 static char task_state_char(struct thread *thread, int state) 1979 { 1980 static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1981 unsigned bit = state ? ffs(state) : 0; 1982 1983 /* 'I' for idle */ 1984 if (thread->tid == 0) 1985 return 'I'; 1986 1987 return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; 1988 } 1989 1990 static void timehist_print_sample(struct perf_sched *sched, 1991 struct perf_evsel *evsel, 1992 struct perf_sample *sample, 1993 struct addr_location *al, 1994 struct thread *thread, 1995 u64 t, int state) 1996 { 1997 struct thread_runtime *tr = thread__priv(thread); 1998 const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); 1999 const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid"); 2000 u32 max_cpus = sched->max_cpu + 1; 2001 char tstr[64]; 2002 char nstr[30]; 2003 u64 wait_time; 2004 2005 timestamp__scnprintf_usec(t, tstr, sizeof(tstr)); 2006 printf("%15s [%04d] ", tstr, sample->cpu); 2007 2008 if (sched->show_cpu_visual) { 2009 u32 i; 2010 char c; 2011 2012 printf(" "); 2013 for (i = 0; i < max_cpus; ++i) { 2014 /* flag idle times with 'i'; others are sched events */ 2015 if (i == sample->cpu) 2016 c = (thread->tid == 0) ? 'i' : 's'; 2017 else 2018 c = ' '; 2019 printf("%c", c); 2020 } 2021 printf(" "); 2022 } 2023 2024 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2025 2026 wait_time = tr->dt_sleep + tr->dt_iowait + tr->dt_preempt; 2027 print_sched_time(wait_time, 6); 2028 2029 print_sched_time(tr->dt_delay, 6); 2030 print_sched_time(tr->dt_run, 6); 2031 2032 if (sched->show_state) 2033 printf(" %5c ", task_state_char(thread, state)); 2034 2035 if (sched->show_next) { 2036 snprintf(nstr, sizeof(nstr), "next: %s[%d]", next_comm, next_pid); 2037 printf(" %-*s", comm_width, nstr); 2038 } 2039 2040 if (sched->show_wakeups && !sched->show_next) 2041 printf(" %-*s", comm_width, ""); 2042 2043 if (thread->tid == 0) 2044 goto out; 2045 2046 if (sched->show_callchain) 2047 printf(" "); 2048 2049 sample__fprintf_sym(sample, al, 0, 2050 EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE | 2051 EVSEL__PRINT_CALLCHAIN_ARROW | 2052 EVSEL__PRINT_SKIP_IGNORED, 2053 &callchain_cursor, stdout); 2054 2055 out: 2056 printf("\n"); 2057 } 2058 2059 /* 2060 * Explanation of delta-time stats: 2061 * 2062 * t = time of current schedule out event 2063 * tprev = time of previous sched out event 2064 * also time of schedule-in event for current task 2065 * last_time = time of last sched change event for current task 2066 * (i.e, time process was last scheduled out) 2067 * ready_to_run = time of wakeup for current task 2068 * 2069 * -----|------------|------------|------------|------ 2070 * last ready tprev t 2071 * time to run 2072 * 2073 * |-------- dt_wait --------| 2074 * |- dt_delay -|-- dt_run --| 2075 * 2076 * dt_run = run time of current task 2077 * dt_wait = time between last schedule out event for task and tprev 2078 * represents time spent off the cpu 2079 * dt_delay = time between wakeup and schedule-in of task 2080 */ 2081 2082 static void timehist_update_runtime_stats(struct thread_runtime *r, 2083 u64 t, u64 tprev) 2084 { 2085 r->dt_delay = 0; 2086 r->dt_sleep = 0; 2087 r->dt_iowait = 0; 2088 r->dt_preempt = 0; 2089 r->dt_run = 0; 2090 2091 if (tprev) { 2092 r->dt_run = t - tprev; 2093 if (r->ready_to_run) { 2094 if (r->ready_to_run > tprev) 2095 pr_debug("time travel: wakeup time for task > previous sched_switch event\n"); 2096 else 2097 r->dt_delay = tprev - r->ready_to_run; 2098 } 2099 2100 if (r->last_time > tprev) 2101 pr_debug("time travel: last sched out time for task > previous sched_switch event\n"); 2102 else if (r->last_time) { 2103 u64 dt_wait = tprev - r->last_time; 2104 2105 if (r->last_state == TASK_RUNNING) 2106 r->dt_preempt = dt_wait; 2107 else if (r->last_state == TASK_UNINTERRUPTIBLE) 2108 r->dt_iowait = dt_wait; 2109 else 2110 r->dt_sleep = dt_wait; 2111 } 2112 } 2113 2114 update_stats(&r->run_stats, r->dt_run); 2115 2116 r->total_run_time += r->dt_run; 2117 r->total_delay_time += r->dt_delay; 2118 r->total_sleep_time += r->dt_sleep; 2119 r->total_iowait_time += r->dt_iowait; 2120 r->total_preempt_time += r->dt_preempt; 2121 } 2122 2123 static bool is_idle_sample(struct perf_sample *sample, 2124 struct perf_evsel *evsel) 2125 { 2126 /* pid 0 == swapper == idle task */ 2127 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0) 2128 return perf_evsel__intval(evsel, sample, "prev_pid") == 0; 2129 2130 return sample->pid == 0; 2131 } 2132 2133 static void save_task_callchain(struct perf_sched *sched, 2134 struct perf_sample *sample, 2135 struct perf_evsel *evsel, 2136 struct machine *machine) 2137 { 2138 struct callchain_cursor *cursor = &callchain_cursor; 2139 struct thread *thread; 2140 2141 /* want main thread for process - has maps */ 2142 thread = machine__findnew_thread(machine, sample->pid, sample->pid); 2143 if (thread == NULL) { 2144 pr_debug("Failed to get thread for pid %d.\n", sample->pid); 2145 return; 2146 } 2147 2148 if (!sched->show_callchain || sample->callchain == NULL) 2149 return; 2150 2151 if (thread__resolve_callchain(thread, cursor, evsel, sample, 2152 NULL, NULL, sched->max_stack + 2) != 0) { 2153 if (verbose > 0) 2154 pr_err("Failed to resolve callchain. Skipping\n"); 2155 2156 return; 2157 } 2158 2159 callchain_cursor_commit(cursor); 2160 2161 while (true) { 2162 struct callchain_cursor_node *node; 2163 struct symbol *sym; 2164 2165 node = callchain_cursor_current(cursor); 2166 if (node == NULL) 2167 break; 2168 2169 sym = node->sym; 2170 if (sym) { 2171 if (!strcmp(sym->name, "schedule") || 2172 !strcmp(sym->name, "__schedule") || 2173 !strcmp(sym->name, "preempt_schedule")) 2174 sym->ignore = 1; 2175 } 2176 2177 callchain_cursor_advance(cursor); 2178 } 2179 } 2180 2181 static int init_idle_thread(struct thread *thread) 2182 { 2183 struct idle_thread_runtime *itr; 2184 2185 thread__set_comm(thread, idle_comm, 0); 2186 2187 itr = zalloc(sizeof(*itr)); 2188 if (itr == NULL) 2189 return -ENOMEM; 2190 2191 init_stats(&itr->tr.run_stats); 2192 callchain_init(&itr->callchain); 2193 callchain_cursor_reset(&itr->cursor); 2194 thread__set_priv(thread, itr); 2195 2196 return 0; 2197 } 2198 2199 /* 2200 * Track idle stats per cpu by maintaining a local thread 2201 * struct for the idle task on each cpu. 2202 */ 2203 static int init_idle_threads(int ncpu) 2204 { 2205 int i, ret; 2206 2207 idle_threads = zalloc(ncpu * sizeof(struct thread *)); 2208 if (!idle_threads) 2209 return -ENOMEM; 2210 2211 idle_max_cpu = ncpu; 2212 2213 /* allocate the actual thread struct if needed */ 2214 for (i = 0; i < ncpu; ++i) { 2215 idle_threads[i] = thread__new(0, 0); 2216 if (idle_threads[i] == NULL) 2217 return -ENOMEM; 2218 2219 ret = init_idle_thread(idle_threads[i]); 2220 if (ret < 0) 2221 return ret; 2222 } 2223 2224 return 0; 2225 } 2226 2227 static void free_idle_threads(void) 2228 { 2229 int i; 2230 2231 if (idle_threads == NULL) 2232 return; 2233 2234 for (i = 0; i < idle_max_cpu; ++i) { 2235 if ((idle_threads[i])) 2236 thread__delete(idle_threads[i]); 2237 } 2238 2239 free(idle_threads); 2240 } 2241 2242 static struct thread *get_idle_thread(int cpu) 2243 { 2244 /* 2245 * expand/allocate array of pointers to local thread 2246 * structs if needed 2247 */ 2248 if ((cpu >= idle_max_cpu) || (idle_threads == NULL)) { 2249 int i, j = __roundup_pow_of_two(cpu+1); 2250 void *p; 2251 2252 p = realloc(idle_threads, j * sizeof(struct thread *)); 2253 if (!p) 2254 return NULL; 2255 2256 idle_threads = (struct thread **) p; 2257 for (i = idle_max_cpu; i < j; ++i) 2258 idle_threads[i] = NULL; 2259 2260 idle_max_cpu = j; 2261 } 2262 2263 /* allocate a new thread struct if needed */ 2264 if (idle_threads[cpu] == NULL) { 2265 idle_threads[cpu] = thread__new(0, 0); 2266 2267 if (idle_threads[cpu]) { 2268 if (init_idle_thread(idle_threads[cpu]) < 0) 2269 return NULL; 2270 } 2271 } 2272 2273 return idle_threads[cpu]; 2274 } 2275 2276 static void save_idle_callchain(struct perf_sched *sched, 2277 struct idle_thread_runtime *itr, 2278 struct perf_sample *sample) 2279 { 2280 if (!sched->show_callchain || sample->callchain == NULL) 2281 return; 2282 2283 callchain_cursor__copy(&itr->cursor, &callchain_cursor); 2284 } 2285 2286 static struct thread *timehist_get_thread(struct perf_sched *sched, 2287 struct perf_sample *sample, 2288 struct machine *machine, 2289 struct perf_evsel *evsel) 2290 { 2291 struct thread *thread; 2292 2293 if (is_idle_sample(sample, evsel)) { 2294 thread = get_idle_thread(sample->cpu); 2295 if (thread == NULL) 2296 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu); 2297 2298 } else { 2299 /* there were samples with tid 0 but non-zero pid */ 2300 thread = machine__findnew_thread(machine, sample->pid, 2301 sample->tid ?: sample->pid); 2302 if (thread == NULL) { 2303 pr_debug("Failed to get thread for tid %d. skipping sample.\n", 2304 sample->tid); 2305 } 2306 2307 save_task_callchain(sched, sample, evsel, machine); 2308 if (sched->idle_hist) { 2309 struct thread *idle; 2310 struct idle_thread_runtime *itr; 2311 2312 idle = get_idle_thread(sample->cpu); 2313 if (idle == NULL) { 2314 pr_err("Failed to get idle thread for cpu %d.\n", sample->cpu); 2315 return NULL; 2316 } 2317 2318 itr = thread__priv(idle); 2319 if (itr == NULL) 2320 return NULL; 2321 2322 itr->last_thread = thread; 2323 2324 /* copy task callchain when entering to idle */ 2325 if (perf_evsel__intval(evsel, sample, "next_pid") == 0) 2326 save_idle_callchain(sched, itr, sample); 2327 } 2328 } 2329 2330 return thread; 2331 } 2332 2333 static bool timehist_skip_sample(struct perf_sched *sched, 2334 struct thread *thread, 2335 struct perf_evsel *evsel, 2336 struct perf_sample *sample) 2337 { 2338 bool rc = false; 2339 2340 if (thread__is_filtered(thread)) { 2341 rc = true; 2342 sched->skipped_samples++; 2343 } 2344 2345 if (sched->idle_hist) { 2346 if (strcmp(perf_evsel__name(evsel), "sched:sched_switch")) 2347 rc = true; 2348 else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 && 2349 perf_evsel__intval(evsel, sample, "next_pid") != 0) 2350 rc = true; 2351 } 2352 2353 return rc; 2354 } 2355 2356 static void timehist_print_wakeup_event(struct perf_sched *sched, 2357 struct perf_evsel *evsel, 2358 struct perf_sample *sample, 2359 struct machine *machine, 2360 struct thread *awakened) 2361 { 2362 struct thread *thread; 2363 char tstr[64]; 2364 2365 thread = machine__findnew_thread(machine, sample->pid, sample->tid); 2366 if (thread == NULL) 2367 return; 2368 2369 /* show wakeup unless both awakee and awaker are filtered */ 2370 if (timehist_skip_sample(sched, thread, evsel, sample) && 2371 timehist_skip_sample(sched, awakened, evsel, sample)) { 2372 return; 2373 } 2374 2375 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2376 printf("%15s [%04d] ", tstr, sample->cpu); 2377 if (sched->show_cpu_visual) 2378 printf(" %*s ", sched->max_cpu + 1, ""); 2379 2380 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2381 2382 /* dt spacer */ 2383 printf(" %9s %9s %9s ", "", "", ""); 2384 2385 printf("awakened: %s", timehist_get_commstr(awakened)); 2386 2387 printf("\n"); 2388 } 2389 2390 static int timehist_sched_wakeup_event(struct perf_tool *tool, 2391 union perf_event *event __maybe_unused, 2392 struct perf_evsel *evsel, 2393 struct perf_sample *sample, 2394 struct machine *machine) 2395 { 2396 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2397 struct thread *thread; 2398 struct thread_runtime *tr = NULL; 2399 /* want pid of awakened task not pid in sample */ 2400 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 2401 2402 thread = machine__findnew_thread(machine, 0, pid); 2403 if (thread == NULL) 2404 return -1; 2405 2406 tr = thread__get_runtime(thread); 2407 if (tr == NULL) 2408 return -1; 2409 2410 if (tr->ready_to_run == 0) 2411 tr->ready_to_run = sample->time; 2412 2413 /* show wakeups if requested */ 2414 if (sched->show_wakeups && 2415 !perf_time__skip_sample(&sched->ptime, sample->time)) 2416 timehist_print_wakeup_event(sched, evsel, sample, machine, thread); 2417 2418 return 0; 2419 } 2420 2421 static void timehist_print_migration_event(struct perf_sched *sched, 2422 struct perf_evsel *evsel, 2423 struct perf_sample *sample, 2424 struct machine *machine, 2425 struct thread *migrated) 2426 { 2427 struct thread *thread; 2428 char tstr[64]; 2429 u32 max_cpus = sched->max_cpu + 1; 2430 u32 ocpu, dcpu; 2431 2432 if (sched->summary_only) 2433 return; 2434 2435 max_cpus = sched->max_cpu + 1; 2436 ocpu = perf_evsel__intval(evsel, sample, "orig_cpu"); 2437 dcpu = perf_evsel__intval(evsel, sample, "dest_cpu"); 2438 2439 thread = machine__findnew_thread(machine, sample->pid, sample->tid); 2440 if (thread == NULL) 2441 return; 2442 2443 if (timehist_skip_sample(sched, thread, evsel, sample) && 2444 timehist_skip_sample(sched, migrated, evsel, sample)) { 2445 return; 2446 } 2447 2448 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2449 printf("%15s [%04d] ", tstr, sample->cpu); 2450 2451 if (sched->show_cpu_visual) { 2452 u32 i; 2453 char c; 2454 2455 printf(" "); 2456 for (i = 0; i < max_cpus; ++i) { 2457 c = (i == sample->cpu) ? 'm' : ' '; 2458 printf("%c", c); 2459 } 2460 printf(" "); 2461 } 2462 2463 printf(" %-*s ", comm_width, timehist_get_commstr(thread)); 2464 2465 /* dt spacer */ 2466 printf(" %9s %9s %9s ", "", "", ""); 2467 2468 printf("migrated: %s", timehist_get_commstr(migrated)); 2469 printf(" cpu %d => %d", ocpu, dcpu); 2470 2471 printf("\n"); 2472 } 2473 2474 static int timehist_migrate_task_event(struct perf_tool *tool, 2475 union perf_event *event __maybe_unused, 2476 struct perf_evsel *evsel, 2477 struct perf_sample *sample, 2478 struct machine *machine) 2479 { 2480 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2481 struct thread *thread; 2482 struct thread_runtime *tr = NULL; 2483 /* want pid of migrated task not pid in sample */ 2484 const u32 pid = perf_evsel__intval(evsel, sample, "pid"); 2485 2486 thread = machine__findnew_thread(machine, 0, pid); 2487 if (thread == NULL) 2488 return -1; 2489 2490 tr = thread__get_runtime(thread); 2491 if (tr == NULL) 2492 return -1; 2493 2494 tr->migrations++; 2495 2496 /* show migrations if requested */ 2497 timehist_print_migration_event(sched, evsel, sample, machine, thread); 2498 2499 return 0; 2500 } 2501 2502 static int timehist_sched_change_event(struct perf_tool *tool, 2503 union perf_event *event, 2504 struct perf_evsel *evsel, 2505 struct perf_sample *sample, 2506 struct machine *machine) 2507 { 2508 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2509 struct perf_time_interval *ptime = &sched->ptime; 2510 struct addr_location al; 2511 struct thread *thread; 2512 struct thread_runtime *tr = NULL; 2513 u64 tprev, t = sample->time; 2514 int rc = 0; 2515 int state = perf_evsel__intval(evsel, sample, "prev_state"); 2516 2517 2518 if (machine__resolve(machine, &al, sample) < 0) { 2519 pr_err("problem processing %d event. skipping it\n", 2520 event->header.type); 2521 rc = -1; 2522 goto out; 2523 } 2524 2525 thread = timehist_get_thread(sched, sample, machine, evsel); 2526 if (thread == NULL) { 2527 rc = -1; 2528 goto out; 2529 } 2530 2531 if (timehist_skip_sample(sched, thread, evsel, sample)) 2532 goto out; 2533 2534 tr = thread__get_runtime(thread); 2535 if (tr == NULL) { 2536 rc = -1; 2537 goto out; 2538 } 2539 2540 tprev = perf_evsel__get_time(evsel, sample->cpu); 2541 2542 /* 2543 * If start time given: 2544 * - sample time is under window user cares about - skip sample 2545 * - tprev is under window user cares about - reset to start of window 2546 */ 2547 if (ptime->start && ptime->start > t) 2548 goto out; 2549 2550 if (tprev && ptime->start > tprev) 2551 tprev = ptime->start; 2552 2553 /* 2554 * If end time given: 2555 * - previous sched event is out of window - we are done 2556 * - sample time is beyond window user cares about - reset it 2557 * to close out stats for time window interest 2558 */ 2559 if (ptime->end) { 2560 if (tprev > ptime->end) 2561 goto out; 2562 2563 if (t > ptime->end) 2564 t = ptime->end; 2565 } 2566 2567 if (!sched->idle_hist || thread->tid == 0) { 2568 timehist_update_runtime_stats(tr, t, tprev); 2569 2570 if (sched->idle_hist) { 2571 struct idle_thread_runtime *itr = (void *)tr; 2572 struct thread_runtime *last_tr; 2573 2574 BUG_ON(thread->tid != 0); 2575 2576 if (itr->last_thread == NULL) 2577 goto out; 2578 2579 /* add current idle time as last thread's runtime */ 2580 last_tr = thread__get_runtime(itr->last_thread); 2581 if (last_tr == NULL) 2582 goto out; 2583 2584 timehist_update_runtime_stats(last_tr, t, tprev); 2585 /* 2586 * remove delta time of last thread as it's not updated 2587 * and otherwise it will show an invalid value next 2588 * time. we only care total run time and run stat. 2589 */ 2590 last_tr->dt_run = 0; 2591 last_tr->dt_delay = 0; 2592 last_tr->dt_sleep = 0; 2593 last_tr->dt_iowait = 0; 2594 last_tr->dt_preempt = 0; 2595 2596 if (itr->cursor.nr) 2597 callchain_append(&itr->callchain, &itr->cursor, t - tprev); 2598 2599 itr->last_thread = NULL; 2600 } 2601 } 2602 2603 if (!sched->summary_only) 2604 timehist_print_sample(sched, evsel, sample, &al, thread, t, state); 2605 2606 out: 2607 if (sched->hist_time.start == 0 && t >= ptime->start) 2608 sched->hist_time.start = t; 2609 if (ptime->end == 0 || t <= ptime->end) 2610 sched->hist_time.end = t; 2611 2612 if (tr) { 2613 /* time of this sched_switch event becomes last time task seen */ 2614 tr->last_time = sample->time; 2615 2616 /* last state is used to determine where to account wait time */ 2617 tr->last_state = state; 2618 2619 /* sched out event for task so reset ready to run time */ 2620 tr->ready_to_run = 0; 2621 } 2622 2623 perf_evsel__save_time(evsel, sample->time, sample->cpu); 2624 2625 return rc; 2626 } 2627 2628 static int timehist_sched_switch_event(struct perf_tool *tool, 2629 union perf_event *event, 2630 struct perf_evsel *evsel, 2631 struct perf_sample *sample, 2632 struct machine *machine __maybe_unused) 2633 { 2634 return timehist_sched_change_event(tool, event, evsel, sample, machine); 2635 } 2636 2637 static int process_lost(struct perf_tool *tool __maybe_unused, 2638 union perf_event *event, 2639 struct perf_sample *sample, 2640 struct machine *machine __maybe_unused) 2641 { 2642 char tstr[64]; 2643 2644 timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr)); 2645 printf("%15s ", tstr); 2646 printf("lost %" PRIu64 " events on cpu %d\n", event->lost.lost, sample->cpu); 2647 2648 return 0; 2649 } 2650 2651 2652 static void print_thread_runtime(struct thread *t, 2653 struct thread_runtime *r) 2654 { 2655 double mean = avg_stats(&r->run_stats); 2656 float stddev; 2657 2658 printf("%*s %5d %9" PRIu64 " ", 2659 comm_width, timehist_get_commstr(t), t->ppid, 2660 (u64) r->run_stats.n); 2661 2662 print_sched_time(r->total_run_time, 8); 2663 stddev = rel_stddev_stats(stddev_stats(&r->run_stats), mean); 2664 print_sched_time(r->run_stats.min, 6); 2665 printf(" "); 2666 print_sched_time((u64) mean, 6); 2667 printf(" "); 2668 print_sched_time(r->run_stats.max, 6); 2669 printf(" "); 2670 printf("%5.2f", stddev); 2671 printf(" %5" PRIu64, r->migrations); 2672 printf("\n"); 2673 } 2674 2675 static void print_thread_waittime(struct thread *t, 2676 struct thread_runtime *r) 2677 { 2678 printf("%*s %5d %9" PRIu64 " ", 2679 comm_width, timehist_get_commstr(t), t->ppid, 2680 (u64) r->run_stats.n); 2681 2682 print_sched_time(r->total_run_time, 8); 2683 print_sched_time(r->total_sleep_time, 6); 2684 printf(" "); 2685 print_sched_time(r->total_iowait_time, 6); 2686 printf(" "); 2687 print_sched_time(r->total_preempt_time, 6); 2688 printf(" "); 2689 print_sched_time(r->total_delay_time, 6); 2690 printf("\n"); 2691 } 2692 2693 struct total_run_stats { 2694 struct perf_sched *sched; 2695 u64 sched_count; 2696 u64 task_count; 2697 u64 total_run_time; 2698 }; 2699 2700 static int __show_thread_runtime(struct thread *t, void *priv) 2701 { 2702 struct total_run_stats *stats = priv; 2703 struct thread_runtime *r; 2704 2705 if (thread__is_filtered(t)) 2706 return 0; 2707 2708 r = thread__priv(t); 2709 if (r && r->run_stats.n) { 2710 stats->task_count++; 2711 stats->sched_count += r->run_stats.n; 2712 stats->total_run_time += r->total_run_time; 2713 2714 if (stats->sched->show_state) 2715 print_thread_waittime(t, r); 2716 else 2717 print_thread_runtime(t, r); 2718 } 2719 2720 return 0; 2721 } 2722 2723 static int show_thread_runtime(struct thread *t, void *priv) 2724 { 2725 if (t->dead) 2726 return 0; 2727 2728 return __show_thread_runtime(t, priv); 2729 } 2730 2731 static int show_deadthread_runtime(struct thread *t, void *priv) 2732 { 2733 if (!t->dead) 2734 return 0; 2735 2736 return __show_thread_runtime(t, priv); 2737 } 2738 2739 static size_t callchain__fprintf_folded(FILE *fp, struct callchain_node *node) 2740 { 2741 const char *sep = " <- "; 2742 struct callchain_list *chain; 2743 size_t ret = 0; 2744 char bf[1024]; 2745 bool first; 2746 2747 if (node == NULL) 2748 return 0; 2749 2750 ret = callchain__fprintf_folded(fp, node->parent); 2751 first = (ret == 0); 2752 2753 list_for_each_entry(chain, &node->val, list) { 2754 if (chain->ip >= PERF_CONTEXT_MAX) 2755 continue; 2756 if (chain->ms.sym && chain->ms.sym->ignore) 2757 continue; 2758 ret += fprintf(fp, "%s%s", first ? "" : sep, 2759 callchain_list__sym_name(chain, bf, sizeof(bf), 2760 false)); 2761 first = false; 2762 } 2763 2764 return ret; 2765 } 2766 2767 static size_t timehist_print_idlehist_callchain(struct rb_root_cached *root) 2768 { 2769 size_t ret = 0; 2770 FILE *fp = stdout; 2771 struct callchain_node *chain; 2772 struct rb_node *rb_node = rb_first_cached(root); 2773 2774 printf(" %16s %8s %s\n", "Idle time (msec)", "Count", "Callchains"); 2775 printf(" %.16s %.8s %.50s\n", graph_dotted_line, graph_dotted_line, 2776 graph_dotted_line); 2777 2778 while (rb_node) { 2779 chain = rb_entry(rb_node, struct callchain_node, rb_node); 2780 rb_node = rb_next(rb_node); 2781 2782 ret += fprintf(fp, " "); 2783 print_sched_time(chain->hit, 12); 2784 ret += 16; /* print_sched_time returns 2nd arg + 4 */ 2785 ret += fprintf(fp, " %8d ", chain->count); 2786 ret += callchain__fprintf_folded(fp, chain); 2787 ret += fprintf(fp, "\n"); 2788 } 2789 2790 return ret; 2791 } 2792 2793 static void timehist_print_summary(struct perf_sched *sched, 2794 struct perf_session *session) 2795 { 2796 struct machine *m = &session->machines.host; 2797 struct total_run_stats totals; 2798 u64 task_count; 2799 struct thread *t; 2800 struct thread_runtime *r; 2801 int i; 2802 u64 hist_time = sched->hist_time.end - sched->hist_time.start; 2803 2804 memset(&totals, 0, sizeof(totals)); 2805 totals.sched = sched; 2806 2807 if (sched->idle_hist) { 2808 printf("\nIdle-time summary\n"); 2809 printf("%*s parent sched-out ", comm_width, "comm"); 2810 printf(" idle-time min-idle avg-idle max-idle stddev migrations\n"); 2811 } else if (sched->show_state) { 2812 printf("\nWait-time summary\n"); 2813 printf("%*s parent sched-in ", comm_width, "comm"); 2814 printf(" run-time sleep iowait preempt delay\n"); 2815 } else { 2816 printf("\nRuntime summary\n"); 2817 printf("%*s parent sched-in ", comm_width, "comm"); 2818 printf(" run-time min-run avg-run max-run stddev migrations\n"); 2819 } 2820 printf("%*s (count) ", comm_width, ""); 2821 printf(" (msec) (msec) (msec) (msec) %s\n", 2822 sched->show_state ? "(msec)" : "%"); 2823 printf("%.117s\n", graph_dotted_line); 2824 2825 machine__for_each_thread(m, show_thread_runtime, &totals); 2826 task_count = totals.task_count; 2827 if (!task_count) 2828 printf("<no still running tasks>\n"); 2829 2830 printf("\nTerminated tasks:\n"); 2831 machine__for_each_thread(m, show_deadthread_runtime, &totals); 2832 if (task_count == totals.task_count) 2833 printf("<no terminated tasks>\n"); 2834 2835 /* CPU idle stats not tracked when samples were skipped */ 2836 if (sched->skipped_samples && !sched->idle_hist) 2837 return; 2838 2839 printf("\nIdle stats:\n"); 2840 for (i = 0; i < idle_max_cpu; ++i) { 2841 t = idle_threads[i]; 2842 if (!t) 2843 continue; 2844 2845 r = thread__priv(t); 2846 if (r && r->run_stats.n) { 2847 totals.sched_count += r->run_stats.n; 2848 printf(" CPU %2d idle for ", i); 2849 print_sched_time(r->total_run_time, 6); 2850 printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time); 2851 } else 2852 printf(" CPU %2d idle entire time window\n", i); 2853 } 2854 2855 if (sched->idle_hist && sched->show_callchain) { 2856 callchain_param.mode = CHAIN_FOLDED; 2857 callchain_param.value = CCVAL_PERIOD; 2858 2859 callchain_register_param(&callchain_param); 2860 2861 printf("\nIdle stats by callchain:\n"); 2862 for (i = 0; i < idle_max_cpu; ++i) { 2863 struct idle_thread_runtime *itr; 2864 2865 t = idle_threads[i]; 2866 if (!t) 2867 continue; 2868 2869 itr = thread__priv(t); 2870 if (itr == NULL) 2871 continue; 2872 2873 callchain_param.sort(&itr->sorted_root.rb_root, &itr->callchain, 2874 0, &callchain_param); 2875 2876 printf(" CPU %2d:", i); 2877 print_sched_time(itr->tr.total_run_time, 6); 2878 printf(" msec\n"); 2879 timehist_print_idlehist_callchain(&itr->sorted_root); 2880 printf("\n"); 2881 } 2882 } 2883 2884 printf("\n" 2885 " Total number of unique tasks: %" PRIu64 "\n" 2886 "Total number of context switches: %" PRIu64 "\n", 2887 totals.task_count, totals.sched_count); 2888 2889 printf(" Total run time (msec): "); 2890 print_sched_time(totals.total_run_time, 2); 2891 printf("\n"); 2892 2893 printf(" Total scheduling time (msec): "); 2894 print_sched_time(hist_time, 2); 2895 printf(" (x %d)\n", sched->max_cpu); 2896 } 2897 2898 typedef int (*sched_handler)(struct perf_tool *tool, 2899 union perf_event *event, 2900 struct perf_evsel *evsel, 2901 struct perf_sample *sample, 2902 struct machine *machine); 2903 2904 static int perf_timehist__process_sample(struct perf_tool *tool, 2905 union perf_event *event, 2906 struct perf_sample *sample, 2907 struct perf_evsel *evsel, 2908 struct machine *machine) 2909 { 2910 struct perf_sched *sched = container_of(tool, struct perf_sched, tool); 2911 int err = 0; 2912 int this_cpu = sample->cpu; 2913 2914 if (this_cpu > sched->max_cpu) 2915 sched->max_cpu = this_cpu; 2916 2917 if (evsel->handler != NULL) { 2918 sched_handler f = evsel->handler; 2919 2920 err = f(tool, event, evsel, sample, machine); 2921 } 2922 2923 return err; 2924 } 2925 2926 static int timehist_check_attr(struct perf_sched *sched, 2927 struct perf_evlist *evlist) 2928 { 2929 struct perf_evsel *evsel; 2930 struct evsel_runtime *er; 2931 2932 list_for_each_entry(evsel, &evlist->entries, node) { 2933 er = perf_evsel__get_runtime(evsel); 2934 if (er == NULL) { 2935 pr_err("Failed to allocate memory for evsel runtime data\n"); 2936 return -1; 2937 } 2938 2939 if (sched->show_callchain && !evsel__has_callchain(evsel)) { 2940 pr_info("Samples do not have callchains.\n"); 2941 sched->show_callchain = 0; 2942 symbol_conf.use_callchain = 0; 2943 } 2944 } 2945 2946 return 0; 2947 } 2948 2949 static int perf_sched__timehist(struct perf_sched *sched) 2950 { 2951 const struct perf_evsel_str_handler handlers[] = { 2952 { "sched:sched_switch", timehist_sched_switch_event, }, 2953 { "sched:sched_wakeup", timehist_sched_wakeup_event, }, 2954 { "sched:sched_wakeup_new", timehist_sched_wakeup_event, }, 2955 }; 2956 const struct perf_evsel_str_handler migrate_handlers[] = { 2957 { "sched:sched_migrate_task", timehist_migrate_task_event, }, 2958 }; 2959 struct perf_data data = { 2960 .path = input_name, 2961 .mode = PERF_DATA_MODE_READ, 2962 .force = sched->force, 2963 }; 2964 2965 struct perf_session *session; 2966 struct perf_evlist *evlist; 2967 int err = -1; 2968 2969 /* 2970 * event handlers for timehist option 2971 */ 2972 sched->tool.sample = perf_timehist__process_sample; 2973 sched->tool.mmap = perf_event__process_mmap; 2974 sched->tool.comm = perf_event__process_comm; 2975 sched->tool.exit = perf_event__process_exit; 2976 sched->tool.fork = perf_event__process_fork; 2977 sched->tool.lost = process_lost; 2978 sched->tool.attr = perf_event__process_attr; 2979 sched->tool.tracing_data = perf_event__process_tracing_data; 2980 sched->tool.build_id = perf_event__process_build_id; 2981 2982 sched->tool.ordered_events = true; 2983 sched->tool.ordering_requires_timestamps = true; 2984 2985 symbol_conf.use_callchain = sched->show_callchain; 2986 2987 session = perf_session__new(&data, false, &sched->tool); 2988 if (session == NULL) 2989 return -ENOMEM; 2990 2991 evlist = session->evlist; 2992 2993 symbol__init(&session->header.env); 2994 2995 if (perf_time__parse_str(&sched->ptime, sched->time_str) != 0) { 2996 pr_err("Invalid time string\n"); 2997 return -EINVAL; 2998 } 2999 3000 if (timehist_check_attr(sched, evlist) != 0) 3001 goto out; 3002 3003 setup_pager(); 3004 3005 /* setup per-evsel handlers */ 3006 if (perf_session__set_tracepoints_handlers(session, handlers)) 3007 goto out; 3008 3009 /* sched_switch event at a minimum needs to exist */ 3010 if (!perf_evlist__find_tracepoint_by_name(session->evlist, 3011 "sched:sched_switch")) { 3012 pr_err("No sched_switch events found. Have you run 'perf sched record'?\n"); 3013 goto out; 3014 } 3015 3016 if (sched->show_migrations && 3017 perf_session__set_tracepoints_handlers(session, migrate_handlers)) 3018 goto out; 3019 3020 /* pre-allocate struct for per-CPU idle stats */ 3021 sched->max_cpu = session->header.env.nr_cpus_online; 3022 if (sched->max_cpu == 0) 3023 sched->max_cpu = 4; 3024 if (init_idle_threads(sched->max_cpu)) 3025 goto out; 3026 3027 /* summary_only implies summary option, but don't overwrite summary if set */ 3028 if (sched->summary_only) 3029 sched->summary = sched->summary_only; 3030 3031 if (!sched->summary_only) 3032 timehist_header(sched); 3033 3034 err = perf_session__process_events(session); 3035 if (err) { 3036 pr_err("Failed to process events, error %d", err); 3037 goto out; 3038 } 3039 3040 sched->nr_events = evlist->stats.nr_events[0]; 3041 sched->nr_lost_events = evlist->stats.total_lost; 3042 sched->nr_lost_chunks = evlist->stats.nr_events[PERF_RECORD_LOST]; 3043 3044 if (sched->summary) 3045 timehist_print_summary(sched, session); 3046 3047 out: 3048 free_idle_threads(); 3049 perf_session__delete(session); 3050 3051 return err; 3052 } 3053 3054 3055 static void print_bad_events(struct perf_sched *sched) 3056 { 3057 if (sched->nr_unordered_timestamps && sched->nr_timestamps) { 3058 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", 3059 (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, 3060 sched->nr_unordered_timestamps, sched->nr_timestamps); 3061 } 3062 if (sched->nr_lost_events && sched->nr_events) { 3063 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", 3064 (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, 3065 sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); 3066 } 3067 if (sched->nr_context_switch_bugs && sched->nr_timestamps) { 3068 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", 3069 (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, 3070 sched->nr_context_switch_bugs, sched->nr_timestamps); 3071 if (sched->nr_lost_events) 3072 printf(" (due to lost events?)"); 3073 printf("\n"); 3074 } 3075 } 3076 3077 static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *data) 3078 { 3079 struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; 3080 struct work_atoms *this; 3081 const char *comm = thread__comm_str(data->thread), *this_comm; 3082 bool leftmost = true; 3083 3084 while (*new) { 3085 int cmp; 3086 3087 this = container_of(*new, struct work_atoms, node); 3088 parent = *new; 3089 3090 this_comm = thread__comm_str(this->thread); 3091 cmp = strcmp(comm, this_comm); 3092 if (cmp > 0) { 3093 new = &((*new)->rb_left); 3094 } else if (cmp < 0) { 3095 new = &((*new)->rb_right); 3096 leftmost = false; 3097 } else { 3098 this->num_merged++; 3099 this->total_runtime += data->total_runtime; 3100 this->nb_atoms += data->nb_atoms; 3101 this->total_lat += data->total_lat; 3102 list_splice(&data->work_list, &this->work_list); 3103 if (this->max_lat < data->max_lat) { 3104 this->max_lat = data->max_lat; 3105 this->max_lat_at = data->max_lat_at; 3106 } 3107 zfree(&data); 3108 return; 3109 } 3110 } 3111 3112 data->num_merged++; 3113 rb_link_node(&data->node, parent, new); 3114 rb_insert_color_cached(&data->node, root, leftmost); 3115 } 3116 3117 static void perf_sched__merge_lat(struct perf_sched *sched) 3118 { 3119 struct work_atoms *data; 3120 struct rb_node *node; 3121 3122 if (sched->skip_merge) 3123 return; 3124 3125 while ((node = rb_first_cached(&sched->atom_root))) { 3126 rb_erase_cached(node, &sched->atom_root); 3127 data = rb_entry(node, struct work_atoms, node); 3128 __merge_work_atoms(&sched->merged_atom_root, data); 3129 } 3130 } 3131 3132 static int perf_sched__lat(struct perf_sched *sched) 3133 { 3134 struct rb_node *next; 3135 3136 setup_pager(); 3137 3138 if (perf_sched__read_events(sched)) 3139 return -1; 3140 3141 perf_sched__merge_lat(sched); 3142 perf_sched__sort_lat(sched); 3143 3144 printf("\n -----------------------------------------------------------------------------------------------------------------\n"); 3145 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); 3146 printf(" -----------------------------------------------------------------------------------------------------------------\n"); 3147 3148 next = rb_first_cached(&sched->sorted_atom_root); 3149 3150 while (next) { 3151 struct work_atoms *work_list; 3152 3153 work_list = rb_entry(next, struct work_atoms, node); 3154 output_lat_thread(sched, work_list); 3155 next = rb_next(next); 3156 thread__zput(work_list->thread); 3157 } 3158 3159 printf(" -----------------------------------------------------------------------------------------------------------------\n"); 3160 printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", 3161 (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count); 3162 3163 printf(" ---------------------------------------------------\n"); 3164 3165 print_bad_events(sched); 3166 printf("\n"); 3167 3168 return 0; 3169 } 3170 3171 static int setup_map_cpus(struct perf_sched *sched) 3172 { 3173 struct cpu_map *map; 3174 3175 sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); 3176 3177 if (sched->map.comp) { 3178 sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int)); 3179 if (!sched->map.comp_cpus) 3180 return -1; 3181 } 3182 3183 if (!sched->map.cpus_str) 3184 return 0; 3185 3186 map = cpu_map__new(sched->map.cpus_str); 3187 if (!map) { 3188 pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); 3189 return -1; 3190 } 3191 3192 sched->map.cpus = map; 3193 return 0; 3194 } 3195 3196 static int setup_color_pids(struct perf_sched *sched) 3197 { 3198 struct thread_map *map; 3199 3200 if (!sched->map.color_pids_str) 3201 return 0; 3202 3203 map = thread_map__new_by_tid_str(sched->map.color_pids_str); 3204 if (!map) { 3205 pr_err("failed to get thread map from %s\n", sched->map.color_pids_str); 3206 return -1; 3207 } 3208 3209 sched->map.color_pids = map; 3210 return 0; 3211 } 3212 3213 static int setup_color_cpus(struct perf_sched *sched) 3214 { 3215 struct cpu_map *map; 3216 3217 if (!sched->map.color_cpus_str) 3218 return 0; 3219 3220 map = cpu_map__new(sched->map.color_cpus_str); 3221 if (!map) { 3222 pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str); 3223 return -1; 3224 } 3225 3226 sched->map.color_cpus = map; 3227 return 0; 3228 } 3229 3230 static int perf_sched__map(struct perf_sched *sched) 3231 { 3232 if (setup_map_cpus(sched)) 3233 return -1; 3234 3235 if (setup_color_pids(sched)) 3236 return -1; 3237 3238 if (setup_color_cpus(sched)) 3239 return -1; 3240 3241 setup_pager(); 3242 if (perf_sched__read_events(sched)) 3243 return -1; 3244 print_bad_events(sched); 3245 return 0; 3246 } 3247 3248 static int perf_sched__replay(struct perf_sched *sched) 3249 { 3250 unsigned long i; 3251 3252 calibrate_run_measurement_overhead(sched); 3253 calibrate_sleep_measurement_overhead(sched); 3254 3255 test_calibrations(sched); 3256 3257 if (perf_sched__read_events(sched)) 3258 return -1; 3259 3260 printf("nr_run_events: %ld\n", sched->nr_run_events); 3261 printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); 3262 printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); 3263 3264 if (sched->targetless_wakeups) 3265 printf("target-less wakeups: %ld\n", sched->targetless_wakeups); 3266 if (sched->multitarget_wakeups) 3267 printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); 3268 if (sched->nr_run_events_optimized) 3269 printf("run atoms optimized: %ld\n", 3270 sched->nr_run_events_optimized); 3271 3272 print_task_traces(sched); 3273 add_cross_task_wakeups(sched); 3274 3275 create_tasks(sched); 3276 printf("------------------------------------------------------------\n"); 3277 for (i = 0; i < sched->replay_repeat; i++) 3278 run_one_test(sched); 3279 3280 return 0; 3281 } 3282 3283 static void setup_sorting(struct perf_sched *sched, const struct option *options, 3284 const char * const usage_msg[]) 3285 { 3286 char *tmp, *tok, *str = strdup(sched->sort_order); 3287 3288 for (tok = strtok_r(str, ", ", &tmp); 3289 tok; tok = strtok_r(NULL, ", ", &tmp)) { 3290 if (sort_dimension__add(tok, &sched->sort_list) < 0) { 3291 usage_with_options_msg(usage_msg, options, 3292 "Unknown --sort key: `%s'", tok); 3293 } 3294 } 3295 3296 free(str); 3297 3298 sort_dimension__add("pid", &sched->cmp_pid); 3299 } 3300 3301 static int __cmd_record(int argc, const char **argv) 3302 { 3303 unsigned int rec_argc, i, j; 3304 const char **rec_argv; 3305 const char * const record_args[] = { 3306 "record", 3307 "-a", 3308 "-R", 3309 "-m", "1024", 3310 "-c", "1", 3311 "-e", "sched:sched_switch", 3312 "-e", "sched:sched_stat_wait", 3313 "-e", "sched:sched_stat_sleep", 3314 "-e", "sched:sched_stat_iowait", 3315 "-e", "sched:sched_stat_runtime", 3316 "-e", "sched:sched_process_fork", 3317 "-e", "sched:sched_wakeup", 3318 "-e", "sched:sched_wakeup_new", 3319 "-e", "sched:sched_migrate_task", 3320 }; 3321 3322 rec_argc = ARRAY_SIZE(record_args) + argc - 1; 3323 rec_argv = calloc(rec_argc + 1, sizeof(char *)); 3324 3325 if (rec_argv == NULL) 3326 return -ENOMEM; 3327 3328 for (i = 0; i < ARRAY_SIZE(record_args); i++) 3329 rec_argv[i] = strdup(record_args[i]); 3330 3331 for (j = 1; j < (unsigned int)argc; j++, i++) 3332 rec_argv[i] = argv[j]; 3333 3334 BUG_ON(i != rec_argc); 3335 3336 return cmd_record(i, rec_argv); 3337 } 3338 3339 int cmd_sched(int argc, const char **argv) 3340 { 3341 static const char default_sort_order[] = "avg, max, switch, runtime"; 3342 struct perf_sched sched = { 3343 .tool = { 3344 .sample = perf_sched__process_tracepoint_sample, 3345 .comm = perf_sched__process_comm, 3346 .namespaces = perf_event__process_namespaces, 3347 .lost = perf_event__process_lost, 3348 .fork = perf_sched__process_fork_event, 3349 .ordered_events = true, 3350 }, 3351 .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), 3352 .sort_list = LIST_HEAD_INIT(sched.sort_list), 3353 .start_work_mutex = PTHREAD_MUTEX_INITIALIZER, 3354 .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER, 3355 .sort_order = default_sort_order, 3356 .replay_repeat = 10, 3357 .profile_cpu = -1, 3358 .next_shortname1 = 'A', 3359 .next_shortname2 = '0', 3360 .skip_merge = 0, 3361 .show_callchain = 1, 3362 .max_stack = 5, 3363 }; 3364 const struct option sched_options[] = { 3365 OPT_STRING('i', "input", &input_name, "file", 3366 "input file name"), 3367 OPT_INCR('v', "verbose", &verbose, 3368 "be more verbose (show symbol address, etc)"), 3369 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, 3370 "dump raw trace in ASCII"), 3371 OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"), 3372 OPT_END() 3373 }; 3374 const struct option latency_options[] = { 3375 OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", 3376 "sort by key(s): runtime, switch, avg, max"), 3377 OPT_INTEGER('C', "CPU", &sched.profile_cpu, 3378 "CPU to profile on"), 3379 OPT_BOOLEAN('p', "pids", &sched.skip_merge, 3380 "latency stats per pid instead of per comm"), 3381 OPT_PARENT(sched_options) 3382 }; 3383 const struct option replay_options[] = { 3384 OPT_UINTEGER('r', "repeat", &sched.replay_repeat, 3385 "repeat the workload replay N times (-1: infinite)"), 3386 OPT_PARENT(sched_options) 3387 }; 3388 const struct option map_options[] = { 3389 OPT_BOOLEAN(0, "compact", &sched.map.comp, 3390 "map output in compact mode"), 3391 OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids", 3392 "highlight given pids in map"), 3393 OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus", 3394 "highlight given CPUs in map"), 3395 OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus", 3396 "display given CPUs in map"), 3397 OPT_PARENT(sched_options) 3398 }; 3399 const struct option timehist_options[] = { 3400 OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, 3401 "file", "vmlinux pathname"), 3402 OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, 3403 "file", "kallsyms pathname"), 3404 OPT_BOOLEAN('g', "call-graph", &sched.show_callchain, 3405 "Display call chains if present (default on)"), 3406 OPT_UINTEGER(0, "max-stack", &sched.max_stack, 3407 "Maximum number of functions to display backtrace."), 3408 OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", 3409 "Look for files with symbols relative to this directory"), 3410 OPT_BOOLEAN('s', "summary", &sched.summary_only, 3411 "Show only syscall summary with statistics"), 3412 OPT_BOOLEAN('S', "with-summary", &sched.summary, 3413 "Show all syscalls and summary with statistics"), 3414 OPT_BOOLEAN('w', "wakeups", &sched.show_wakeups, "Show wakeup events"), 3415 OPT_BOOLEAN('n', "next", &sched.show_next, "Show next task"), 3416 OPT_BOOLEAN('M', "migrations", &sched.show_migrations, "Show migration events"), 3417 OPT_BOOLEAN('V', "cpu-visual", &sched.show_cpu_visual, "Add CPU visual"), 3418 OPT_BOOLEAN('I', "idle-hist", &sched.idle_hist, "Show idle events only"), 3419 OPT_STRING(0, "time", &sched.time_str, "str", 3420 "Time span for analysis (start,stop)"), 3421 OPT_BOOLEAN(0, "state", &sched.show_state, "Show task state when sched-out"), 3422 OPT_STRING('p', "pid", &symbol_conf.pid_list_str, "pid[,pid...]", 3423 "analyze events only for given process id(s)"), 3424 OPT_STRING('t', "tid", &symbol_conf.tid_list_str, "tid[,tid...]", 3425 "analyze events only for given thread id(s)"), 3426 OPT_PARENT(sched_options) 3427 }; 3428 3429 const char * const latency_usage[] = { 3430 "perf sched latency [<options>]", 3431 NULL 3432 }; 3433 const char * const replay_usage[] = { 3434 "perf sched replay [<options>]", 3435 NULL 3436 }; 3437 const char * const map_usage[] = { 3438 "perf sched map [<options>]", 3439 NULL 3440 }; 3441 const char * const timehist_usage[] = { 3442 "perf sched timehist [<options>]", 3443 NULL 3444 }; 3445 const char *const sched_subcommands[] = { "record", "latency", "map", 3446 "replay", "script", 3447 "timehist", NULL }; 3448 const char *sched_usage[] = { 3449 NULL, 3450 NULL 3451 }; 3452 struct trace_sched_handler lat_ops = { 3453 .wakeup_event = latency_wakeup_event, 3454 .switch_event = latency_switch_event, 3455 .runtime_event = latency_runtime_event, 3456 .migrate_task_event = latency_migrate_task_event, 3457 }; 3458 struct trace_sched_handler map_ops = { 3459 .switch_event = map_switch_event, 3460 }; 3461 struct trace_sched_handler replay_ops = { 3462 .wakeup_event = replay_wakeup_event, 3463 .switch_event = replay_switch_event, 3464 .fork_event = replay_fork_event, 3465 }; 3466 unsigned int i; 3467 3468 for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++) 3469 sched.curr_pid[i] = -1; 3470 3471 argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands, 3472 sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); 3473 if (!argc) 3474 usage_with_options(sched_usage, sched_options); 3475 3476 /* 3477 * Aliased to 'perf script' for now: 3478 */ 3479 if (!strcmp(argv[0], "script")) 3480 return cmd_script(argc, argv); 3481 3482 if (!strncmp(argv[0], "rec", 3)) { 3483 return __cmd_record(argc, argv); 3484 } else if (!strncmp(argv[0], "lat", 3)) { 3485 sched.tp_handler = &lat_ops; 3486 if (argc > 1) { 3487 argc = parse_options(argc, argv, latency_options, latency_usage, 0); 3488 if (argc) 3489 usage_with_options(latency_usage, latency_options); 3490 } 3491 setup_sorting(&sched, latency_options, latency_usage); 3492 return perf_sched__lat(&sched); 3493 } else if (!strcmp(argv[0], "map")) { 3494 if (argc) { 3495 argc = parse_options(argc, argv, map_options, map_usage, 0); 3496 if (argc) 3497 usage_with_options(map_usage, map_options); 3498 } 3499 sched.tp_handler = &map_ops; 3500 setup_sorting(&sched, latency_options, latency_usage); 3501 return perf_sched__map(&sched); 3502 } else if (!strncmp(argv[0], "rep", 3)) { 3503 sched.tp_handler = &replay_ops; 3504 if (argc) { 3505 argc = parse_options(argc, argv, replay_options, replay_usage, 0); 3506 if (argc) 3507 usage_with_options(replay_usage, replay_options); 3508 } 3509 return perf_sched__replay(&sched); 3510 } else if (!strcmp(argv[0], "timehist")) { 3511 if (argc) { 3512 argc = parse_options(argc, argv, timehist_options, 3513 timehist_usage, 0); 3514 if (argc) 3515 usage_with_options(timehist_usage, timehist_options); 3516 } 3517 if ((sched.show_wakeups || sched.show_next) && 3518 sched.summary_only) { 3519 pr_err(" Error: -s and -[n|w] are mutually exclusive.\n"); 3520 parse_options_usage(timehist_usage, timehist_options, "s", true); 3521 if (sched.show_wakeups) 3522 parse_options_usage(NULL, timehist_options, "w", true); 3523 if (sched.show_next) 3524 parse_options_usage(NULL, timehist_options, "n", true); 3525 return -EINVAL; 3526 } 3527 3528 return perf_sched__timehist(&sched); 3529 } else { 3530 usage_with_options(sched_usage, sched_options); 3531 } 3532 3533 return 0; 3534 } 3535