1 /* 2 * kernel/sched/debug.c 3 * 4 * Print the CFS rbtree 5 * 6 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar 7 * 8 * This program is free software; you can redistribute it and/or modify 9 * it under the terms of the GNU General Public License version 2 as 10 * published by the Free Software Foundation. 11 */ 12 13 #include <linux/proc_fs.h> 14 #include <linux/sched.h> 15 #include <linux/seq_file.h> 16 #include <linux/kallsyms.h> 17 #include <linux/utsname.h> 18 #include <linux/mempolicy.h> 19 20 #include "sched.h" 21 22 static DEFINE_SPINLOCK(sched_debug_lock); 23 24 /* 25 * This allows printing both to /proc/sched_debug and 26 * to the console 27 */ 28 #define SEQ_printf(m, x...) \ 29 do { \ 30 if (m) \ 31 seq_printf(m, x); \ 32 else \ 33 printk(x); \ 34 } while (0) 35 36 /* 37 * Ease the printing of nsec fields: 38 */ 39 static long long nsec_high(unsigned long long nsec) 40 { 41 if ((long long)nsec < 0) { 42 nsec = -nsec; 43 do_div(nsec, 1000000); 44 return -nsec; 45 } 46 do_div(nsec, 1000000); 47 48 return nsec; 49 } 50 51 static unsigned long nsec_low(unsigned long long nsec) 52 { 53 if ((long long)nsec < 0) 54 nsec = -nsec; 55 56 return do_div(nsec, 1000000); 57 } 58 59 #define SPLIT_NS(x) nsec_high(x), nsec_low(x) 60 61 #ifdef CONFIG_FAIR_GROUP_SCHED 62 static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg) 63 { 64 struct sched_entity *se = tg->se[cpu]; 65 66 #define P(F) \ 67 SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F) 68 #define PN(F) \ 69 SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F)) 70 71 if (!se) { 72 struct sched_avg *avg = &cpu_rq(cpu)->avg; 73 P(avg->runnable_avg_sum); 74 P(avg->runnable_avg_period); 75 return; 76 } 77 78 79 PN(se->exec_start); 80 PN(se->vruntime); 81 PN(se->sum_exec_runtime); 82 #ifdef CONFIG_SCHEDSTATS 83 PN(se->statistics.wait_start); 84 PN(se->statistics.sleep_start); 85 PN(se->statistics.block_start); 86 PN(se->statistics.sleep_max); 87 PN(se->statistics.block_max); 88 PN(se->statistics.exec_max); 89 PN(se->statistics.slice_max); 90 PN(se->statistics.wait_max); 91 PN(se->statistics.wait_sum); 92 P(se->statistics.wait_count); 93 #endif 94 P(se->load.weight); 95 #ifdef CONFIG_SMP 96 P(se->avg.runnable_avg_sum); 97 P(se->avg.runnable_avg_period); 98 P(se->avg.load_avg_contrib); 99 P(se->avg.decay_count); 100 #endif 101 #undef PN 102 #undef P 103 } 104 #endif 105 106 #ifdef CONFIG_CGROUP_SCHED 107 static char group_path[PATH_MAX]; 108 109 static char *task_group_path(struct task_group *tg) 110 { 111 if (autogroup_path(tg, group_path, PATH_MAX)) 112 return group_path; 113 114 return cgroup_path(tg->css.cgroup, group_path, PATH_MAX); 115 } 116 #endif 117 118 static void 119 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) 120 { 121 if (rq->curr == p) 122 SEQ_printf(m, "R"); 123 else 124 SEQ_printf(m, " "); 125 126 SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ", 127 p->comm, task_pid_nr(p), 128 SPLIT_NS(p->se.vruntime), 129 (long long)(p->nvcsw + p->nivcsw), 130 p->prio); 131 #ifdef CONFIG_SCHEDSTATS 132 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", 133 SPLIT_NS(p->se.vruntime), 134 SPLIT_NS(p->se.sum_exec_runtime), 135 SPLIT_NS(p->se.statistics.sum_sleep_runtime)); 136 #else 137 SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", 138 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); 139 #endif 140 #ifdef CONFIG_NUMA_BALANCING 141 SEQ_printf(m, " %d", task_node(p)); 142 #endif 143 #ifdef CONFIG_CGROUP_SCHED 144 SEQ_printf(m, " %s", task_group_path(task_group(p))); 145 #endif 146 147 SEQ_printf(m, "\n"); 148 } 149 150 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) 151 { 152 struct task_struct *g, *p; 153 unsigned long flags; 154 155 SEQ_printf(m, 156 "\nrunnable tasks:\n" 157 " task PID tree-key switches prio" 158 " exec-runtime sum-exec sum-sleep\n" 159 "------------------------------------------------------" 160 "----------------------------------------------------\n"); 161 162 read_lock_irqsave(&tasklist_lock, flags); 163 164 do_each_thread(g, p) { 165 if (task_cpu(p) != rq_cpu) 166 continue; 167 168 print_task(m, rq, p); 169 } while_each_thread(g, p); 170 171 read_unlock_irqrestore(&tasklist_lock, flags); 172 } 173 174 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) 175 { 176 s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1, 177 spread, rq0_min_vruntime, spread0; 178 struct rq *rq = cpu_rq(cpu); 179 struct sched_entity *last; 180 unsigned long flags; 181 182 #ifdef CONFIG_FAIR_GROUP_SCHED 183 SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg)); 184 #else 185 SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); 186 #endif 187 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", 188 SPLIT_NS(cfs_rq->exec_clock)); 189 190 raw_spin_lock_irqsave(&rq->lock, flags); 191 if (cfs_rq->rb_leftmost) 192 MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime; 193 last = __pick_last_entity(cfs_rq); 194 if (last) 195 max_vruntime = last->vruntime; 196 min_vruntime = cfs_rq->min_vruntime; 197 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; 198 raw_spin_unlock_irqrestore(&rq->lock, flags); 199 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", 200 SPLIT_NS(MIN_vruntime)); 201 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime", 202 SPLIT_NS(min_vruntime)); 203 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime", 204 SPLIT_NS(max_vruntime)); 205 spread = max_vruntime - MIN_vruntime; 206 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", 207 SPLIT_NS(spread)); 208 spread0 = min_vruntime - rq0_min_vruntime; 209 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0", 210 SPLIT_NS(spread0)); 211 SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over", 212 cfs_rq->nr_spread_over); 213 SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); 214 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); 215 #ifdef CONFIG_SMP 216 SEQ_printf(m, " .%-30s: %ld\n", "runnable_load_avg", 217 cfs_rq->runnable_load_avg); 218 SEQ_printf(m, " .%-30s: %ld\n", "blocked_load_avg", 219 cfs_rq->blocked_load_avg); 220 #ifdef CONFIG_FAIR_GROUP_SCHED 221 SEQ_printf(m, " .%-30s: %ld\n", "tg_load_contrib", 222 cfs_rq->tg_load_contrib); 223 SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib", 224 cfs_rq->tg_runnable_contrib); 225 SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg", 226 atomic_long_read(&cfs_rq->tg->load_avg)); 227 SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg", 228 atomic_read(&cfs_rq->tg->runnable_avg)); 229 #endif 230 #endif 231 #ifdef CONFIG_CFS_BANDWIDTH 232 SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active", 233 cfs_rq->tg->cfs_bandwidth.timer_active); 234 SEQ_printf(m, " .%-30s: %d\n", "throttled", 235 cfs_rq->throttled); 236 SEQ_printf(m, " .%-30s: %d\n", "throttle_count", 237 cfs_rq->throttle_count); 238 #endif 239 240 #ifdef CONFIG_FAIR_GROUP_SCHED 241 print_cfs_group_stats(m, cpu, cfs_rq->tg); 242 #endif 243 } 244 245 void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) 246 { 247 #ifdef CONFIG_RT_GROUP_SCHED 248 SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg)); 249 #else 250 SEQ_printf(m, "\nrt_rq[%d]:\n", cpu); 251 #endif 252 253 #define P(x) \ 254 SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x)) 255 #define PN(x) \ 256 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x)) 257 258 P(rt_nr_running); 259 P(rt_throttled); 260 PN(rt_time); 261 PN(rt_runtime); 262 263 #undef PN 264 #undef P 265 } 266 267 extern __read_mostly int sched_clock_running; 268 269 static void print_cpu(struct seq_file *m, int cpu) 270 { 271 struct rq *rq = cpu_rq(cpu); 272 unsigned long flags; 273 274 #ifdef CONFIG_X86 275 { 276 unsigned int freq = cpu_khz ? : 1; 277 278 SEQ_printf(m, "cpu#%d, %u.%03u MHz\n", 279 cpu, freq / 1000, (freq % 1000)); 280 } 281 #else 282 SEQ_printf(m, "cpu#%d\n", cpu); 283 #endif 284 285 #define P(x) \ 286 do { \ 287 if (sizeof(rq->x) == 4) \ 288 SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \ 289 else \ 290 SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\ 291 } while (0) 292 293 #define PN(x) \ 294 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x)) 295 296 P(nr_running); 297 SEQ_printf(m, " .%-30s: %lu\n", "load", 298 rq->load.weight); 299 P(nr_switches); 300 P(nr_load_updates); 301 P(nr_uninterruptible); 302 PN(next_balance); 303 SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr))); 304 PN(clock); 305 P(cpu_load[0]); 306 P(cpu_load[1]); 307 P(cpu_load[2]); 308 P(cpu_load[3]); 309 P(cpu_load[4]); 310 #undef P 311 #undef PN 312 313 #ifdef CONFIG_SCHEDSTATS 314 #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); 315 #define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n); 316 317 P(yld_count); 318 319 P(sched_count); 320 P(sched_goidle); 321 #ifdef CONFIG_SMP 322 P64(avg_idle); 323 P64(max_idle_balance_cost); 324 #endif 325 326 P(ttwu_count); 327 P(ttwu_local); 328 329 #undef P 330 #undef P64 331 #endif 332 spin_lock_irqsave(&sched_debug_lock, flags); 333 print_cfs_stats(m, cpu); 334 print_rt_stats(m, cpu); 335 336 rcu_read_lock(); 337 print_rq(m, rq, cpu); 338 rcu_read_unlock(); 339 spin_unlock_irqrestore(&sched_debug_lock, flags); 340 SEQ_printf(m, "\n"); 341 } 342 343 static const char *sched_tunable_scaling_names[] = { 344 "none", 345 "logaritmic", 346 "linear" 347 }; 348 349 static void sched_debug_header(struct seq_file *m) 350 { 351 u64 ktime, sched_clk, cpu_clk; 352 unsigned long flags; 353 354 local_irq_save(flags); 355 ktime = ktime_to_ns(ktime_get()); 356 sched_clk = sched_clock(); 357 cpu_clk = local_clock(); 358 local_irq_restore(flags); 359 360 SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n", 361 init_utsname()->release, 362 (int)strcspn(init_utsname()->version, " "), 363 init_utsname()->version); 364 365 #define P(x) \ 366 SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x)) 367 #define PN(x) \ 368 SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) 369 PN(ktime); 370 PN(sched_clk); 371 PN(cpu_clk); 372 P(jiffies); 373 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 374 P(sched_clock_stable()); 375 #endif 376 #undef PN 377 #undef P 378 379 SEQ_printf(m, "\n"); 380 SEQ_printf(m, "sysctl_sched\n"); 381 382 #define P(x) \ 383 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x)) 384 #define PN(x) \ 385 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) 386 PN(sysctl_sched_latency); 387 PN(sysctl_sched_min_granularity); 388 PN(sysctl_sched_wakeup_granularity); 389 P(sysctl_sched_child_runs_first); 390 P(sysctl_sched_features); 391 #undef PN 392 #undef P 393 394 SEQ_printf(m, " .%-40s: %d (%s)\n", 395 "sysctl_sched_tunable_scaling", 396 sysctl_sched_tunable_scaling, 397 sched_tunable_scaling_names[sysctl_sched_tunable_scaling]); 398 SEQ_printf(m, "\n"); 399 } 400 401 static int sched_debug_show(struct seq_file *m, void *v) 402 { 403 int cpu = (unsigned long)(v - 2); 404 405 if (cpu != -1) 406 print_cpu(m, cpu); 407 else 408 sched_debug_header(m); 409 410 return 0; 411 } 412 413 void sysrq_sched_debug_show(void) 414 { 415 int cpu; 416 417 sched_debug_header(NULL); 418 for_each_online_cpu(cpu) 419 print_cpu(NULL, cpu); 420 421 } 422 423 /* 424 * This itererator needs some explanation. 425 * It returns 1 for the header position. 426 * This means 2 is cpu 0. 427 * In a hotplugged system some cpus, including cpu 0, may be missing so we have 428 * to use cpumask_* to iterate over the cpus. 429 */ 430 static void *sched_debug_start(struct seq_file *file, loff_t *offset) 431 { 432 unsigned long n = *offset; 433 434 if (n == 0) 435 return (void *) 1; 436 437 n--; 438 439 if (n > 0) 440 n = cpumask_next(n - 1, cpu_online_mask); 441 else 442 n = cpumask_first(cpu_online_mask); 443 444 *offset = n + 1; 445 446 if (n < nr_cpu_ids) 447 return (void *)(unsigned long)(n + 2); 448 return NULL; 449 } 450 451 static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset) 452 { 453 (*offset)++; 454 return sched_debug_start(file, offset); 455 } 456 457 static void sched_debug_stop(struct seq_file *file, void *data) 458 { 459 } 460 461 static const struct seq_operations sched_debug_sops = { 462 .start = sched_debug_start, 463 .next = sched_debug_next, 464 .stop = sched_debug_stop, 465 .show = sched_debug_show, 466 }; 467 468 static int sched_debug_release(struct inode *inode, struct file *file) 469 { 470 seq_release(inode, file); 471 472 return 0; 473 } 474 475 static int sched_debug_open(struct inode *inode, struct file *filp) 476 { 477 int ret = 0; 478 479 ret = seq_open(filp, &sched_debug_sops); 480 481 return ret; 482 } 483 484 static const struct file_operations sched_debug_fops = { 485 .open = sched_debug_open, 486 .read = seq_read, 487 .llseek = seq_lseek, 488 .release = sched_debug_release, 489 }; 490 491 static int __init init_sched_debug_procfs(void) 492 { 493 struct proc_dir_entry *pe; 494 495 pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops); 496 if (!pe) 497 return -ENOMEM; 498 return 0; 499 } 500 501 __initcall(init_sched_debug_procfs); 502 503 #define __P(F) \ 504 SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F) 505 #define P(F) \ 506 SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F) 507 #define __PN(F) \ 508 SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F)) 509 #define PN(F) \ 510 SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F)) 511 512 513 static void sched_show_numa(struct task_struct *p, struct seq_file *m) 514 { 515 #ifdef CONFIG_NUMA_BALANCING 516 struct mempolicy *pol; 517 int node, i; 518 519 if (p->mm) 520 P(mm->numa_scan_seq); 521 522 task_lock(p); 523 pol = p->mempolicy; 524 if (pol && !(pol->flags & MPOL_F_MORON)) 525 pol = NULL; 526 mpol_get(pol); 527 task_unlock(p); 528 529 SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0)); 530 531 for_each_online_node(node) { 532 for (i = 0; i < 2; i++) { 533 unsigned long nr_faults = -1; 534 int cpu_current, home_node; 535 536 if (p->numa_faults_memory) 537 nr_faults = p->numa_faults_memory[2*node + i]; 538 539 cpu_current = !i ? (task_node(p) == node) : 540 (pol && node_isset(node, pol->v.nodes)); 541 542 home_node = (p->numa_preferred_nid == node); 543 544 SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n", 545 i, node, cpu_current, home_node, nr_faults); 546 } 547 } 548 549 mpol_put(pol); 550 #endif 551 } 552 553 void proc_sched_show_task(struct task_struct *p, struct seq_file *m) 554 { 555 unsigned long nr_switches; 556 557 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p), 558 get_nr_threads(p)); 559 SEQ_printf(m, 560 "---------------------------------------------------------" 561 "----------\n"); 562 #define __P(F) \ 563 SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F) 564 #define P(F) \ 565 SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F) 566 #define __PN(F) \ 567 SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F)) 568 #define PN(F) \ 569 SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F)) 570 571 PN(se.exec_start); 572 PN(se.vruntime); 573 PN(se.sum_exec_runtime); 574 575 nr_switches = p->nvcsw + p->nivcsw; 576 577 #ifdef CONFIG_SCHEDSTATS 578 PN(se.statistics.wait_start); 579 PN(se.statistics.sleep_start); 580 PN(se.statistics.block_start); 581 PN(se.statistics.sleep_max); 582 PN(se.statistics.block_max); 583 PN(se.statistics.exec_max); 584 PN(se.statistics.slice_max); 585 PN(se.statistics.wait_max); 586 PN(se.statistics.wait_sum); 587 P(se.statistics.wait_count); 588 PN(se.statistics.iowait_sum); 589 P(se.statistics.iowait_count); 590 P(se.nr_migrations); 591 P(se.statistics.nr_migrations_cold); 592 P(se.statistics.nr_failed_migrations_affine); 593 P(se.statistics.nr_failed_migrations_running); 594 P(se.statistics.nr_failed_migrations_hot); 595 P(se.statistics.nr_forced_migrations); 596 P(se.statistics.nr_wakeups); 597 P(se.statistics.nr_wakeups_sync); 598 P(se.statistics.nr_wakeups_migrate); 599 P(se.statistics.nr_wakeups_local); 600 P(se.statistics.nr_wakeups_remote); 601 P(se.statistics.nr_wakeups_affine); 602 P(se.statistics.nr_wakeups_affine_attempts); 603 P(se.statistics.nr_wakeups_passive); 604 P(se.statistics.nr_wakeups_idle); 605 606 { 607 u64 avg_atom, avg_per_cpu; 608 609 avg_atom = p->se.sum_exec_runtime; 610 if (nr_switches) 611 do_div(avg_atom, nr_switches); 612 else 613 avg_atom = -1LL; 614 615 avg_per_cpu = p->se.sum_exec_runtime; 616 if (p->se.nr_migrations) { 617 avg_per_cpu = div64_u64(avg_per_cpu, 618 p->se.nr_migrations); 619 } else { 620 avg_per_cpu = -1LL; 621 } 622 623 __PN(avg_atom); 624 __PN(avg_per_cpu); 625 } 626 #endif 627 __P(nr_switches); 628 SEQ_printf(m, "%-45s:%21Ld\n", 629 "nr_voluntary_switches", (long long)p->nvcsw); 630 SEQ_printf(m, "%-45s:%21Ld\n", 631 "nr_involuntary_switches", (long long)p->nivcsw); 632 633 P(se.load.weight); 634 #ifdef CONFIG_SMP 635 P(se.avg.runnable_avg_sum); 636 P(se.avg.runnable_avg_period); 637 P(se.avg.load_avg_contrib); 638 P(se.avg.decay_count); 639 #endif 640 P(policy); 641 P(prio); 642 #undef PN 643 #undef __PN 644 #undef P 645 #undef __P 646 647 { 648 unsigned int this_cpu = raw_smp_processor_id(); 649 u64 t0, t1; 650 651 t0 = cpu_clock(this_cpu); 652 t1 = cpu_clock(this_cpu); 653 SEQ_printf(m, "%-45s:%21Ld\n", 654 "clock-delta", (long long)(t1-t0)); 655 } 656 657 sched_show_numa(p, m); 658 } 659 660 void proc_sched_set_task(struct task_struct *p) 661 { 662 #ifdef CONFIG_SCHEDSTATS 663 memset(&p->se.statistics, 0, sizeof(p->se.statistics)); 664 #endif 665 } 666