/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields: nsec_high() yields the msec part,
 * nsec_low() the nsec remainder, so SPLIT_NS(1234567) printed with
 * "%Ld.%06ld" comes out as "1.234567".
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Print the stats of the task group's sched_entity on one CPU. */
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
/* A single shared buffer; its users in this file run under sched_debug_lock. */
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}
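
/*
 * Print every task that is currently queued on @rq_cpu's runqueue,
 * walking the global thread list under tasklist_lock.
 */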
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;
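
/*
 * Print one CPU's runqueue state: clock and load figures, schedstats,
 * then the per-class runqueues and the runnable tasks.
 */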
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}
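
/*
 * Dump the header and all online CPUs to the console: with a NULL
 * seq_file, SEQ_printf() falls back to printk(). Used by the SysRq
 * task-state dump.
 */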
void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some cpus, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the cpus. For example, with CPUs 0 and 2
 * online, the positions handed to sched_debug_show() are 1 (header),
 * 2 (cpu 0) and 4 (cpu 2).
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
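
/*
 * Show the scheduling state of one task; this backs /proc/<pid>/sched.
 */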
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.runnable_avg_sum);
	P(se.avg.runnable_avg_period);
	P(se.avg.load_avg_contrib);
	P(se.avg.decay_count);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}

/*
 * Reset a task's schedstats; invoked when /proc/<pid>/sched is written to.
 */
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}