/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);	\
	else					\
		pr_cont(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}
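/*
 * Illustrative interaction with the debugfs knob implemented by the fops
 * below (a sketch: it assumes debugfs is mounted at /sys/kernel/debug, and
 * the feature names shown are examples from features.h that vary by kernel
 * version):
 *
 *	# cat /sys/kernel/debug/sched_features
 *	GENTLE_FAIR_SLEEPERS START_DEBIT ... NO_HRTICK ...
 *	# echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 */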
static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}
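/*
 * The allocators below build a per-CPU, per-domain directory tree under
 * /proc/sys.  Sketch of the resulting layout (the number of domainN
 * directories depends on the machine's topology):
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/max_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/...
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/...
 *	/proc/sys/kernel/sched_domain/cpu1/...
 */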
static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0] , "min_interval", &sd->min_interval, sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1] , "max_interval", &sd->max_interval, sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2] , "busy_idx", &sd->busy_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
	set_table_entry(&table[3] , "idle_idx", &sd->idle_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
	set_table_entry(&table[4] , "newidle_idx", &sd->newidle_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
	set_table_entry(&table[5] , "wake_idx", &sd->wake_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
	set_table_entry(&table[6] , "forkexec_idx", &sd->forkexec_idx, sizeof(int) , 0644, proc_dointvec_minmax, true );
	set_table_entry(&table[7] , "busy_factor", &sd->busy_factor, sizeof(int) , 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8] , "imbalance_pct", &sd->imbalance_pct, sizeof(int) , 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9] , "cache_nice_tries", &sd->cache_nice_tries, sizeof(int) , 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags, sizeof(int) , 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name, CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */
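/*
 * Expected usage of the three sysctl entry points above around a domain
 * rebuild (a sketch; the exact call sites live in the topology/hotplug
 * code, not in this file):
 *
 *	unregister_sched_domain_sysctl();
 *	...rebuild the sched domains, calling dirty_sched_domain_sysctl(cpu)
 *	   for every CPU whose domains changed...
 *	register_sched_domain_sysctl();   (only dirty CPUs are rebuilt)
 */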
#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
	P(se->runnable_weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_load_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

	return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S task PID tree-key switches prio"
		   " wait-time sum-exec sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
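/*
 * Abridged sketch of the "runnable tasks:" table print_rq() above emits,
 * with made-up values (one row per task on this CPU; the leading ">R"
 * marks rq->curr, otherwise the first column is the task state letter):
 *
 *	runnable tasks:
 *	 S task PID tree-key switches prio wait-time sum-exec sum-sleep
 *	----------------------------------------------------------------
 *	>R cc1 1234 123456.789012 42 120 0.123456 987.654321 0.000000
 */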
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, " .%-30s: %ld\n", "runnable_weight", cfs_rq->runnable_weight);
	SEQ_printf(m, " .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->avg.runnable_load_avg);
	SEQ_printf(m, " .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, " .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, " .%-30s: %ld\n", "removed.runnable_sum",
			cfs_rq->removed.runnable_sum);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, " .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, " .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, " .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, " .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}
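/*
 * Note on the dl_bw fields printed above: the deadline admission-control
 * code tracks bandwidth as a fixed-point fraction of a CPU, left-shifted
 * by BW_SHIFT (assumed to be 20 here).  E.g. the default 95% limit would
 * print as dl_bw->bw == 996147, i.e. 0.95 * (1 << 20).
 */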
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, " .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, " .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}
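/*
 * Worked example of the header/CPU encoding shared by sched_debug_show()
 * above and the seq_file iterator below (assuming CPU 0 is online):
 *
 *	*offset == 0  ->  v == (void *)1  ->  cpu == -1  ->  header
 *	*offset == 1  ->  v == (void *)2  ->  cpu ==  0  ->  print_cpu(m, 0)
 *	*offset == n  ->  v encodes the next online CPU >= n-1, plus 2
 */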
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __P(F)	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F)	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F)	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F)	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}
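/*
 * proc_sched_show_task() below backs /proc/<pid>/sched.  Abridged,
 * made-up sample of its output (the exact field set depends on the
 * config and on schedstat_enabled()):
 *
 *	cat (1234, #threads: 1)
 *	-------------------------------------------------------------------
 *	se.exec_start                                :        123456.789012
 *	se.vruntime                                  :          1234.567890
 *	se.sum_exec_runtime                          :           987.654321
 *	...
 */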
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(se.runnable_weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
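/*
 * proc_sched_set_task() is the reset hook for the statistics printed by
 * proc_sched_show_task(): writing to /proc/<pid>/sched is expected to end
 * up here (via the proc code in fs/proc/base.c) and zero the accumulated
 * schedstats, while leaving the non-schedstat fields untouched.
 */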