// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */
#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
} while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;
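/*
 * Usage sketch (illustrative, not exhaustive): the "sched_features" file
 * created below accepts a feature name to set it, or a "NO_"-prefixed
 * name to clear it, e.g.:
 *
 *   # cat /sys/kernel/debug/sched_features
 *   GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *   # echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * The authoritative feature list is whatever features.h expands to via
 * SCHED_FEAT() above; the names shown here are only examples.
 */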
static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;
}

static int sd_ctl_doflags(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned long flags = *(unsigned long *)table->data;
	size_t data_size = 0;
	size_t len = 0;
	char *tmp, *buf;
	int idx;

	if (write)
		return 0;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		/* Name plus whitespace */
		data_size += strlen(name) + 1;
	}

	if (*ppos > data_size) {
		*lenp = 0;
		return 0;
	}

	buf = kcalloc(data_size + 1, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		char *name = sd_flag_debug[idx].name;

		len += snprintf(buf + len, strlen(name) + 2, "%s ", name);
	}

	/*
	 * Resume from *ppos within the rendered string, but keep the
	 * original allocation pointer around so it can be freed.
	 */
	tmp = buf + *ppos;
	len -= *ppos;

	if (len > *lenp)
		len = *lenp;
	if (len)
		memcpy(buffer, tmp, len);
	if (len < *lenp) {
		((char *)buffer)[len] = '\n';
		len++;
	}

	*lenp = len;
	*ppos += len;

	kfree(buf);

	return 0;
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(9);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval",	  &sd->min_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[1], "max_interval",	  &sd->max_interval,	    sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[2], "busy_factor",	  &sd->busy_factor,	    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[3], "imbalance_pct",	  &sd->imbalance_pct,	    sizeof(int),  0644, proc_dointvec_minmax);
	set_table_entry(&table[4], "cache_nice_tries",	  &sd->cache_nice_tries,    sizeof(int),  0644, proc_dointvec_minmax);
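	/*
	 * "flags" below is deliberately 0444: rather than exposing the raw
	 * bitmask, sd_ctl_doflags() above renders it as human-readable
	 * flag names (one sysctl read shows e.g. "SD_BALANCE_NEWIDLE ...").
	 */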
	set_table_entry(&table[5], "flags",		  &sd->flags,		    sizeof(int),  0444, sd_ctl_doflags);
	set_table_entry(&table[6], "max_newidle_lb_cost", &sd->max_newidle_lb_cost, sizeof(long), 0644, proc_doulongvec_minmax);
	set_table_entry(&table[7], "name",		  sd->name,	       CORENAME_MAX_SIZE, 0444, proc_dostring);
	/* &table[8] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static cpumask_var_t		sd_sysctl_cpus;
static struct ctl_table_header	*sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	static bool init_done = false;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
	}

	if (!init_done) {
		init_done = true;
		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)schedstat_val(F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

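	/*
	 * The fields below are schedstats: they are only maintained while
	 * schedstats is enabled at runtime (e.g. via the
	 * kernel.sched_schedstats sysctl), hence the guard.
	 */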
	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);

	return group_path;
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
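	/*
	 * Snapshot taken above under rq->lock: MIN_vruntime/max_vruntime
	 * are the leftmost/rightmost queued entities (-1 when the tree is
	 * empty), min_vruntime is this cfs_rq's monotonic floor, and
	 * spread0 below compares that floor against CPU0's as a rough
	 * cross-runqueue drift indicator.
	 */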
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;
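		/*
		 * "cpu_khz ? : 1" is the GNU "?:" extension: take cpu_khz
		 * when it is non-zero, else fall back to 1 (presumably so
		 * an uncalibrated TSC still prints a sane value).
		 */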
		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}
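/*
 * seq_file position mapping used by the iterator below (derived from the
 * code, shown here for clarity):
 *
 *   *offset == 0  ->  cookie 1, rendered as the header
 *   *offset == 1  ->  cookie cpu + 2 for the first online CPU
 *   *offset == n  ->  the next online CPU after the one at position n - 1
 *
 * sched_debug_show() recovers the CPU number by subtracting 2 from the
 * cookie, so the header decodes to cpu == -1.
 */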
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

static int __init init_sched_debug_procfs(void)
{
	if (!proc_create_seq("sched_debug", 0444, NULL, &sched_debug_sops))
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	P(se.avg.util_est.enqueued);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
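/*
 * Summary of the userspace surface wired up in this file (plus, for
 * /proc/<pid>/sched, the glue in fs/proc/base.c):
 *
 *   /proc/sched_debug                  - sched_debug_sops: header, then one
 *                                        online CPU per seq_file position
 *   /proc/<pid>/sched                  - proc_sched_show_task(); a write
 *                                        resets the task's schedstats via
 *                                        proc_sched_set_task()
 *   /sys/kernel/debug/sched_features   - sched_feat_fops
 *   /sys/kernel/debug/sched_debug      - the sched_debug_enabled toggle
 *   /proc/sys/kernel/sched_domain/...  - per-CPU, per-domain tunables
 */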