// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree and other debugging details
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 */

/*
 * This allows printing both to the debugfs file
 * (/sys/kernel/debug/sched/debug) and to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
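
/*
 * Worked example (illustrative): for x == 3500000 ns, nsec_high(x)
 * returns 3 and nsec_low(x) returns 500000, so feeding SPLIT_NS(x)
 * to a "%Ld.%06ld" format prints "3.500000", i.e. the nanosecond
 * value rendered as milliseconds with a six-digit fractional part.
 */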

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* CONFIG_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
	if (i < 0)
		return i;

	if (neg) {
		sysctl_sched_features &= ~(1UL << i);
		sched_feat_disable(i);
	} else {
		sysctl_sched_features |= (1UL << i);
		sched_feat_enable(i);
	}

	return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int ret;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	cpus_read_lock();
	inode_lock(inode);
	ret = sched_feat_set(cmp);
	inode_unlock(inode);
	cpus_read_unlock();
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
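
/*
 * Example usage (illustrative; assumes debugfs is mounted at
 * /sys/kernel/debug, and the feature names depend on features.h):
 *
 *   # cat /sys/kernel/debug/sched/features
 *   GENTLE_FAIR_SLEEPERS START_DEBIT ...
 *   # echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched/features
 */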

#ifdef CONFIG_SMP

static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	unsigned int scaling;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;
	buf[cnt] = '\0';

	if (kstrtouint(buf, 10, &scaling))
		return -EINVAL;

	if (scaling >= SCHED_TUNABLESCALING_END)
		return -EINVAL;

	sysctl_sched_tunable_scaling = scaling;
	if (sched_update_scaling())
		return -EINVAL;

	*ppos += cnt;
	return cnt;
}

static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}

static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}

static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
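
/*
 * Example (illustrative): the scaling policy is selected by index,
 * 0 == none, 1 == logarithmic, 2 == linear (see
 * sched_tunable_scaling_names[] further down):
 *
 *   # echo 2 > /sys/kernel/debug/sched/tunable_scaling
 */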

#endif /* CONFIG_SMP */

#ifdef CONFIG_PREEMPT_DYNAMIC

static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *ppos)
{
	char buf[16];
	int mode;

	if (cnt > 15)
		cnt = 15;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	mode = sched_dynamic_mode(strstrip(buf));
	if (mode < 0)
		return mode;

	sched_dynamic_update(mode);

	*ppos += cnt;

	return cnt;
}

static int sched_dynamic_show(struct seq_file *m, void *v)
{
	static const char *preempt_modes[] = {
		"none", "voluntary", "full"
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
		if (preempt_dynamic_mode == i)
			seq_puts(m, "(");
		seq_puts(m, preempt_modes[i]);
		if (preempt_dynamic_mode == i)
			seq_puts(m, ")");

		seq_puts(m, " ");
	}

	seq_puts(m, "\n");
	return 0;
}

static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}

static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
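
/*
 * Example (illustrative): the current preemption model is shown in
 * parentheses, and writing one of the listed names switches models:
 *
 *   # cat /sys/kernel/debug/sched/preempt
 *   none (voluntary) full
 *   # echo full > /sys/kernel/debug/sched/preempt
 */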

#endif /* CONFIG_PREEMPT_DYNAMIC */

__read_mostly bool sched_debug_verbose;

#ifdef CONFIG_SMP
static struct dentry *sd_dentry;

static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	ssize_t result;
	bool orig;

	cpus_read_lock();
	mutex_lock(&sched_domains_mutex);

	orig = sched_debug_verbose;
	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);

	if (sched_debug_verbose && !orig)
		update_sched_domain_debugfs();
	else if (!sched_debug_verbose && orig) {
		debugfs_remove(sd_dentry);
		sd_dentry = NULL;
	}

	mutex_unlock(&sched_domains_mutex);
	cpus_read_unlock();

	return result;
}
#else
#define sched_verbose_write debugfs_write_file_bool
#endif

static const struct file_operations sched_verbose_fops = {
	.read =         debugfs_read_file_bool,
	.write =        sched_verbose_write,
	.open =         simple_open,
	.llseek =       default_llseek,
};
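
/*
 * Example (illustrative): enabling verbose mode populates the per-CPU
 * sched-domain hierarchy under debugfs (see register_sd() below for
 * the full set of files):
 *
 *   # echo 1 > /sys/kernel/debug/sched/verbose
 *   # ls /sys/kernel/debug/sched/domains/cpu0/domain0/
 *   busy_factor  flags  groups_flags  imbalance_pct  min_interval  ...
 */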

static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static struct dentry *debugfs_sched;

static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);
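
/*
 * The resulting tree (illustrative, CONFIG_SMP plus NUMA balancing;
 * domains/ only appears once "verbose" has been enabled):
 *
 *   /sys/kernel/debug/sched/
 *     debug  features  preempt  verbose  tunable_scaling
 *     latency_ns  min_granularity_ns  idle_min_granularity_ns
 *     wakeup_granularity_ns  latency_warn_ms  latency_warn_once
 *     migration_cost_ns  nr_migrate  domains/  numa_balancing/
 */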

#ifdef CONFIG_SMP

static cpumask_var_t		sd_sysctl_cpus;

static int sd_flags_show(struct seq_file *m, void *v)
{
	unsigned long flags = *(unsigned int *)m->private;
	int idx;

	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
		seq_puts(m, sd_flag_debug[idx].name);
		seq_puts(m, " ");
	}
	seq_puts(m, "\n");

	return 0;
}

static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}

static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void register_sd(struct sched_domain *sd, struct dentry *parent)
{
#define SDM(type, mode, member)	\
	debugfs_create_##type(#member, mode, parent, &sd->member)

	SDM(ulong, 0644, min_interval);
	SDM(ulong, 0644, max_interval);
	SDM(u64,   0644, max_newidle_lb_cost);
	SDM(u32,   0644, busy_factor);
	SDM(u32,   0644, imbalance_pct);
	SDM(u32,   0644, cache_nice_tries);
	SDM(str,   0444, name);

#undef SDM

	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
	debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
}

void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_init_debug() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	if (!sched_debug_verbose)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry) {
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

		/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
		if (cpumask_empty(sd_sysctl_cpus))
			cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
	}

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}

void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only one SEQ_printf_task_group_path() caller at a time can use the
 * full-length group_path[] buffer for the cgroup path. Other simultaneous
 * callers have to fall back to a shorter stack buffer. A "..." suffix is
 * appended at the end of that buffer so it shows up whenever the output
 * length matches the buffer size, indicating possible path truncation.
 */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif
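
/*
 * Illustrative: a caller that loses the trylock race prints at most the
 * first 124 characters of the path; when the path fills the stack buffer
 * completely, the output ends in "..." to flag the truncation.
 */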

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "runnable tasks:\n");
	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
		   "     wait-time             sum-exec        sum-sleep\n");
	SEQ_printf(m, "-------------------------------------------------------"
		   "------------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_rq_lock_irqsave(rq, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
			cfs_rq->idle_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %d\n", #x, (int)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_idle_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position, which means 2 is CPU 0.
 * In a hotplugged system some CPUs, including CPU 0, may be missing,
 * so we have to use cpumask_* to iterate over the CPUs.
 */
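/*
 * Worked example (illustrative): with CPUs 0 and 2 online,
 *   *offset == 0 -> header  (start returns (void *)1)
 *   *offset == 1 -> CPU 0   (start returns (void *)2)
 *   *offset == 2 -> CPU 2   (start returns (void *)4)
 * and sched_debug_show() recovers the CPU as (unsigned long)(v - 2).
 */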
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};

#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1 - t0);
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}
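
/*
 * Example (illustrative): these two hooks back /proc/<pid>/sched.
 * Reading the file prints the statistics above; writing anything to it
 * resets the task's schedstats counters:
 *
 *   # cat /proc/self/sched
 *   # echo 0 > /proc/$$/sched
 */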

void resched_latency_warn(int cpu, u64 latency)
{
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}
1128