xref: /openbmc/linux/kernel/sched/debug.c (revision 84c43674)
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
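
/*
 * Example: SPLIT_NS(1500000) expands to 1, 500000, which a
 * "%Ld.%06ld" format prints as "1.500000": milliseconds, with the
 * nanosecond remainder as the fractional part.
 */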

#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
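
/*
 * sched_feat_names[] above is built by stringifying each
 * SCHED_FEAT(name, enabled) entry in features.h (e.g.
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)); re-including the file
 * below with a different SCHED_FEAT definition yields the matching
 * static-key table.
 */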

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* HAVE_JUMP_LABEL */
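
/*
 * With jump labels, sched_feat() tests compile down to static
 * branches that the helpers above patch at runtime; without them,
 * the fallback is a plain test of the sysctl_sched_features bitmask.
 */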

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}
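
/*
 * A feature is toggled by writing its name, optionally prefixed with
 * "NO_", to the debugfs file created below, e.g.:
 *
 *   echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 */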

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

__read_mostly bool sched_debug_enabled;

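/*
 * Create the debugfs knobs: "sched_features" toggles the scheduler
 * features above, and "sched_debug" is a boolean that (elsewhere)
 * gates the verbose scheduler-domain debug output.
 */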
static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	debugfs_create_bool("sched_debug", 0644, NULL,
			&sched_debug_enabled);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}
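
/*
 * When @load_idx is set, extra1/extra2 clamp the value to a valid
 * rq->cpu_load[] index, i.e. the range [0, CPU_LOAD_IDX_MAX - 1].
 */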

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}
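
/*
 * The tables built above are exposed under /proc/sys/kernel/sched_domain/
 * as one directory per CPU and domain, e.g.:
 *
 *   /proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 */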

static cpumask_var_t sd_sysctl_cpus;
static struct ctl_table_header *sd_sysctl_header;

void register_sched_domain_sysctl(void)
{
	static struct ctl_table *cpu_entries;
	static struct ctl_table **cpu_idx;
	char buf[32];
	int i;

	if (!cpu_entries) {
		cpu_entries = sd_alloc_ctl_entry(num_possible_cpus() + 1);
		if (!cpu_entries)
			return;

		WARN_ON(sd_ctl_dir[0].child);
		sd_ctl_dir[0].child = cpu_entries;
	}

	if (!cpu_idx) {
		struct ctl_table *e = cpu_entries;

		cpu_idx = kcalloc(nr_cpu_ids, sizeof(struct ctl_table*), GFP_KERNEL);
		if (!cpu_idx)
			return;

		/* deal with sparse possible map */
		for_each_possible_cpu(i) {
			cpu_idx[i] = e;
			e++;
		}
	}

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;

		/* init to possible to not have holes in @cpu_entries */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	for_each_cpu(i, sd_sysctl_cpus) {
		struct ctl_table *e = cpu_idx[i];

		if (e->child)
			sd_free_ctl_entry(&e->child);

		if (!e->procname) {
			snprintf(buf, 32, "cpu%d", i);
			e->procname = kstrdup(buf, GFP_KERNEL);
		}
		e->mode = 0555;
		e->child = sd_alloc_ctl_cpu_table(i);

		__cpumask_clear_cpu(i, sd_sysctl_cpus);
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}

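/*
 * Mark a CPU whose sched-domain hierarchy changed; its sysctl
 * directory is rebuilt on the next register_sched_domain_sysctl()
 * pass.
 */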
void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}

/* may be called multiple times per register */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
	if (schedstat_enabled()) {
		PN_SCHEDSTAT(se->statistics.wait_start);
		PN_SCHEDSTAT(se->statistics.sleep_start);
		PN_SCHEDSTAT(se->statistics.block_start);
		PN_SCHEDSTAT(se->statistics.sleep_max);
		PN_SCHEDSTAT(se->statistics.block_max);
		PN_SCHEDSTAT(se->statistics.exec_max);
		PN_SCHEDSTAT(se->statistics.slice_max);
		PN_SCHEDSTAT(se->statistics.wait_max);
		PN_SCHEDSTAT(se->statistics.wait_sum);
		P_SCHEDSTAT(se->statistics.wait_count);
	}
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif
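
/*
 * Note: task_group_path() hands back a single static buffer; the
 * print paths in this file are serialized by sched_debug_lock.
 */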

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	" S           task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"-------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (rb_first_cached(&cfs_rq->tasks_timeline))
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing, so we
 * have to use cpumask_*() to iterate over the CPUs.
 */
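/*
 * Concretely: *offset == 0 shows the header, *offset == 1 the first
 * online CPU, and sched_debug_show() decodes the cookie as cpu = v - 2.
 */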
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
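
/*
 * The same printers back both /proc/sched_debug (created read-only
 * above) and the console dump in sysrq_sched_debug_show().
 */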

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

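/*
 * This function backs /proc/<pid>/sched.
 */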
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define P_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)schedstat_val(p->F))
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define PN_SCHEDSTAT(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(p->F)))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(se.statistics.sum_sleep_runtime);
		PN_SCHEDSTAT(se.statistics.wait_start);
		PN_SCHEDSTAT(se.statistics.sleep_start);
		PN_SCHEDSTAT(se.statistics.block_start);
		PN_SCHEDSTAT(se.statistics.sleep_max);
		PN_SCHEDSTAT(se.statistics.block_max);
		PN_SCHEDSTAT(se.statistics.exec_max);
		PN_SCHEDSTAT(se.statistics.slice_max);
		PN_SCHEDSTAT(se.statistics.wait_max);
		PN_SCHEDSTAT(se.statistics.wait_sum);
		P_SCHEDSTAT(se.statistics.wait_count);
		PN_SCHEDSTAT(se.statistics.iowait_sum);
		P_SCHEDSTAT(se.statistics.iowait_count);
		P_SCHEDSTAT(se.statistics.nr_migrations_cold);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_affine);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_running);
		P_SCHEDSTAT(se.statistics.nr_failed_migrations_hot);
		P_SCHEDSTAT(se.statistics.nr_forced_migrations);
		P_SCHEDSTAT(se.statistics.nr_wakeups);
		P_SCHEDSTAT(se.statistics.nr_wakeups_sync);
		P_SCHEDSTAT(se.statistics.nr_wakeups_migrate);
		P_SCHEDSTAT(se.statistics.nr_wakeups_local);
		P_SCHEDSTAT(se.statistics.nr_wakeups_remote);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine);
		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}

	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
	if (p->policy == SCHED_DEADLINE) {
		P(dl.runtime);
		P(dl.deadline);
	}
#undef PN_SCHEDSTAT
#undef PN
#undef __PN
#undef P_SCHEDSTAT
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
1046