xref: /openbmc/linux/kernel/sched/debug.c (revision 293d5b43)
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
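
/*
 * Example (illustrative): SEQ_printf(NULL, "cpu#%d\n", cpu) falls back to
 * printk(); this is how sysrq_sched_debug_show() below reuses the same
 * print helpers for console output.
 */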

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)

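/*
 * Example: for nsec == 3123456789, SPLIT_NS() expands to the pair
 * (3123, 456789), printed as "3123.456789" by the "%Ld.%06ld" format
 * strings used throughout this file.
 */
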
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
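
/*
 * sched_feat_names[] above is built via "x-macro" expansion: features.h is
 * included twice in this file with different SCHED_FEAT() definitions, once
 * here for the name strings and once below (under HAVE_JUMP_LABEL) for the
 * matching static keys. For example, an entry such as
 * SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) contributes "GENTLE_FAIR_SLEEPERS".
 */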

static int sched_feat_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (!(sysctl_sched_features & (1UL << i)))
			seq_puts(m, "NO_");
		seq_printf(m, "%s ", sched_feat_names[i]);
	}
	seq_puts(m, "\n");

	return 0;
}

#ifdef HAVE_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

static void sched_feat_disable(int i)
{
	static_key_disable(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { }
static void sched_feat_enable(int i) { }
#endif /* HAVE_JUMP_LABEL */

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;
}
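
/*
 * Example (illustrative, assuming debugfs is mounted at /sys/kernel/debug
 * and GENTLE_FAIR_SLEEPERS is defined in features.h):
 *
 *	echo NO_GENTLE_FAIR_SLEEPERS > /sys/kernel/debug/sched_features
 *
 * sched_feat_set() returns the index of the matched feature, or
 * __SCHED_FEAT_NR when no name matches, which sched_feat_write() below
 * turns into -EINVAL.
 */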

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	char *cmp;
	int i;
	struct inode *inode;

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;
	cmp = strstrip(buf);

	/* Ensure the static_key remains in a consistent state */
	inode = file_inode(filp);
	inode_lock(inode);
	i = sched_feat_set(cmp);
	inode_unlock(inode);
	if (i == __SCHED_FEAT_NR)
		return -EINVAL;

	*ppos += cnt;

	return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static __init int sched_init_debug(void)
{
	debugfs_create_file("sched_features", 0644, NULL, NULL,
			&sched_feat_fops);

	return 0;
}
late_initcall(sched_init_debug);

#ifdef CONFIG_SMP

#ifdef CONFIG_SYSCTL

static struct ctl_table sd_ctl_dir[] = {
	{
		.procname	= "sched_domain",
		.mode		= 0555,
	},
	{}
};

static struct ctl_table sd_ctl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= sd_ctl_dir,
	},
	{}
};

static struct ctl_table *sd_alloc_ctl_entry(int n)
{
	struct ctl_table *entry =
		kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);

	return entry;
}

static void sd_free_ctl_entry(struct ctl_table **tablep)
{
	struct ctl_table *entry;

	/*
	 * In the intermediate directories, both the child directory and
	 * procname are dynamically allocated and could fail but the mode
	 * will always be set. In the lowest directory the names are
	 * static strings and all have proc handlers.
	 */
	for (entry = *tablep; entry->mode; entry++) {
		if (entry->child)
			sd_free_ctl_entry(&entry->child);
		if (entry->proc_handler == NULL)
			kfree(entry->procname);
	}

	kfree(*tablep);
	*tablep = NULL;
}

static int min_load_idx = 0;
static int max_load_idx = CPU_LOAD_IDX_MAX-1;

static void
set_table_entry(struct ctl_table *entry,
		const char *procname, void *data, int maxlen,
		umode_t mode, proc_handler *proc_handler,
		bool load_idx)
{
	entry->procname = procname;
	entry->data = data;
	entry->maxlen = maxlen;
	entry->mode = mode;
	entry->proc_handler = proc_handler;

	if (load_idx) {
		entry->extra1 = &min_load_idx;
		entry->extra2 = &max_load_idx;
	}
}

static struct ctl_table *
sd_alloc_ctl_domain_table(struct sched_domain *sd)
{
	struct ctl_table *table = sd_alloc_ctl_entry(14);

	if (table == NULL)
		return NULL;

	set_table_entry(&table[0], "min_interval", &sd->min_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[1], "max_interval", &sd->max_interval,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
		sizeof(int), 0644, proc_dointvec_minmax, true);
	set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[9], "cache_nice_tries",
		&sd->cache_nice_tries,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[10], "flags", &sd->flags,
		sizeof(int), 0644, proc_dointvec_minmax, false);
	set_table_entry(&table[11], "max_newidle_lb_cost",
		&sd->max_newidle_lb_cost,
		sizeof(long), 0644, proc_doulongvec_minmax, false);
	set_table_entry(&table[12], "name", sd->name,
		CORENAME_MAX_SIZE, 0444, proc_dostring, false);
	/* &table[13] is terminator */

	return table;
}

static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
	struct ctl_table *entry, *table;
	struct sched_domain *sd;
	int domain_num = 0, i;
	char buf[32];

	for_each_domain(cpu, sd)
		domain_num++;
	entry = table = sd_alloc_ctl_entry(domain_num + 1);
	if (table == NULL)
		return NULL;

	i = 0;
	for_each_domain(cpu, sd) {
		snprintf(buf, 32, "domain%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_domain_table(sd);
		entry++;
		i++;
	}
	return table;
}

static struct ctl_table_header *sd_sysctl_header;
void register_sched_domain_sysctl(void)
{
	int i, cpu_num = num_possible_cpus();
	struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
	char buf[32];

	WARN_ON(sd_ctl_dir[0].child);
	sd_ctl_dir[0].child = entry;

	if (entry == NULL)
		return;

	for_each_possible_cpu(i) {
		snprintf(buf, 32, "cpu%d", i);
		entry->procname = kstrdup(buf, GFP_KERNEL);
		entry->mode = 0555;
		entry->child = sd_alloc_ctl_cpu_table(i);
		entry++;
	}

	WARN_ON(sd_sysctl_header);
	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
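
/*
 * The resulting tree looks like this (illustrative, assuming two
 * scheduling-domain levels above cpu0):
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/imbalance_pct
 *	/proc/sys/kernel/sched_domain/cpu0/domain1/...
 *	/proc/sys/kernel/sched_domain/cpu1/...
 */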

/* may be called multiple times per call to register_sched_domain_sysctl() */
void unregister_sched_domain_sysctl(void)
{
	unregister_sysctl_table(sd_sysctl_header);
	sd_sysctl_header = NULL;
	if (sd_ctl_dir[0].child)
		sd_free_ctl_entry(&sd_ctl_dir[0].child);
}
#endif /* CONFIG_SYSCTL */
#endif /* CONFIG_SMP */

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	if (schedstat_enabled()) {
		PN(se->statistics.wait_start);
		PN(se->statistics.sleep_start);
		PN(se->statistics.block_start);
		PN(se->statistics.sleep_max);
		PN(se->statistics.block_max);
		PN(se->statistics.exec_max);
		PN(se->statistics.slice_max);
		PN(se->statistics.wait_max);
		PN(se->statistics.wait_sum);
		P(se->statistics.wait_count);
	}
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
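/*
 * A single static buffer: callers are serialized by sched_debug_lock
 * (taken in print_cpu() below), so task_group_path() is not re-entrant.
 */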
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     wait-time             sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_load_avg",
			atomic_long_read(&cfs_rq->removed_load_avg));
	SEQ_printf(m, "  .%-30s: %ld\n", "removed_util_avg",
			atomic_long_read(&cfs_rq->removed_util_avg));
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
#ifdef CONFIG_SMP
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);

	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}

#undef P
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}
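
/*
 * Worked example (illustrative): with cpu0 and cpu2 online and cpu1 offline,
 * successive *offset values map as: 0 -> header (returns 1, which
 * sched_debug_show() decodes as cpu == -1), 1 -> cpu0 (returns 2),
 * 2 -> cpu2 (returns 4, because cpumask_next(0, cpu_online_mask) skips the
 * offline cpu1), and the iteration then terminates with NULL.
 */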

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_release(struct inode *inode, struct file *file)
{
	seq_release(inode, file);

	return 0;
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	int ret = 0;

	ret = seq_open(filp, &sched_debug_sops);

	return ret;
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= sched_debug_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
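
/*
 * Typical usage (illustrative):
 *
 *	cat /proc/sched_debug	# header plus one section per online CPU
 *	cat /proc/<pid>/sched	# per-task view, backed by proc_sched_show_task()
 */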

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
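
/*
 * These file-scope helpers are used by sched_show_numa() below;
 * proc_sched_show_task() re-defines identical local copies and #undefs
 * them when it is done.
 */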


#ifdef CONFIG_NUMA_BALANCING
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
}
#endif


static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

#ifdef CONFIG_SCHEDSTATS
	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN(se.statistics.sum_sleep_runtime);
		PN(se.statistics.wait_start);
		PN(se.statistics.sleep_start);
		PN(se.statistics.block_start);
		PN(se.statistics.sleep_max);
		PN(se.statistics.block_max);
		PN(se.statistics.exec_max);
		PN(se.statistics.slice_max);
		PN(se.statistics.wait_max);
		PN(se.statistics.wait_sum);
		P(se.statistics.wait_count);
		PN(se.statistics.iowait_sum);
		P(se.statistics.iowait_count);
		P(se.statistics.nr_migrations_cold);
		P(se.statistics.nr_failed_migrations_affine);
		P(se.statistics.nr_failed_migrations_running);
		P(se.statistics.nr_failed_migrations_hot);
		P(se.statistics.nr_forced_migrations);
		P(se.statistics.nr_wakeups);
		P(se.statistics.nr_wakeups_sync);
		P(se.statistics.nr_wakeups_migrate);
		P(se.statistics.nr_wakeups_local);
		P(se.statistics.nr_wakeups_remote);
		P(se.statistics.nr_wakeups_affine);
		P(se.statistics.nr_wakeups_affine_attempts);
		P(se.statistics.nr_wakeups_passive);
		P(se.statistics.nr_wakeups_idle);

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
974