xref: /openbmc/linux/kernel/sched/debug.c (revision 36db6e8484ed455bbb320d89a119378897ae991c)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2391e43daSPeter Zijlstra /*
3391e43daSPeter Zijlstra  * kernel/sched/debug.c
4391e43daSPeter Zijlstra  *
5325ea10cSIngo Molnar  * Print the CFS rbtree and other debugging details
6391e43daSPeter Zijlstra  *
7391e43daSPeter Zijlstra  * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
8391e43daSPeter Zijlstra  */
9391e43daSPeter Zijlstra 
10391e43daSPeter Zijlstra /*
11391e43daSPeter Zijlstra  * This allows printing both to /proc/sched_debug and
12391e43daSPeter Zijlstra  * to the console
13391e43daSPeter Zijlstra  */
/* Writes go to @m when reading the file; pr_cont() keeps console output on one line. */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		pr_cont(x);			\
 } while (0)
21391e43daSPeter Zijlstra 
22391e43daSPeter Zijlstra /*
23391e43daSPeter Zijlstra  * Ease the printing of nsec fields:
24391e43daSPeter Zijlstra  */
nsec_high(unsigned long long nsec)25391e43daSPeter Zijlstra static long long nsec_high(unsigned long long nsec)
26391e43daSPeter Zijlstra {
27391e43daSPeter Zijlstra 	if ((long long)nsec < 0) {
28391e43daSPeter Zijlstra 		nsec = -nsec;
29391e43daSPeter Zijlstra 		do_div(nsec, 1000000);
30391e43daSPeter Zijlstra 		return -nsec;
31391e43daSPeter Zijlstra 	}
32391e43daSPeter Zijlstra 	do_div(nsec, 1000000);
33391e43daSPeter Zijlstra 
34391e43daSPeter Zijlstra 	return nsec;
35391e43daSPeter Zijlstra }
36391e43daSPeter Zijlstra 
/* Sub-millisecond remainder (in ns) of a signed nanosecond value. */
static unsigned long nsec_low(unsigned long long nsec)
{
	unsigned long long abs_ns = ((long long)nsec < 0) ? -nsec : nsec;

	/* do_div() returns the remainder of the in-place division. */
	return do_div(abs_ns, 1000000);
}
44391e43daSPeter Zijlstra 
45391e43daSPeter Zijlstra #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
46391e43daSPeter Zijlstra 
/*
 * Expand features.h once into an array of feature-name strings,
 * indexed by the corresponding __SCHED_FEAT_* enum values.
 */
#define SCHED_FEAT(name, enabled)	\
	#name ,

static const char * const sched_feat_names[] = {
#include "features.h"
};

#undef SCHED_FEAT
55d6ca41d7SSteven Rostedt (Red Hat) 
sched_feat_show(struct seq_file * m,void * v)56d6ca41d7SSteven Rostedt (Red Hat) static int sched_feat_show(struct seq_file *m, void *v)
57d6ca41d7SSteven Rostedt (Red Hat) {
58d6ca41d7SSteven Rostedt (Red Hat) 	int i;
59d6ca41d7SSteven Rostedt (Red Hat) 
60d6ca41d7SSteven Rostedt (Red Hat) 	for (i = 0; i < __SCHED_FEAT_NR; i++) {
61d6ca41d7SSteven Rostedt (Red Hat) 		if (!(sysctl_sched_features & (1UL << i)))
62d6ca41d7SSteven Rostedt (Red Hat) 			seq_puts(m, "NO_");
63d6ca41d7SSteven Rostedt (Red Hat) 		seq_printf(m, "%s ", sched_feat_names[i]);
64d6ca41d7SSteven Rostedt (Red Hat) 	}
65d6ca41d7SSteven Rostedt (Red Hat) 	seq_puts(m, "\n");
66d6ca41d7SSteven Rostedt (Red Hat) 
67d6ca41d7SSteven Rostedt (Red Hat) 	return 0;
68d6ca41d7SSteven Rostedt (Red Hat) }
69d6ca41d7SSteven Rostedt (Red Hat) 
#ifdef CONFIG_JUMP_LABEL

#define jump_label_key__true  STATIC_KEY_INIT_TRUE
#define jump_label_key__false STATIC_KEY_INIT_FALSE

/* One static key per feature, initialized from the features.h defaults. */
#define SCHED_FEAT(name, enabled)	\
	jump_label_key__##enabled ,

struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};

#undef SCHED_FEAT

/* Callers hold cpus_read_lock() — see sched_feat_write(). */
static void sched_feat_disable(int i)
{
	static_key_disable_cpuslocked(&sched_feat_keys[i]);
}

static void sched_feat_enable(int i)
{
	static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
/* Without jump labels only the sysctl_sched_features bitmask is consulted. */
static void sched_feat_disable(int i) { };
static void sched_feat_enable(int i) { };
#endif /* CONFIG_JUMP_LABEL */
97d6ca41d7SSteven Rostedt (Red Hat) 
sched_feat_set(char * cmp)98d6ca41d7SSteven Rostedt (Red Hat) static int sched_feat_set(char *cmp)
99d6ca41d7SSteven Rostedt (Red Hat) {
100d6ca41d7SSteven Rostedt (Red Hat) 	int i;
101d6ca41d7SSteven Rostedt (Red Hat) 	int neg = 0;
102d6ca41d7SSteven Rostedt (Red Hat) 
103d6ca41d7SSteven Rostedt (Red Hat) 	if (strncmp(cmp, "NO_", 3) == 0) {
104d6ca41d7SSteven Rostedt (Red Hat) 		neg = 1;
105d6ca41d7SSteven Rostedt (Red Hat) 		cmp += 3;
106d6ca41d7SSteven Rostedt (Red Hat) 	}
107d6ca41d7SSteven Rostedt (Red Hat) 
1088f894bf4SYisheng Xie 	i = match_string(sched_feat_names, __SCHED_FEAT_NR, cmp);
1098f894bf4SYisheng Xie 	if (i < 0)
1108f894bf4SYisheng Xie 		return i;
1118f894bf4SYisheng Xie 
112d6ca41d7SSteven Rostedt (Red Hat) 	if (neg) {
113d6ca41d7SSteven Rostedt (Red Hat) 		sysctl_sched_features &= ~(1UL << i);
114d6ca41d7SSteven Rostedt (Red Hat) 		sched_feat_disable(i);
115d6ca41d7SSteven Rostedt (Red Hat) 	} else {
116d6ca41d7SSteven Rostedt (Red Hat) 		sysctl_sched_features |= (1UL << i);
117d6ca41d7SSteven Rostedt (Red Hat) 		sched_feat_enable(i);
118d6ca41d7SSteven Rostedt (Red Hat) 	}
119d6ca41d7SSteven Rostedt (Red Hat) 
1208f894bf4SYisheng Xie 	return 0;
121d6ca41d7SSteven Rostedt (Red Hat) }
122d6ca41d7SSteven Rostedt (Red Hat) 
123d6ca41d7SSteven Rostedt (Red Hat) static ssize_t
sched_feat_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)124d6ca41d7SSteven Rostedt (Red Hat) sched_feat_write(struct file *filp, const char __user *ubuf,
125d6ca41d7SSteven Rostedt (Red Hat) 		size_t cnt, loff_t *ppos)
126d6ca41d7SSteven Rostedt (Red Hat) {
127d6ca41d7SSteven Rostedt (Red Hat) 	char buf[64];
128d6ca41d7SSteven Rostedt (Red Hat) 	char *cmp;
1298f894bf4SYisheng Xie 	int ret;
130d6ca41d7SSteven Rostedt (Red Hat) 	struct inode *inode;
131d6ca41d7SSteven Rostedt (Red Hat) 
132d6ca41d7SSteven Rostedt (Red Hat) 	if (cnt > 63)
133d6ca41d7SSteven Rostedt (Red Hat) 		cnt = 63;
134d6ca41d7SSteven Rostedt (Red Hat) 
135d6ca41d7SSteven Rostedt (Red Hat) 	if (copy_from_user(&buf, ubuf, cnt))
136d6ca41d7SSteven Rostedt (Red Hat) 		return -EFAULT;
137d6ca41d7SSteven Rostedt (Red Hat) 
138d6ca41d7SSteven Rostedt (Red Hat) 	buf[cnt] = 0;
139d6ca41d7SSteven Rostedt (Red Hat) 	cmp = strstrip(buf);
140d6ca41d7SSteven Rostedt (Red Hat) 
141d6ca41d7SSteven Rostedt (Red Hat) 	/* Ensure the static_key remains in a consistent state */
142d6ca41d7SSteven Rostedt (Red Hat) 	inode = file_inode(filp);
143e73e8197SJiada Wang 	cpus_read_lock();
144d6ca41d7SSteven Rostedt (Red Hat) 	inode_lock(inode);
1458f894bf4SYisheng Xie 	ret = sched_feat_set(cmp);
146d6ca41d7SSteven Rostedt (Red Hat) 	inode_unlock(inode);
147e73e8197SJiada Wang 	cpus_read_unlock();
1488f894bf4SYisheng Xie 	if (ret < 0)
1498f894bf4SYisheng Xie 		return ret;
150d6ca41d7SSteven Rostedt (Red Hat) 
151d6ca41d7SSteven Rostedt (Red Hat) 	*ppos += cnt;
152d6ca41d7SSteven Rostedt (Red Hat) 
153d6ca41d7SSteven Rostedt (Red Hat) 	return cnt;
154d6ca41d7SSteven Rostedt (Red Hat) }
155d6ca41d7SSteven Rostedt (Red Hat) 
/* seq_file boilerplate for the "features" debugfs file. */
static int sched_feat_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_feat_show, NULL);
}
160d6ca41d7SSteven Rostedt (Red Hat) 
/* File operations for /sys/kernel/debug/sched/features. */
static const struct file_operations sched_feat_fops = {
	.open		= sched_feat_open,
	.write		= sched_feat_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
168d6ca41d7SSteven Rostedt (Red Hat) 
1698a99b683SPeter Zijlstra #ifdef CONFIG_SMP
1708a99b683SPeter Zijlstra 
sched_scaling_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)1718a99b683SPeter Zijlstra static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
1728a99b683SPeter Zijlstra 				   size_t cnt, loff_t *ppos)
1738a99b683SPeter Zijlstra {
1748a99b683SPeter Zijlstra 	char buf[16];
17570306618SMel Gorman 	unsigned int scaling;
1768a99b683SPeter Zijlstra 
1778a99b683SPeter Zijlstra 	if (cnt > 15)
1788a99b683SPeter Zijlstra 		cnt = 15;
1798a99b683SPeter Zijlstra 
1808a99b683SPeter Zijlstra 	if (copy_from_user(&buf, ubuf, cnt))
1818a99b683SPeter Zijlstra 		return -EFAULT;
18270306618SMel Gorman 	buf[cnt] = '\0';
1838a99b683SPeter Zijlstra 
18470306618SMel Gorman 	if (kstrtouint(buf, 10, &scaling))
1858a99b683SPeter Zijlstra 		return -EINVAL;
1868a99b683SPeter Zijlstra 
18770306618SMel Gorman 	if (scaling >= SCHED_TUNABLESCALING_END)
18870306618SMel Gorman 		return -EINVAL;
18970306618SMel Gorman 
19070306618SMel Gorman 	sysctl_sched_tunable_scaling = scaling;
1918a99b683SPeter Zijlstra 	if (sched_update_scaling())
1928a99b683SPeter Zijlstra 		return -EINVAL;
1938a99b683SPeter Zijlstra 
1948a99b683SPeter Zijlstra 	*ppos += cnt;
1958a99b683SPeter Zijlstra 	return cnt;
1968a99b683SPeter Zijlstra }
1978a99b683SPeter Zijlstra 
/* Report the current sysctl_sched_tunable_scaling value. */
static int sched_scaling_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", sysctl_sched_tunable_scaling);
	return 0;
}
2038a99b683SPeter Zijlstra 
/* seq_file boilerplate for the "tunable_scaling" debugfs file. */
static int sched_scaling_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_scaling_show, NULL);
}
2088a99b683SPeter Zijlstra 
/* File operations for /sys/kernel/debug/sched/tunable_scaling. */
static const struct file_operations sched_scaling_fops = {
	.open		= sched_scaling_open,
	.write		= sched_scaling_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2168a99b683SPeter Zijlstra 
2178a99b683SPeter Zijlstra #endif /* SMP */
2188a99b683SPeter Zijlstra 
2191011dcceSPeter Zijlstra #ifdef CONFIG_PREEMPT_DYNAMIC
2201011dcceSPeter Zijlstra 
sched_dynamic_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)2211011dcceSPeter Zijlstra static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
2221011dcceSPeter Zijlstra 				   size_t cnt, loff_t *ppos)
2231011dcceSPeter Zijlstra {
2241011dcceSPeter Zijlstra 	char buf[16];
2251011dcceSPeter Zijlstra 	int mode;
2261011dcceSPeter Zijlstra 
2271011dcceSPeter Zijlstra 	if (cnt > 15)
2281011dcceSPeter Zijlstra 		cnt = 15;
2291011dcceSPeter Zijlstra 
2301011dcceSPeter Zijlstra 	if (copy_from_user(&buf, ubuf, cnt))
2311011dcceSPeter Zijlstra 		return -EFAULT;
2321011dcceSPeter Zijlstra 
2331011dcceSPeter Zijlstra 	buf[cnt] = 0;
2341011dcceSPeter Zijlstra 	mode = sched_dynamic_mode(strstrip(buf));
2351011dcceSPeter Zijlstra 	if (mode < 0)
2361011dcceSPeter Zijlstra 		return mode;
2371011dcceSPeter Zijlstra 
2381011dcceSPeter Zijlstra 	sched_dynamic_update(mode);
2391011dcceSPeter Zijlstra 
2401011dcceSPeter Zijlstra 	*ppos += cnt;
2411011dcceSPeter Zijlstra 
2421011dcceSPeter Zijlstra 	return cnt;
2431011dcceSPeter Zijlstra }
2441011dcceSPeter Zijlstra 
sched_dynamic_show(struct seq_file * m,void * v)2451011dcceSPeter Zijlstra static int sched_dynamic_show(struct seq_file *m, void *v)
2461011dcceSPeter Zijlstra {
2471011dcceSPeter Zijlstra 	static const char * preempt_modes[] = {
2481011dcceSPeter Zijlstra 		"none", "voluntary", "full"
2491011dcceSPeter Zijlstra 	};
2501011dcceSPeter Zijlstra 	int i;
2511011dcceSPeter Zijlstra 
2521011dcceSPeter Zijlstra 	for (i = 0; i < ARRAY_SIZE(preempt_modes); i++) {
2531011dcceSPeter Zijlstra 		if (preempt_dynamic_mode == i)
2541011dcceSPeter Zijlstra 			seq_puts(m, "(");
2551011dcceSPeter Zijlstra 		seq_puts(m, preempt_modes[i]);
2561011dcceSPeter Zijlstra 		if (preempt_dynamic_mode == i)
2571011dcceSPeter Zijlstra 			seq_puts(m, ")");
2581011dcceSPeter Zijlstra 
2591011dcceSPeter Zijlstra 		seq_puts(m, " ");
2601011dcceSPeter Zijlstra 	}
2611011dcceSPeter Zijlstra 
2621011dcceSPeter Zijlstra 	seq_puts(m, "\n");
2631011dcceSPeter Zijlstra 	return 0;
2641011dcceSPeter Zijlstra }
2651011dcceSPeter Zijlstra 
/* seq_file boilerplate for the "preempt" debugfs file. */
static int sched_dynamic_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_dynamic_show, NULL);
}
2701011dcceSPeter Zijlstra 
/* File operations for /sys/kernel/debug/sched/preempt. */
static const struct file_operations sched_dynamic_fops = {
	.open		= sched_dynamic_open,
	.write		= sched_dynamic_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2781011dcceSPeter Zijlstra 
2791011dcceSPeter Zijlstra #endif /* CONFIG_PREEMPT_DYNAMIC */
2801011dcceSPeter Zijlstra 
2819406415fSPeter Zijlstra __read_mostly bool sched_debug_verbose;
2829469eb01SPeter Zijlstra 
28334320745SPhil Auld #ifdef CONFIG_SMP
28434320745SPhil Auld static struct dentry           *sd_dentry;
28534320745SPhil Auld 
28634320745SPhil Auld 
sched_verbose_write(struct file * filp,const char __user * ubuf,size_t cnt,loff_t * ppos)28734320745SPhil Auld static ssize_t sched_verbose_write(struct file *filp, const char __user *ubuf,
28834320745SPhil Auld 				  size_t cnt, loff_t *ppos)
28934320745SPhil Auld {
29034320745SPhil Auld 	ssize_t result;
29134320745SPhil Auld 	bool orig;
29234320745SPhil Auld 
29334320745SPhil Auld 	cpus_read_lock();
29434320745SPhil Auld 	mutex_lock(&sched_domains_mutex);
29534320745SPhil Auld 
29634320745SPhil Auld 	orig = sched_debug_verbose;
29734320745SPhil Auld 	result = debugfs_write_file_bool(filp, ubuf, cnt, ppos);
29834320745SPhil Auld 
29934320745SPhil Auld 	if (sched_debug_verbose && !orig)
30034320745SPhil Auld 		update_sched_domain_debugfs();
30134320745SPhil Auld 	else if (!sched_debug_verbose && orig) {
30234320745SPhil Auld 		debugfs_remove(sd_dentry);
30334320745SPhil Auld 		sd_dentry = NULL;
30434320745SPhil Auld 	}
30534320745SPhil Auld 
30634320745SPhil Auld 	mutex_unlock(&sched_domains_mutex);
30734320745SPhil Auld 	cpus_read_unlock();
30834320745SPhil Auld 
30934320745SPhil Auld 	return result;
31034320745SPhil Auld }
31134320745SPhil Auld #else
31234320745SPhil Auld #define sched_verbose_write debugfs_write_file_bool
31334320745SPhil Auld #endif
31434320745SPhil Auld 
/*
 * "verbose" is a bool file; writes are routed through sched_verbose_write()
 * so the domains/ hierarchy can be created or removed to match the flag.
 */
static const struct file_operations sched_verbose_fops = {
	.read =         debugfs_read_file_bool,
	.write =        sched_verbose_write,
	.open =         simple_open,
	.llseek =       default_llseek,
};
32134320745SPhil Auld 
/* Defined further down; the "debug" file is a multi-record seq_file. */
static const struct seq_operations sched_debug_sops;

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

/* File operations for /sys/kernel/debug/sched/debug. */
static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
335d27e9ae2SPeter Zijlstra 
/* Root of the scheduler debugfs hierarchy; NULL until sched_init_debug(). */
static struct dentry *debugfs_sched;

/*
 * Create /sys/kernel/debug/sched/: feature toggles, scheduler tunables,
 * optional NUMA-balancing knobs, the per-CPU domains/ hierarchy and the
 * full "debug" state dump.
 */
static __init int sched_init_debug(void)
{
	struct dentry __maybe_unused *numa;

	debugfs_sched = debugfs_create_dir("sched", NULL);

	debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
	/* _unsafe: the bool fops need no removal protection of their own. */
	debugfs_create_file_unsafe("verbose", 0644, debugfs_sched, &sched_debug_verbose, &sched_verbose_fops);
#ifdef CONFIG_PREEMPT_DYNAMIC
	debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif

	debugfs_create_u32("base_slice_ns", 0644, debugfs_sched, &sysctl_sched_base_slice);

	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
	debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);

#ifdef CONFIG_SMP
	debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
	debugfs_create_u32("migration_cost_ns", 0644, debugfs_sched, &sysctl_sched_migration_cost);
	debugfs_create_u32("nr_migrate", 0644, debugfs_sched, &sysctl_sched_nr_migrate);

	/* update_sched_domain_debugfs() expects sched_domains_mutex held. */
	mutex_lock(&sched_domains_mutex);
	update_sched_domain_debugfs();
	mutex_unlock(&sched_domains_mutex);
#endif

#ifdef CONFIG_NUMA_BALANCING
	numa = debugfs_create_dir("numa_balancing", debugfs_sched);

	debugfs_create_u32("scan_delay_ms", 0644, numa, &sysctl_numa_balancing_scan_delay);
	debugfs_create_u32("scan_period_min_ms", 0644, numa, &sysctl_numa_balancing_scan_period_min);
	debugfs_create_u32("scan_period_max_ms", 0644, numa, &sysctl_numa_balancing_scan_period_max);
	debugfs_create_u32("scan_size_mb", 0644, numa, &sysctl_numa_balancing_scan_size);
	debugfs_create_u32("hot_threshold_ms", 0644, numa, &sysctl_numa_balancing_hot_threshold);
#endif

	debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);

	return 0;
}
late_initcall(sched_init_debug);
380d6ca41d7SSteven Rostedt (Red Hat) 
3813866e845SSteven Rostedt (Red Hat) #ifdef CONFIG_SMP
3823866e845SSteven Rostedt (Red Hat) 
3833b87f136SPeter Zijlstra static cpumask_var_t		sd_sysctl_cpus;
3843866e845SSteven Rostedt (Red Hat) 
sd_flags_show(struct seq_file * m,void * v)3853b87f136SPeter Zijlstra static int sd_flags_show(struct seq_file *m, void *v)
3863866e845SSteven Rostedt (Red Hat) {
3873b87f136SPeter Zijlstra 	unsigned long flags = *(unsigned int *)m->private;
3885b9f8ff7SValentin Schneider 	int idx;
3895b9f8ff7SValentin Schneider 
3905b9f8ff7SValentin Schneider 	for_each_set_bit(idx, &flags, __SD_FLAG_CNT) {
3913b87f136SPeter Zijlstra 		seq_puts(m, sd_flag_debug[idx].name);
3923b87f136SPeter Zijlstra 		seq_puts(m, " ");
3935b9f8ff7SValentin Schneider 	}
3943b87f136SPeter Zijlstra 	seq_puts(m, "\n");
3955b9f8ff7SValentin Schneider 
3965b9f8ff7SValentin Schneider 	return 0;
3975b9f8ff7SValentin Schneider }
3985b9f8ff7SValentin Schneider 
/* inode->i_private carries the address of the domain's flags word. */
static int sd_flags_open(struct inode *inode, struct file *file)
{
	return single_open(file, sd_flags_show, inode->i_private);
}
4033866e845SSteven Rostedt (Red Hat) 
/* File operations for the per-domain "flags"/"groups_flags" files. */
static const struct file_operations sd_flags_fops = {
	.open		= sd_flags_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
4103b87f136SPeter Zijlstra 
register_sd(struct sched_domain * sd,struct dentry * parent)4113b87f136SPeter Zijlstra static void register_sd(struct sched_domain *sd, struct dentry *parent)
4123866e845SSteven Rostedt (Red Hat) {
4133b87f136SPeter Zijlstra #define SDM(type, mode, member)	\
4143b87f136SPeter Zijlstra 	debugfs_create_##type(#member, mode, parent, &sd->member)
4153866e845SSteven Rostedt (Red Hat) 
4163b87f136SPeter Zijlstra 	SDM(ulong, 0644, min_interval);
4173b87f136SPeter Zijlstra 	SDM(ulong, 0644, max_interval);
4183b87f136SPeter Zijlstra 	SDM(u64,   0644, max_newidle_lb_cost);
4193b87f136SPeter Zijlstra 	SDM(u32,   0644, busy_factor);
4203b87f136SPeter Zijlstra 	SDM(u32,   0644, imbalance_pct);
4213b87f136SPeter Zijlstra 	SDM(u32,   0644, cache_nice_tries);
4223b87f136SPeter Zijlstra 	SDM(str,   0444, name);
4233866e845SSteven Rostedt (Red Hat) 
4243b87f136SPeter Zijlstra #undef SDM
4253b87f136SPeter Zijlstra 
4263b87f136SPeter Zijlstra 	debugfs_create_file("flags", 0444, parent, &sd->flags, &sd_flags_fops);
427ed74cc49SPeter Zijlstra 	debugfs_create_file("groups_flags", 0444, parent, &sd->groups->flags, &sd_flags_fops);
4283866e845SSteven Rostedt (Red Hat) }
4293866e845SSteven Rostedt (Red Hat) 
/*
 * (Re)build /sys/kernel/debug/sched/domains/cpu*/domain* for every CPU
 * marked dirty in sd_sysctl_cpus.  Callers hold sched_domains_mutex
 * (see sched_init_debug() and sched_verbose_write()).
 */
void update_sched_domain_debugfs(void)
{
	int cpu, i;

	/*
	 * This can unfortunately be invoked before sched_debug_init() creates
	 * the debug directory. Don't touch sd_sysctl_cpus until then.
	 */
	if (!debugfs_sched)
		return;

	/* The domains/ hierarchy only exists while "verbose" is set. */
	if (!sched_debug_verbose)
		return;

	if (!cpumask_available(sd_sysctl_cpus)) {
		if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
			return;
		/* First use: treat every possible CPU as dirty. */
		cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
	}

	if (!sd_dentry) {
		sd_dentry = debugfs_create_dir("domains", debugfs_sched);

		/* rebuild sd_sysctl_cpus if empty since it gets cleared below */
		if (cpumask_empty(sd_sysctl_cpus))
			cpumask_copy(sd_sysctl_cpus, cpu_online_mask);
	}

	for_each_cpu(cpu, sd_sysctl_cpus) {
		struct sched_domain *sd;
		struct dentry *d_cpu;
		char buf[32];

		snprintf(buf, sizeof(buf), "cpu%d", cpu);
		/* Drop any stale cpuN/ directory before re-creating it. */
		debugfs_lookup_and_remove(buf, sd_dentry);
		d_cpu = debugfs_create_dir(buf, sd_dentry);

		/* One domainN/ subdirectory per level of the hierarchy. */
		i = 0;
		for_each_domain(cpu, sd) {
			struct dentry *d_sd;

			snprintf(buf, sizeof(buf), "domain%d", i);
			d_sd = debugfs_create_dir(buf, d_cpu);

			register_sd(sd, d_sd);
			i++;
		}

		__cpumask_clear_cpu(cpu, sd_sysctl_cpus);
	}
}
4813866e845SSteven Rostedt (Red Hat) 
/*
 * Mark @cpu dirty so the next update_sched_domain_debugfs() call
 * re-creates its cpuN/ directory.
 */
void dirty_sched_domain_sysctl(int cpu)
{
	if (cpumask_available(sd_sysctl_cpus))
		__cpumask_set_cpu(cpu, sd_sysctl_cpus);
}
487bbdacdfeSPeter Zijlstra 
4883866e845SSteven Rostedt (Red Hat) #endif /* CONFIG_SMP */
4893866e845SSteven Rostedt (Red Hat) 
490391e43daSPeter Zijlstra #ifdef CONFIG_FAIR_GROUP_SCHED
/* Dump the per-CPU sched_entity of task group @tg in ".field: value" rows. */
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

/* Local print helpers: plain 64-bit, schedstat, and SPLIT_NS flavours. */
#define P(F)		SEQ_printf(m, "  .%-30s: %lld\n",	#F, (long long)F)
#define P_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld\n",	\
		#F, (long long)schedstat_val(stats->F))
#define PN(F)		SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F)	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", \
		#F, SPLIT_NS((long long)schedstat_val(stats->F)))

	/* Nothing to report when this group has no entity on @cpu. */
	if (!se)
		return;

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);

	/* Wait/sleep/block statistics only exist with schedstats enabled. */
	if (schedstat_enabled()) {
		struct sched_statistics *stats;
		stats = __schedstats_from_se(se);

		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
	}

	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.load_avg);
	P(se->avg.util_avg);
	P(se->avg.runnable_avg);
#endif

#undef PN_SCHEDSTAT
#undef PN
#undef P_SCHEDSTAT
#undef P
}
537391e43daSPeter Zijlstra #endif
538391e43daSPeter Zijlstra 
#ifdef CONFIG_CGROUP_SCHED
/* Serializes use of the shared group_path[] buffer below. */
static DEFINE_SPINLOCK(sched_debug_lock);
static char group_path[PATH_MAX];

/* Resolve @tg's autogroup or cgroup path into @path (at most @plen bytes). */
static void task_group_path(struct task_group *tg, char *path, int plen)
{
	if (autogroup_path(tg, path, plen))
		return;

	cgroup_path(tg->css.cgroup, path, plen);
}

/*
 * Only 1 SEQ_printf_task_group_path() caller can use the full length
 * group_path[] for cgroup path. Other simultaneous callers will have
 * to use a shorter stack buffer. A "..." suffix is appended at the end
 * of the stack buffer so that it will show up in case the output length
 * matches the given buffer size to indicate possible path name truncation.
 */
/* NOTE: a bare brace block, not do-while(0) — call sites omit the ';'. */
#define SEQ_printf_task_group_path(m, tg, fmt...)			\
{									\
	if (spin_trylock(&sched_debug_lock)) {				\
		task_group_path(tg, group_path, sizeof(group_path));	\
		SEQ_printf(m, fmt, group_path);				\
		spin_unlock(&sched_debug_lock);				\
	} else {							\
		char buf[128];						\
		char *bufend = buf + sizeof(buf) - 3;			\
		task_group_path(tg, buf, bufend - buf);			\
		strcpy(bufend - 1, "...");				\
		SEQ_printf(m, fmt, buf);				\
	}								\
}
#endif
573391e43daSPeter Zijlstra 
/*
 * Print one row of the "runnable tasks" table for @p: state, comm, pid,
 * vruntime/deadline/slice, context switches, priority, schedstats and
 * (config-dependent) NUMA and cgroup information.
 */
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	/* ">R" marks the task currently running on this runqueue. */
	if (task_current(rq, p))
		SEQ_printf(m, ">R");
	else
		SEQ_printf(m, " %c", task_state_to_char(p));

	/* 'E'/'N': whether the entity is currently EEVDF-eligible. */
	SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
		SPLIT_NS(p->se.deadline),
		SPLIT_NS(p->se.slice),
		SPLIT_NS(p->se.sum_exec_runtime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);

	/* Schedstat columns fall back to zero when schedstats are off. */
	SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld",
		SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)),
		SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime)));

#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf_task_group_path(m, task_group(p), " %s")
#endif

	SEQ_printf(m, "\n");
}
607391e43daSPeter Zijlstra 
print_rq(struct seq_file * m,struct rq * rq,int rq_cpu)608391e43daSPeter Zijlstra static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
609391e43daSPeter Zijlstra {
610391e43daSPeter Zijlstra 	struct task_struct *g, *p;
611391e43daSPeter Zijlstra 
612e9ca2670SJoe Lawrence 	SEQ_printf(m, "\n");
613e9ca2670SJoe Lawrence 	SEQ_printf(m, "runnable tasks:\n");
614e9ca2670SJoe Lawrence 	SEQ_printf(m, " S            task   PID         tree-key  switches  prio"
615e9ca2670SJoe Lawrence 		   "     wait-time             sum-exec        sum-sleep\n");
616e9ca2670SJoe Lawrence 	SEQ_printf(m, "-------------------------------------------------------"
617f080d93eSXie XiuQi 		   "------------------------------------------------------\n");
618391e43daSPeter Zijlstra 
6195bd96ab6SOleg Nesterov 	rcu_read_lock();
620d38e83c7SOleg Nesterov 	for_each_process_thread(g, p) {
621b32e86b4SIngo Molnar 		if (task_cpu(p) != rq_cpu)
622391e43daSPeter Zijlstra 			continue;
623391e43daSPeter Zijlstra 
624391e43daSPeter Zijlstra 		print_task(m, rq, p);
625d38e83c7SOleg Nesterov 	}
6265bd96ab6SOleg Nesterov 	rcu_read_unlock();
627391e43daSPeter Zijlstra }
628391e43daSPeter Zijlstra 
/*
 * Dump the state of one CPU's cfs_rq: the vruntime window (leftmost,
 * min, avg, rightmost), the nr_running counters, the load/PELT averages
 * and, when configured, bandwidth throttling state and group stats.
 */
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	/* -1 is printed when the cfs_rq has no queued entities. */
	s64 left_vruntime = -1, min_vruntime, right_vruntime = -1, spread;
	struct sched_entity *last, *first;
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	/*
	 * Snapshot the timeline edges under the rq lock so that first,
	 * last and min_vruntime form a coherent view.
	 */
	raw_spin_rq_lock_irqsave(rq, flags);
	first = __pick_first_entity(cfs_rq);
	if (first)
		left_vruntime = first->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		right_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	raw_spin_rq_unlock_irqrestore(rq, flags);

	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "left_vruntime",
			SPLIT_NS(left_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "avg_vruntime",
			SPLIT_NS(avg_vruntime(cfs_rq)));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "right_vruntime",
			SPLIT_NS(right_vruntime));
	spread = right_vruntime - left_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
			cfs_rq->idle_nr_running);
	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
			cfs_rq->idle_h_nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	/* PELT load-tracking averages. */
	SEQ_printf(m, "  .%-30s: %lu\n", "load_avg",
			cfs_rq->avg.load_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "runnable_avg",
			cfs_rq->avg.runnable_avg);
	SEQ_printf(m, "  .%-30s: %lu\n", "util_avg",
			cfs_rq->avg.util_avg);
	SEQ_printf(m, "  .%-30s: %u\n", "util_est_enqueued",
			cfs_rq->avg.util_est.enqueued);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.load_avg",
			cfs_rq->removed.load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.util_avg",
			cfs_rq->removed.util_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "removed.runnable_avg",
			cfs_rq->removed.runnable_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %lu\n", "tg_load_avg_contrib",
			cfs_rq->tg_load_avg_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
708391e43daSPeter Zijlstra 
/*
 * Dump the state of one CPU's rt_rq: running/migratory counts,
 * throttling state and the RT bandwidth accounting clocks.
 */
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\n");
	SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu)
#else
	SEQ_printf(m, "\n");
	SEQ_printf(m, "rt_rq[%d]:\n", cpu);
#endif

/* Field printers: P = signed, PU = unsigned, PN = nanosecond-split. */
#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	PU(rt_nr_running);
#ifdef CONFIG_SMP
	PU(rt_nr_migratory);
#endif
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef PU
#undef P
}
738391e43daSPeter Zijlstra 
/*
 * Dump the state of one CPU's dl_rq: running/migratory task counts and
 * the deadline bandwidth (root-domain-wide on SMP, per-rq otherwise).
 */
void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	struct dl_bw *dl_bw;

	SEQ_printf(m, "\n");
	SEQ_printf(m, "dl_rq[%d]:\n", cpu);

#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(dl_rq->x))

	PU(dl_nr_running);
#ifdef CONFIG_SMP
	PU(dl_nr_migratory);
	/* On SMP deadline bandwidth is tracked per root domain. */
	dl_bw = &cpu_rq(cpu)->rd->dl_bw;
#else
	dl_bw = &dl_rq->dl_bw;
#endif
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->bw", dl_bw->bw);
	SEQ_printf(m, "  .%-30s: %lld\n", "dl_bw->total_bw", dl_bw->total_bw);

#undef PU
}
761acb32132SWanpeng Li 
/*
 * Print everything known about one CPU's runqueue: clock/counter
 * fields, optional schedstats, the per-class rq dumps (CFS, RT, DL)
 * and finally the runnable-task table.
 */
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);

#ifdef CONFIG_X86
	{
		/* cpu_khz may be 0 early; avoid a divide-by-zero below. */
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

/* Print an rq field, choosing the format by the field's storage size. */
#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %d\n", #x, (int)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

/* Print an rq nanosecond field as seconds.microseconds. */
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	P(nr_switches);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
#undef P
#undef PN

#ifdef CONFIG_SMP
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
	P64(avg_idle);
	P64(max_idle_balance_cost);
#undef P64
#endif

/* Schedstat counters; only emitted when schedstats are enabled. */
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, schedstat_val(rq->n));
	if (schedstat_enabled()) {
		P(yld_count);
		P(sched_count);
		P(sched_goidle);
		P(ttwu_count);
		P(ttwu_local);
	}
#undef P

	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	SEQ_printf(m, "\n");
}
822391e43daSPeter Zijlstra 
/* Human-readable names indexed by sysctl_sched_tunable_scaling. */
static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};
828391e43daSPeter Zijlstra 
/*
 * Print the global header of the sched_debug output: version/kernel
 * info, the various clock sources sampled back-to-back, and the
 * scheduler sysctl tunables.
 */
static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	/* Sample all three clocks with IRQs off so they are comparable. */
	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_base_slice);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}
878bbbfeac9SNathan Zimmer 
/*
 * seq_file ->show handler: decode the iterator cookie produced by
 * sched_debug_start() (1 => header, n+2 => CPU n) and print the
 * corresponding section.
 */
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu == -1)
		sched_debug_header(m);
	else
		print_cpu(m, cpu);

	return 0;
}
890391e43daSPeter Zijlstra 
/*
 * SysRq handler: dump the full scheduler state to the console
 * (SEQ_printf falls back to pr_cont when the seq_file is NULL).
 */
void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu) {
		/*
		 * Need to reset softlockup watchdogs on all CPUs, because
		 * another CPU might be blocked waiting for us to process
		 * an IPI or stop_machine.
		 */
		touch_nmi_watchdog();
		touch_all_softlockup_watchdogs();
		print_cpu(NULL, cpu);
	}
}
907bbbfeac9SNathan Zimmer 
908bbbfeac9SNathan Zimmer /*
9093b03706fSIngo Molnar  * This iterator needs some explanation.
910bbbfeac9SNathan Zimmer  * It returns 1 for the header position.
91197fb7a0aSIngo Molnar  * This means 2 is CPU 0.
91297fb7a0aSIngo Molnar  * In a hotplugged system some CPUs, including CPU 0, may be missing so we have
91397fb7a0aSIngo Molnar  * to use cpumask_* to iterate over the CPUs.
914bbbfeac9SNathan Zimmer  */
/*
 * seq_file ->start handler. Offset 0 maps to the header (cookie 1);
 * offset k > 0 maps to the k'th online CPU, returned as cookie n+2 so
 * that CPU 0 does not collide with the NULL/1 sentinels.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	/* Skip over offline CPUs; cpumask_next() takes the previous CPU. */
	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	/* Past the last online CPU => end of sequence. */
	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
936bbbfeac9SNathan Zimmer 
/* seq_file ->next handler: advance the position and re-run ->start. */
static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	loff_t next = *offset + 1;

	*offset = next;
	return sched_debug_start(file, offset);
}
942bbbfeac9SNathan Zimmer 
/* seq_file ->stop handler: nothing to release, ->start took no locks. */
static void sched_debug_stop(struct seq_file *file, void *data)
{
}
946bbbfeac9SNathan Zimmer 
/* seq_file operations for the sched_debug procfs/debugfs interface. */
static const struct seq_operations sched_debug_sops = {
	.start		= sched_debug_start,
	.next		= sched_debug_next,
	.stop		= sched_debug_stop,
	.show		= sched_debug_show,
};
953bbbfeac9SNathan Zimmer 
/*
 * Per-task field printers used by proc_sched_show_task() and
 * sched_show_numa(); all expect a seq_file 'm' (and most a task 'p')
 * in scope.  __PS/__PSN take an explicit label; __P/__PN stringify the
 * expression; P/PN/PM print a member of 'p' (PM applies a mask first).
 * The *N variants split nanoseconds into seconds.microseconds.
 */
#define __PS(S, F) SEQ_printf(m, "%-45s:%21Ld\n", S, (long long)(F))
#define __P(F) __PS(#F, F)
#define   P(F) __PS(#F, p->F)
#define   PM(F, M) __PS(#F, p->F & (M))
#define __PSN(S, F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", S, SPLIT_NS((long long)(F)))
#define __PN(F) __PSN(#F, F)
#define   PN(F) __PSN(#F, p->F)
961b32e86b4SIngo Molnar 
962b32e86b4SIngo Molnar 
#ifdef CONFIG_NUMA_BALANCING
/*
 * Emit one "numa_faults" line for @node: task-private/shared and
 * group-private/shared fault counts.
 */
void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		unsigned long tpf, unsigned long gsf, unsigned long gpf)
{
	SEQ_printf(m, "numa_faults node=%d ", node);
	SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
	SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
}
#endif
972397f2378SSrikar Dronamraju 
973397f2378SSrikar Dronamraju 
/*
 * Append NUMA-balancing state for @p (scan sequence, migrated pages,
 * preferred node, fault totals and per-node stats); no-op when
 * CONFIG_NUMA_BALANCING is off.
 */
static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	/* Kernel threads have no mm, hence no scan sequence to show. */
	if (p->mm)
		P(mm->numa_scan_seq);

	P(numa_pages_migrated);
	P(numa_preferred_nid);
	P(total_numa_faults);
	SEQ_printf(m, "current_node=%d, numa_group_id=%d\n",
			task_node(p), task_numa_group_id(p));
	show_numa_stats(p, m);
#endif
}
988b32e86b4SIngo Molnar 
/*
 * Back /proc/<pid>/sched: print the full per-task scheduler state for
 * @p into @m, with the PID translated into pid namespace @ns.  Covers
 * the sched_entity clocks, the schedstat counters (when enabled), PELT
 * averages, uclamp values, policy/priority and NUMA state.
 */
void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
						  struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");

/* Like P/PN but reading from the task's schedstat block. */
#define P_SCHEDSTAT(F)  __PS(#F, schedstat_val(p->stats.F))
#define PN_SCHEDSTAT(F) __PSN(#F, schedstat_val(p->stats.F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

	P(se.nr_migrations);

	if (schedstat_enabled()) {
		u64 avg_atom, avg_per_cpu;

		PN_SCHEDSTAT(sum_sleep_runtime);
		PN_SCHEDSTAT(sum_block_runtime);
		PN_SCHEDSTAT(wait_start);
		PN_SCHEDSTAT(sleep_start);
		PN_SCHEDSTAT(block_start);
		PN_SCHEDSTAT(sleep_max);
		PN_SCHEDSTAT(block_max);
		PN_SCHEDSTAT(exec_max);
		PN_SCHEDSTAT(slice_max);
		PN_SCHEDSTAT(wait_max);
		PN_SCHEDSTAT(wait_sum);
		P_SCHEDSTAT(wait_count);
		PN_SCHEDSTAT(iowait_sum);
		P_SCHEDSTAT(iowait_count);
		P_SCHEDSTAT(nr_migrations_cold);
		P_SCHEDSTAT(nr_failed_migrations_affine);
		P_SCHEDSTAT(nr_failed_migrations_running);
		P_SCHEDSTAT(nr_failed_migrations_hot);
		P_SCHEDSTAT(nr_forced_migrations);
		P_SCHEDSTAT(nr_wakeups);
		P_SCHEDSTAT(nr_wakeups_sync);
		P_SCHEDSTAT(nr_wakeups_migrate);
		P_SCHEDSTAT(nr_wakeups_local);
		P_SCHEDSTAT(nr_wakeups_remote);
		P_SCHEDSTAT(nr_wakeups_affine);
		P_SCHEDSTAT(nr_wakeups_affine_attempts);
		P_SCHEDSTAT(nr_wakeups_passive);
		P_SCHEDSTAT(nr_wakeups_idle);

		/* Mean runtime per scheduling atom; -1 when no switches yet. */
		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		/* Mean runtime between migrations; -1 when never migrated. */
		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);

#ifdef CONFIG_SCHED_CORE
		PN_SCHEDSTAT(core_forceidle_sum);
#endif
	}

	__P(nr_switches);
	__PS("nr_voluntary_switches", p->nvcsw);
	__PS("nr_involuntary_switches", p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.load_sum);
	P(se.avg.runnable_sum);
	P(se.avg.util_sum);
	P(se.avg.load_avg);
	P(se.avg.runnable_avg);
	P(se.avg.util_avg);
	P(se.avg.last_update_time);
	P(se.avg.util_est.ewma);
	/* Mask off the UTIL_AVG_UNCHANGED flag bit before printing. */
	PM(se.avg.util_est.enqueued, ~UTIL_AVG_UNCHANGED);
#endif
#ifdef CONFIG_UCLAMP_TASK
	__PS("uclamp.min", p->uclamp_req[UCLAMP_MIN].value);
	__PS("uclamp.max", p->uclamp_req[UCLAMP_MAX].value);
	__PS("effective uclamp.min", uclamp_eff_value(p, UCLAMP_MIN));
	__PS("effective uclamp.max", uclamp_eff_value(p, UCLAMP_MAX));
#endif
	P(policy);
	P(prio);
	if (task_has_dl_policy(p)) {
		P(dl.runtime);
		P(dl.deadline);
	} else if (fair_policy(p->policy)) {
		P(se.slice);
	}
#undef PN_SCHEDSTAT
#undef P_SCHEDSTAT

	{
		/* Back-to-back samples estimate the local clock resolution. */
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		__PS("clock-delta", t1-t0);
	}

	sched_show_numa(p, m);
}
1109391e43daSPeter Zijlstra 
/*
 * Reset @p's schedstat counters; invoked when /proc/<pid>/sched is
 * written to.  No-op without CONFIG_SCHEDSTATS.
 */
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->stats, 0, sizeof(p->stats));
#endif
}
1116c006fac5SPaul Turner 
/*
 * Warn (rate-limited to once per hour) that @cpu kept need_resched set
 * for @latency ns without scheduling.
 */
void resched_latency_warn(int cpu, u64 latency)
{
	/* Shared across CPUs: at most one warning per 60*60*HZ jiffies. */
	static DEFINE_RATELIMIT_STATE(latency_check_ratelimit, 60 * 60 * HZ, 1);

	WARN(__ratelimit(&latency_check_ratelimit),
	     "sched: CPU %d need_resched set for > %llu ns (%d ticks) "
	     "without schedule\n",
	     cpu, latency, cpu_rq(cpu)->ticks_without_resched);
}
1126