xref: /openbmc/linux/kernel/sched/debug.c (revision 18bf2805)
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
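
/*
 * Illustrative note: the NULL-seq_file case is not theoretical -- the
 * sysrq path (sysrq_sched_debug_show() below) calls in with m == NULL,
 * so the same dump falls through to printk() and lands on the console.
 */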

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
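
/*
 * Worked example: for nsec = 1234567890, nsec_high() returns 1234 and
 * nsec_low() returns 567890, so SPLIT_NS() fed to "%Ld.%06ld" prints
 * "1234.567890" -- the value in milliseconds with full ns precision.
 * do_div() divides in place and returns the remainder, which is why
 * nsec_low() can return its result directly.
 */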

#ifdef CONFIG_FAIR_GROUP_SCHED
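/*
 * Per-cpu stats for one task group's sched_entity. When the group has no
 * entity on this CPU (the root group), fall back to the rq-wide runnable
 * averages instead.
 */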
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

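/*
 * group_path is a single shared buffer, not per-caller; concurrent users
 * are expected to be serialized (print_cpu() holds sched_debug_lock around
 * everything that ends up here).
 */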
static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	/*
	 * May be NULL if the underlying cgroup isn't fully created yet
	 */
	if (!tg->css.cgroup) {
		group_path[0] = '\0';
		return group_path;
	}
	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

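/*
 * One line per runnable task, matching the column header emitted by
 * print_rq(): an "R" marks the task currently running on this rq,
 * followed by comm, PID, vruntime (the tree-key), switch count and
 * priority.
 */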
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, p->pid,
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

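/*
 * Walk the whole tasklist and print every task that is queued on this
 * CPU's runqueue. tasklist_lock is taken with IRQs off so the thread
 * walk stays stable while we print.
 */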
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

	do_each_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

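/*
 * Dump one cfs_rq: MIN_vruntime/max_vruntime are the leftmost/rightmost
 * entities in the rbtree (-1 when the tree is empty), spread is the gap
 * between them, and spread0 compares this rq's min_vruntime against
 * CPU0's as a rough cross-cpu drift indicator.
 */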
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_avg",
			SPLIT_NS(cfs_rq->load_avg));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_period",
			SPLIT_NS(cfs_rq->load_period));
	SEQ_printf(m, "  .%-30s: %ld\n", "load_contrib",
			cfs_rq->load_contribution);
	SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
			atomic_read(&cfs_rq->tg->load_weight));
#endif

	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

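/*
 * Dump one rt_rq: runnable count, throttle state, and the accumulated
 * rt_time against its rt_runtime budget.
 */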
void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

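/*
 * Top-level per-cpu dump: rq counters, per-class stats and finally the
 * runnable task list. sched_debug_lock serializes the whole block, mainly
 * so concurrent dumpers don't interleave in the shared group_path buffer
 * above.
 */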
static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "\ncpu#%d\n", cpu);
#endif

#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	P(curr->pid);
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

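/*
 * Entry point for /proc/sched_debug (and, with m == NULL, for sysrq).
 * The three clocks are sampled back to back with IRQs off so that the
 * ktime, sched_clock() and local_clock() values in the header describe
 * the same instant.
 */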
static int sched_debug_show(struct seq_file *m, void *v)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;
	int cpu;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);

	for_each_online_cpu(cpu)
		print_cpu(m, cpu);

	SEQ_printf(m, "\n");

	return 0;
}

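/* sysrq path: a NULL seq_file makes SEQ_printf() fall back to printk(). */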
void sysrq_sched_debug_show(void)
{
	sched_debug_show(NULL, NULL);
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_debug_show, NULL);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

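/* Register the read-only (0444) /proc/sched_debug entry at boot. */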
static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

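/*
 * Backs /proc/<pid>/sched: per-task scheduling state and, with
 * CONFIG_SCHEDSTATS, the full per-task statistics block.
 */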
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------\n");
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

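	/*
	 * Derived averages: avg_atom is the mean runtime between context
	 * switches, avg_per_cpu the mean runtime between migrations; both
	 * report -1 while the divisor is still zero.
	 */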
	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			do_div(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

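	/*
	 * Sample cpu_clock() twice back to back; the reported clock-delta
	 * gives a feel for the local clock's read overhead and granularity.
	 */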
	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-35s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}

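/*
 * Called when /proc/<pid>/sched is written to: reset the accumulated
 * schedstats so a fresh measurement interval can start.
 */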
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}