xref: /openbmc/linux/kernel/sched/debug.c (revision 861e10be)
/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
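/*
 * NOTE: m == NULL selects the printk() path; sysrq_sched_debug_show()
 * below relies on this to dump the report to the console.
 */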

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
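/*
 * Worked example: for x = 1234567890 ns, nsec_high() returns 1234 and
 * nsec_low() returns 567890, so a "%Ld.%06ld" format prints
 * "1234.567890", i.e. the value in milliseconds.
 */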

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

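	/*
	 * tg->se[cpu] is NULL for the root task group, which has no
	 * per-cpu sched entities; fall back to the rq-wide averages.
	 */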
	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

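/*
 * group_path is a single shared buffer; the printing paths in this file
 * that reach it all run under sched_debug_lock (taken in print_cpu()),
 * which serializes its use.
 */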
static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	/*
	 * May be NULL if the underlying cgroup isn't fully created yet
	 */
	if (!tg->css.cgroup) {
		group_path[0] = '\0';
		return group_path;
	}
	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
	return group_path;
}
#endif

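/*
 * Print one row of the "runnable tasks" table emitted by print_rq();
 * a leading 'R' marks the task currently running on this runqueue.
 */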
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, p->pid,
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;
	unsigned long flags;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	read_lock_irqsave(&tasklist_lock, flags);

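	/*
	 * Walk every thread in the system and print only those currently
	 * queued on this CPU.
	 */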
	do_each_thread(g, p) {
		if (!p->on_rq || task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	} while_each_thread(g, p);

	read_unlock_irqrestore(&tasklist_lock, flags);
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
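	/*
	 * spread is the vruntime gap between the leftmost and rightmost
	 * queued entities; spread0 compares this runqueue's min_vruntime
	 * against CPU 0's, giving a rough cross-CPU skew indication.
	 */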
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_FAIR_GROUP_SCHED
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %lld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
			(long long)atomic64_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif

	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "\ncpu#%d\n", cpu);
#endif

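/*
 * rq fields differ in width between configs; dispatch on sizeof() so
 * 32-bit fields are printed via %ld and 64-bit fields via %Ld.
 */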
#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	P(curr->pid);
	PN(clock);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);

	rcu_read_lock();
	print_rq(m, rq, cpu);
	rcu_read_unlock();
	spin_unlock_irqrestore(&sched_debug_lock, flags);
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static int sched_debug_show(struct seq_file *m, void *v)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;
	int cpu;
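	/*
	 * Sample all three clocks with interrupts disabled so the values
	 * refer to (almost) the same instant.
	 */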
	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable);
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);

	for_each_online_cpu(cpu)
		print_cpu(m, cpu);

	SEQ_printf(m, "\n");

	return 0;
}

void sysrq_sched_debug_show(void)
{
	sched_debug_show(NULL, NULL);
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_debug_show, NULL);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
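/*
 * Typically read via `cat /proc/sched_debug`; single_open() makes
 * sched_debug_show() produce the whole report as a single seq_file
 * record.
 */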

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------\n");
#define __P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

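	/*
	 * avg_atom: mean CPU time per scheduling "atom", i.e. between two
	 * context switches; avg_per_cpu: mean CPU time between migrations.
	 * -1 serves as a "not available" sentinel when the divisor is 0.
	 */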
	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_u64(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-35s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

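	/*
	 * Two back-to-back cpu_clock() reads; the reported clock-delta
	 * approximates the overhead/granularity of a sched clock read on
	 * this CPU.
	 */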
	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-35s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
532