/*
 * kernel/sched/debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>

#include "sched.h"

static DEFINE_SPINLOCK(sched_debug_lock);

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
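/*
 * Passing m == NULL routes the output to the console via printk();
 * sysrq_sched_debug_show() below relies on this to emit the same
 * report outside of /proc.
 */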

/*
 * Ease the printing of nsec fields:
 */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		do_div(nsec, 1000000);
		return -nsec;
	}
	do_div(nsec, 1000000);

	return nsec;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;

	return do_div(nsec, 1000000);
}

#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
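/*
 * Example: SPLIT_NS(1234567890) expands to the argument pair (1234, 567890),
 * so a "%Ld.%06ld" format prints it as "1234.567890" (nanoseconds rendered
 * as milliseconds with six fractional digits). Negative values come out as
 * e.g. "-1234.567890", since nsec_high() carries the sign and nsec_low()
 * returns the magnitude of the remainder.
 */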

#ifdef CONFIG_FAIR_GROUP_SCHED
static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	if (!se) {
		struct sched_avg *avg = &cpu_rq(cpu)->avg;
		P(avg->runnable_avg_sum);
		P(avg->runnable_avg_period);
		return;
	}

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->statistics.wait_start);
	PN(se->statistics.sleep_start);
	PN(se->statistics.block_start);
	PN(se->statistics.sleep_max);
	PN(se->statistics.block_max);
	PN(se->statistics.exec_max);
	PN(se->statistics.slice_max);
	PN(se->statistics.wait_max);
	PN(se->statistics.wait_sum);
	P(se->statistics.wait_count);
#endif
	P(se->load.weight);
#ifdef CONFIG_SMP
	P(se->avg.runnable_avg_sum);
	P(se->avg.runnable_avg_period);
	P(se->avg.load_avg_contrib);
	P(se->avg.decay_count);
#endif
#undef PN
#undef P
}
#endif

#ifdef CONFIG_CGROUP_SCHED
static char group_path[PATH_MAX];

static char *task_group_path(struct task_group *tg)
{
	if (autogroup_path(tg, group_path, PATH_MAX))
		return group_path;

	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
}
#endif
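/*
 * Note that group_path[] is a single shared buffer, so users of
 * task_group_path() must be serialized; sched_debug_lock, taken in
 * print_cpu() around the printing paths, serves that purpose.
 */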

static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
	if (rq->curr == p)
		SEQ_printf(m, "R");
	else
		SEQ_printf(m, " ");

	SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
		p->comm, task_pid_nr(p),
		SPLIT_NS(p->se.vruntime),
		(long long)(p->nvcsw + p->nivcsw),
		p->prio);
#ifdef CONFIG_SCHEDSTATS
	SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
		SPLIT_NS(p->se.vruntime),
		SPLIT_NS(p->se.sum_exec_runtime),
		SPLIT_NS(p->se.statistics.sum_sleep_runtime));
#else
	SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
		0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
#endif
#ifdef CONFIG_NUMA_BALANCING
	SEQ_printf(m, " %d", task_node(p));
#endif
#ifdef CONFIG_CGROUP_SCHED
	SEQ_printf(m, " %s", task_group_path(task_group(p)));
#endif

	SEQ_printf(m, "\n");
}

static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
	struct task_struct *g, *p;

	SEQ_printf(m,
	"\nrunnable tasks:\n"
	"            task   PID         tree-key  switches  prio"
	"     exec-runtime         sum-exec        sum-sleep\n"
	"------------------------------------------------------"
	"----------------------------------------------------\n");

	rcu_read_lock();
	for_each_process_thread(g, p) {
		if (task_cpu(p) != rq_cpu)
			continue;

		print_task(m, rq, p);
	}
	rcu_read_unlock();
}
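/*
 * An illustrative row (values made up; columns follow the header and the
 * format strings in print_task() above):
 *
 * R            cat  2971        43.107979       409   120 ...
 */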

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
	s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
		spread, rq0_min_vruntime, spread0;
	struct rq *rq = cpu_rq(cpu);
	struct sched_entity *last;
	unsigned long flags;

#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
#else
	SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
#endif
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "exec_clock",
			SPLIT_NS(cfs_rq->exec_clock));

	raw_spin_lock_irqsave(&rq->lock, flags);
	if (cfs_rq->rb_leftmost)
		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
	last = __pick_last_entity(cfs_rq);
	if (last)
		max_vruntime = last->vruntime;
	min_vruntime = cfs_rq->min_vruntime;
	rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
	raw_spin_unlock_irqrestore(&rq->lock, flags);
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "MIN_vruntime",
			SPLIT_NS(MIN_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "min_vruntime",
			SPLIT_NS(min_vruntime));
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "max_vruntime",
			SPLIT_NS(max_vruntime));
	spread = max_vruntime - MIN_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread",
			SPLIT_NS(spread));
	spread0 = min_vruntime - rq0_min_vruntime;
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "spread0",
			SPLIT_NS(spread0));
	SEQ_printf(m, "  .%-30s: %d\n", "nr_spread_over",
			cfs_rq->nr_spread_over);
	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
#ifdef CONFIG_SMP
	SEQ_printf(m, "  .%-30s: %ld\n", "runnable_load_avg",
			cfs_rq->runnable_load_avg);
	SEQ_printf(m, "  .%-30s: %ld\n", "blocked_load_avg",
			cfs_rq->blocked_load_avg);
#ifdef CONFIG_FAIR_GROUP_SCHED
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_contrib",
			cfs_rq->tg_load_contrib);
	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
			cfs_rq->tg_runnable_contrib);
	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
			atomic_long_read(&cfs_rq->tg->load_avg));
	SEQ_printf(m, "  .%-30s: %d\n", "tg->runnable_avg",
			atomic_read(&cfs_rq->tg->runnable_avg));
#endif
#endif
#ifdef CONFIG_CFS_BANDWIDTH
	SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
			cfs_rq->tg->cfs_bandwidth.timer_active);
	SEQ_printf(m, "  .%-30s: %d\n", "throttled",
			cfs_rq->throttled);
	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
			cfs_rq->throttle_count);
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_group_stats(m, cpu, cfs_rq->tg);
#endif
}
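/*
 * Illustrative excerpt of the resulting output (made-up values, root
 * task group):
 *
 * cfs_rq[0]:/
 *   .exec_clock                    : 2655.862267
 *   .MIN_vruntime                  : 0.000001
 *   .min_vruntime                  : 2584.752353
 *   ...
 */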

void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
{
#ifdef CONFIG_RT_GROUP_SCHED
	SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
#else
	SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
#endif

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))

	P(rt_nr_running);
	P(rt_throttled);
	PN(rt_time);
	PN(rt_runtime);

#undef PN
#undef P
}

void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
{
	SEQ_printf(m, "\ndl_rq[%d]:\n", cpu);
	SEQ_printf(m, "  .%-30s: %ld\n", "dl_nr_running", dl_rq->dl_nr_running);
}

extern __read_mostly int sched_clock_running;

static void print_cpu(struct seq_file *m, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

#ifdef CONFIG_X86
	{
		unsigned int freq = cpu_khz ? : 1;

		SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
			   cpu, freq / 1000, (freq % 1000));
	}
#else
	SEQ_printf(m, "cpu#%d\n", cpu);
#endif

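/*
 * P() dispatches on the field size so that both 32-bit and 64-bit
 * rq members print correctly without per-field format annotations.
 */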
#define P(x)								\
do {									\
	if (sizeof(rq->x) == 4)						\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x));	\
	else								\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x));\
} while (0)

#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))

	P(nr_running);
	SEQ_printf(m, "  .%-30s: %lu\n", "load",
		   rq->load.weight);
	P(nr_switches);
	P(nr_load_updates);
	P(nr_uninterruptible);
	PN(next_balance);
	SEQ_printf(m, "  .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
	PN(clock);
	PN(clock_task);
	P(cpu_load[0]);
	P(cpu_load[1]);
	P(cpu_load[2]);
	P(cpu_load[3]);
	P(cpu_load[4]);
#undef P
#undef PN

#ifdef CONFIG_SCHEDSTATS
#define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
#define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);

	P(yld_count);

	P(sched_count);
	P(sched_goidle);
#ifdef CONFIG_SMP
	P64(avg_idle);
	P64(max_idle_balance_cost);
#endif

	P(ttwu_count);
	P(ttwu_local);

#undef P
#undef P64
#endif
	spin_lock_irqsave(&sched_debug_lock, flags);
	print_cfs_stats(m, cpu);
	print_rt_stats(m, cpu);
	print_dl_stats(m, cpu);

	print_rq(m, rq, cpu);
	spin_unlock_irqrestore(&sched_debug_lock, flags);
	SEQ_printf(m, "\n");
}

static const char *sched_tunable_scaling_names[] = {
	"none",
	"logarithmic",
	"linear"
};

static void sched_debug_header(struct seq_file *m)
{
	u64 ktime, sched_clk, cpu_clk;
	unsigned long flags;

	local_irq_save(flags);
	ktime = ktime_to_ns(ktime_get());
	sched_clk = sched_clock();
	cpu_clk = local_clock();
	local_irq_restore(flags);

	SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);

#define P(x) \
	SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(ktime);
	PN(sched_clk);
	PN(cpu_clk);
	P(jiffies);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
	P(sched_clock_stable());
#endif
#undef PN
#undef P

	SEQ_printf(m, "\n");
	SEQ_printf(m, "sysctl_sched\n");

#define P(x) \
	SEQ_printf(m, "  .%-40s: %Ld\n", #x, (long long)(x))
#define PN(x) \
	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
	PN(sysctl_sched_latency);
	PN(sysctl_sched_min_granularity);
	PN(sysctl_sched_wakeup_granularity);
	P(sysctl_sched_child_runs_first);
	P(sysctl_sched_features);
#undef PN
#undef P

	SEQ_printf(m, "  .%-40s: %d (%s)\n",
		"sysctl_sched_tunable_scaling",
		sysctl_sched_tunable_scaling,
		sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
	SEQ_printf(m, "\n");
}

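/*
 * v is the cookie produced by sched_debug_start(): (void *)1 for the
 * header record and cpu + 2 for a CPU record, so subtracting 2 yields
 * -1 for the header.
 */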
static int sched_debug_show(struct seq_file *m, void *v)
{
	int cpu = (unsigned long)(v - 2);

	if (cpu != -1)
		print_cpu(m, cpu);
	else
		sched_debug_header(m);

	return 0;
}

void sysrq_sched_debug_show(void)
{
	int cpu;

	sched_debug_header(NULL);
	for_each_online_cpu(cpu)
		print_cpu(NULL, cpu);
}

/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing, so we
 * have to use cpumask_* to iterate over the CPUs.
 */
static void *sched_debug_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *) 1;

	n--;

	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);
	return NULL;
}
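/*
 * Worked example with CPUs 0 and 1 online:
 *
 *   *offset == 0 -> (void *)1, the header token
 *   *offset == 1 -> cpumask_first() == 0  -> (void *)2  (cpu 0)
 *   *offset == 2 -> cpumask_next(0) == 1  -> (void *)3  (cpu 1)
 *   *offset == 3 -> cpumask_next(1) >= nr_cpu_ids -> NULL (iteration ends)
 *
 * sched_debug_show() undoes the "+ 2" bias to recover the cpu number.
 */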

static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	return sched_debug_start(file, offset);
}

static void sched_debug_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations sched_debug_sops = {
	.start = sched_debug_start,
	.next = sched_debug_next,
	.stop = sched_debug_stop,
	.show = sched_debug_show,
};

static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &sched_debug_sops);
}

static const struct file_operations sched_debug_fops = {
	.open		= sched_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
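/*
 * The file is registered world-readable (mode 0444), so the full report
 * can be read with e.g.:
 *
 *	$ cat /proc/sched_debug
 */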

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

static void sched_show_numa(struct task_struct *p, struct seq_file *m)
{
#ifdef CONFIG_NUMA_BALANCING
	struct mempolicy *pol;
	int node, i;

	if (p->mm)
		P(mm->numa_scan_seq);

	task_lock(p);
	pol = p->mempolicy;
	if (pol && !(pol->flags & MPOL_F_MORON))
		pol = NULL;
	mpol_get(pol);
	task_unlock(p);

	SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));

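	/*
	 * Two rows are emitted per node: for row i == 0 the flag column
	 * says whether the task currently runs on this node, for i == 1
	 * whether the node is in the task's memory policy; home_node
	 * marks the preferred node, and nr_faults reads as -1 when no
	 * fault statistics have been allocated.
	 */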
	for_each_online_node(node) {
		for (i = 0; i < 2; i++) {
			unsigned long nr_faults = -1;
			int cpu_current, home_node;

			if (p->numa_faults)
				nr_faults = p->numa_faults[2*node + i];

			cpu_current = !i ? (task_node(p) == node) :
				(pol && node_isset(node, pol->v.nodes));

			home_node = (p->numa_preferred_nid == node);

			SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
				i, node, cpu_current, home_node, nr_faults);
		}
	}

	mpol_put(pol);
#endif
}

void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
	unsigned long nr_switches;

	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
						get_nr_threads(p));
	SEQ_printf(m,
		"---------------------------------------------------------"
		"----------\n");
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
	SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))

	PN(se.exec_start);
	PN(se.vruntime);
	PN(se.sum_exec_runtime);

	nr_switches = p->nvcsw + p->nivcsw;

#ifdef CONFIG_SCHEDSTATS
	PN(se.statistics.wait_start);
	PN(se.statistics.sleep_start);
	PN(se.statistics.block_start);
	PN(se.statistics.sleep_max);
	PN(se.statistics.block_max);
	PN(se.statistics.exec_max);
	PN(se.statistics.slice_max);
	PN(se.statistics.wait_max);
	PN(se.statistics.wait_sum);
	P(se.statistics.wait_count);
	PN(se.statistics.iowait_sum);
	P(se.statistics.iowait_count);
	P(se.nr_migrations);
	P(se.statistics.nr_migrations_cold);
	P(se.statistics.nr_failed_migrations_affine);
	P(se.statistics.nr_failed_migrations_running);
	P(se.statistics.nr_failed_migrations_hot);
	P(se.statistics.nr_forced_migrations);
	P(se.statistics.nr_wakeups);
	P(se.statistics.nr_wakeups_sync);
	P(se.statistics.nr_wakeups_migrate);
	P(se.statistics.nr_wakeups_local);
	P(se.statistics.nr_wakeups_remote);
	P(se.statistics.nr_wakeups_affine);
	P(se.statistics.nr_wakeups_affine_attempts);
	P(se.statistics.nr_wakeups_passive);
	P(se.statistics.nr_wakeups_idle);

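	/*
	 * avg_atom: mean runtime between context switches; avg_per_cpu:
	 * mean runtime between migrations. -1 is reported when the
	 * corresponding denominator is zero.
	 */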
	{
		u64 avg_atom, avg_per_cpu;

		avg_atom = p->se.sum_exec_runtime;
		if (nr_switches)
			avg_atom = div64_ul(avg_atom, nr_switches);
		else
			avg_atom = -1LL;

		avg_per_cpu = p->se.sum_exec_runtime;
		if (p->se.nr_migrations) {
			avg_per_cpu = div64_u64(avg_per_cpu,
						p->se.nr_migrations);
		} else {
			avg_per_cpu = -1LL;
		}

		__PN(avg_atom);
		__PN(avg_per_cpu);
	}
#endif
	__P(nr_switches);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_voluntary_switches", (long long)p->nvcsw);
	SEQ_printf(m, "%-45s:%21Ld\n",
		   "nr_involuntary_switches", (long long)p->nivcsw);

	P(se.load.weight);
#ifdef CONFIG_SMP
	P(se.avg.runnable_avg_sum);
	P(se.avg.runnable_avg_period);
	P(se.avg.load_avg_contrib);
	P(se.avg.decay_count);
#endif
	P(policy);
	P(prio);
#undef PN
#undef __PN
#undef P
#undef __P

	{
		unsigned int this_cpu = raw_smp_processor_id();
		u64 t0, t1;

		t0 = cpu_clock(this_cpu);
		t1 = cpu_clock(this_cpu);
		SEQ_printf(m, "%-45s:%21Ld\n",
			   "clock-delta", (long long)(t1-t0));
	}

	sched_show_numa(p, m);
}

void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
}
669