xref: /openbmc/linux/kernel/sched/stats.c (revision 80d0624d)
// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */

void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
			       struct sched_statistics *stats)
{
	u64 wait_start, prev_wait_start;

	wait_start = rq_clock(rq);
	prev_wait_start = schedstat_val(stats->wait_start);

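	/*
	 * If the task is being migrated, wait_start already holds the wait
	 * time accumulated on the previous CPU (stashed there by
	 * __update_stats_wait_end()), so rebase the new timestamp to keep
	 * accumulating across the migration.
	 */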
	if (p && likely(wait_start > prev_wait_start))
		wait_start -= prev_wait_start;

	__schedstat_set(stats->wait_start, wait_start);
}

void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
			     struct sched_statistics *stats)
{
	u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);

	if (p) {
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve a migrating task's wait time so that the
			 * wait_start timestamp can be adjusted to accumulate
			 * the wait time accrued prior to migration.
			 */
			__schedstat_set(stats->wait_start, delta);

			return;
		}

		trace_sched_stat_wait(p, delta);
	}

	__schedstat_set(stats->wait_max,
			max(schedstat_val(stats->wait_max), delta));
	__schedstat_inc(stats->wait_count);
	__schedstat_add(stats->wait_sum, delta);
	__schedstat_set(stats->wait_start, 0);
}

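/*
 * Illustrative timeline of the migration hand-off above, with made-up
 * clock values (not taken from any real trace):
 *
 *   t=100  task enqueued on CPU0      -> wait_start = 100
 *   t=130  task migrates off CPU0     -> wait_end() stashes delta = 30
 *   t=150  task enqueued on CPU1      -> wait_start = 150 - 30 = 120
 *   t=170  task picked to run on CPU1 -> wait_end() reports 170 - 120 = 50
 *
 * i.e. 30 units waited on CPU0 plus 20 units waited on CPU1.
 */
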
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats)
{
	u64 sleep_start, block_start;

	sleep_start = schedstat_val(stats->sleep_start);
	block_start = schedstat_val(stats->block_start);

	if (sleep_start) {
		u64 delta = rq_clock(rq) - sleep_start;

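		/*
		 * The start timestamp may have been taken on another CPU's
		 * clock, so the difference can go negative; clamp it rather
		 * than letting a huge unsigned value pollute the stats.
		 */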
		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->sleep_max)))
			__schedstat_set(stats->sleep_max, delta);

		__schedstat_set(stats->sleep_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);

		if (p) {
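			/* delta >> 10 is a cheap ns -> ~us conversion for latencytop. */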
			account_scheduler_latency(p, delta >> 10, 1);
			trace_sched_stat_sleep(p, delta);
		}
	}

	if (block_start) {
		u64 delta = rq_clock(rq) - block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->block_max)))
			__schedstat_set(stats->block_max, delta);

		__schedstat_set(stats->block_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);
		__schedstat_add(stats->sum_block_runtime, delta);

		if (p) {
			if (p->in_iowait) {
				__schedstat_add(stats->iowait_sum, delta);
				__schedstat_inc(stats->iowait_count);
				trace_sched_stat_iowait(p, delta);
			}

			trace_sched_stat_blocked(p, delta);

			account_scheduler_latency(p, delta >> 10, 0);
		}
	}
}

/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an
 * existing format, so that tools can adapt (or abort).
 */
#define SCHEDSTAT_VERSION 15

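/*
 * Sketch of the output produced by show_schedstat() below (the field
 * values are made up for illustration):
 *
 *   version 15
 *   timestamp 4294892985
 *   cpu0 3 0 14530 1534 6743 1230 180790963 41782738 15237
 *   domain0 003 ...
 *
 * One "cpuN" line is emitted per online CPU and, under CONFIG_SMP, one
 * "domainN" line per scheduling domain of that CPU.
 */
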
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

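	/* Position 1 is the version/timestamp header; position N + 2 is cpu N. */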
	if (v == (void *)1) {
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_putc(seq, '\n');

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
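			/* Eight load-balancing counters per cpu_idle_type. */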
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}

/*
 * This iterator needs some explanation:
 * it returns (void *)1 for the header position, which means position 2
 * corresponds to cpu 0. In a hotplugged system some CPUs, including cpu 0,
 * may be absent, so we have to use the cpumask_*() helpers to iterate over
 * the online CPUs.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	if (n == 0)
		return (void *)1;

	n--;

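	/* For n > 0, n - 1 is the CPU id handed out last time; advance past it. */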
	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}

static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;

	return schedstat_start(file, offset);
}

static void schedstat_stop(struct seq_file *file, void *data)
{
}

static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};

static int __init proc_schedstat_init(void)
{
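	/* A mode of 0 lets procfs fall back to its default of 0444. */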
	proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
	return 0;
}
subsys_initcall(proc_schedstat_init);