xref: /openbmc/linux/kernel/sched/stats.c (revision 801c1419)
// SPDX-License-Identifier: GPL-2.0
/*
 * /proc/schedstat implementation
 */

__update_stats_wait_start(struct rq * rq,struct task_struct * p,struct sched_statistics * stats)660f2415eSYafang Shao void __update_stats_wait_start(struct rq *rq, struct task_struct *p,
760f2415eSYafang Shao 			       struct sched_statistics *stats)
860f2415eSYafang Shao {
960f2415eSYafang Shao 	u64 wait_start, prev_wait_start;
1060f2415eSYafang Shao 
1160f2415eSYafang Shao 	wait_start = rq_clock(rq);
1260f2415eSYafang Shao 	prev_wait_start = schedstat_val(stats->wait_start);
1360f2415eSYafang Shao 
1460f2415eSYafang Shao 	if (p && likely(wait_start > prev_wait_start))
1560f2415eSYafang Shao 		wait_start -= prev_wait_start;
1660f2415eSYafang Shao 
1760f2415eSYafang Shao 	__schedstat_set(stats->wait_start, wait_start);
1860f2415eSYafang Shao }
1960f2415eSYafang Shao 
__update_stats_wait_end(struct rq * rq,struct task_struct * p,struct sched_statistics * stats)2060f2415eSYafang Shao void __update_stats_wait_end(struct rq *rq, struct task_struct *p,
2160f2415eSYafang Shao 			     struct sched_statistics *stats)
2260f2415eSYafang Shao {
2360f2415eSYafang Shao 	u64 delta = rq_clock(rq) - schedstat_val(stats->wait_start);
2460f2415eSYafang Shao 
2560f2415eSYafang Shao 	if (p) {
2660f2415eSYafang Shao 		if (task_on_rq_migrating(p)) {
2760f2415eSYafang Shao 			/*
2860f2415eSYafang Shao 			 * Preserve migrating task's wait time so wait_start
2960f2415eSYafang Shao 			 * time stamp can be adjusted to accumulate wait time
3060f2415eSYafang Shao 			 * prior to migration.
3160f2415eSYafang Shao 			 */
3260f2415eSYafang Shao 			__schedstat_set(stats->wait_start, delta);
3360f2415eSYafang Shao 
3460f2415eSYafang Shao 			return;
3560f2415eSYafang Shao 		}
3660f2415eSYafang Shao 
3760f2415eSYafang Shao 		trace_sched_stat_wait(p, delta);
3860f2415eSYafang Shao 	}
3960f2415eSYafang Shao 
4060f2415eSYafang Shao 	__schedstat_set(stats->wait_max,
4160f2415eSYafang Shao 			max(schedstat_val(stats->wait_max), delta));
4260f2415eSYafang Shao 	__schedstat_inc(stats->wait_count);
4360f2415eSYafang Shao 	__schedstat_add(stats->wait_sum, delta);
4460f2415eSYafang Shao 	__schedstat_set(stats->wait_start, 0);
4560f2415eSYafang Shao }
4660f2415eSYafang Shao 
/*
 * Account off-runqueue time for an entity being enqueued on @rq.
 * A pending sleep_start stamp contributes to the sleep statistics,
 * a pending block_start stamp to the block/iowait statistics; both
 * stamps are consumed (reset to 0) here.  @p may be NULL for group
 * entities, in which case tracing/profiling hooks are skipped.
 */
void __update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p,
				    struct sched_statistics *stats)
{
	u64 sleep_start, block_start;

	sleep_start = schedstat_val(stats->sleep_start);
	block_start = schedstat_val(stats->block_start);

	if (sleep_start) {
		u64 delta = rq_clock(rq) - sleep_start;

		/* Guard against the stamp being ahead of this rq's clock. */
		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->sleep_max)))
			__schedstat_set(stats->sleep_max, delta);

		__schedstat_set(stats->sleep_start, 0);
		__schedstat_add(stats->sum_sleep_runtime, delta);

		if (p) {
			/* delta >> 10: ns scaled down to ~us for latencytop */
			account_scheduler_latency(p, delta >> 10, 1);
			trace_sched_stat_sleep(p, delta);
		}
	}

	if (block_start) {
		u64 delta = rq_clock(rq) - block_start;

		/* Guard against the stamp being ahead of this rq's clock. */
		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > schedstat_val(stats->block_max)))
			__schedstat_set(stats->block_max, delta);

		__schedstat_set(stats->block_start, 0);
		/* Blocked time counts toward both sleep and block totals. */
		__schedstat_add(stats->sum_sleep_runtime, delta);
		__schedstat_add(stats->sum_block_runtime, delta);

		if (p) {
			if (p->in_iowait) {
				__schedstat_add(stats->iowait_sum, delta);
				__schedstat_inc(stats->iowait_count);
				trace_sched_stat_iowait(p, delta);
			}

			trace_sched_stat_blocked(p, delta);

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
					     (void *)get_wchan(p),
					     delta >> 20);
			}
			account_scheduler_latency(p, delta >> 10, 0);
		}
	}
}
10960f2415eSYafang Shao 
/*
 * Current schedstat API version.
 *
 * Bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

/*
 * seq_file ->show handler for /proc/schedstat.
 *
 * The iterator cookie @v is (void *)1 for the header record and
 * (void *)(cpu + 2) for a per-CPU record (see schedstat_start()).
 * The line layout is userspace ABI, versioned by SCHEDSTAT_VERSION.
 */
static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	if (v == (void *)1) {
		/* Header: format version and current time in jiffies. */
		seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
		seq_printf(seq, "timestamp %lu\n", jiffies);
	} else {
		struct rq *rq;
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif
		/* Decode the CPU number from the iterator cookie. */
		cpu = (unsigned long)(v - 2);
		rq = cpu_rq(cpu);

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u 0 %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		rcu_read_lock();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			seq_printf(seq, "domain%d %*pb", dcount++,
				   cpumask_pr_args(sched_domain_span(sd)));
			/* One group of load-balance counters per idle type. */
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		rcu_read_unlock();
#endif
	}
	return 0;
}
178391e43daSPeter Zijlstra 
/*
 * This iterator needs some explanation.
 * It returns 1 for the header position.
 * This means 2 is cpu 0.
 * In a hotplugged system some CPUs, including cpu 0, may be missing so we have
 * to use cpumask_* to iterate over the CPUs.
 */
static void *schedstat_start(struct seq_file *file, loff_t *offset)
{
	unsigned long n = *offset;

	/* Offset 0 is the header record. */
	if (n == 0)
		return (void *) 1;

	n--;

	/*
	 * Find the first online CPU >= n; cpumask_next() returns the
	 * next set bit strictly after its first argument, hence n - 1.
	 */
	if (n > 0)
		n = cpumask_next(n - 1, cpu_online_mask);
	else
		n = cpumask_first(cpu_online_mask);

	/* Store the CPU number (+1 for the header slot) back as offset. */
	*offset = n + 1;

	if (n < nr_cpu_ids)
		return (void *)(unsigned long)(n + 2);

	return NULL;
}
207cb152ff2SNathan Zimmer 
/* seq_file ->next: bump the offset and re-resolve it via ->start. */
static void *schedstat_next(struct seq_file *file, void *data, loff_t *offset)
{
	++*offset;

	return schedstat_start(file, offset);
}
214cb152ff2SNathan Zimmer 
/* seq_file ->stop: the iterator holds no state, nothing to release. */
static void schedstat_stop(struct seq_file *file, void *data)
{
}
218cb152ff2SNathan Zimmer 
/* seq_file operations backing /proc/schedstat. */
static const struct seq_operations schedstat_sops = {
	.start = schedstat_start,
	.next  = schedstat_next,
	.stop  = schedstat_stop,
	.show  = show_schedstat,
};
225cb152ff2SNathan Zimmer 
/* Register /proc/schedstat at boot (after core subsystems are up). */
static int __init proc_schedstat_init(void)
{
	proc_create_seq("schedstat", 0, NULL, &schedstat_sops);
	return 0;
}
subsys_initcall(proc_schedstat_init);
232