// SPDX-License-Identifier: GPL-2.0-only
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed sized data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. The count, the total accumulated latency and the
 * maximum latency are all tracked in this data structure. When the fixed size
 * structure is full, no new causes are tracked until the buffer is flushed by
 * writing to the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
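
/*
 * Typical usage from userspace (collection is controlled by the "latencytop"
 * sysctl registered below; any write to the stats file flushes the buffer):
 *
 *	# echo 1 > /proc/sys/kernel/latencytop		enable collection
 *	# cat /proc/latency_stats			read system-wide stats
 *	# echo > /proc/latency_stats			flush the fixed-size buffer
 */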

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>
#include <linux/sysctl.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

#ifdef CONFIG_SYSCTL
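/*
 * Handler for the "latencytop" sysctl: besides updating latencytop_enabled,
 * it force-enables schedstats, which the latency accounting depends on.
 */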
static int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}

static struct ctl_table latencytop_sysctl[] = {
	{
		.procname   = "latencytop",
		.data       = &latencytop_enabled,
		.maxlen     = sizeof(int),
		.mode       = 0644,
		.proc_handler   = sysctl_latencytop,
	},
	{}
};
#endif

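/*
 * clear_tsk_latency_tracing - reset the per-task latency records
 * @p: task whose accumulated latency records are discarded
 */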
void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

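/* Reset the system-wide records; called on writes to /proc/latency_stats. */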
static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

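/*
 * Fold one latency record into the global table: if an entry with the same
 * backtrace already exists, accumulate into it; otherwise take the first
 * free slot, or silently drop the record when the table is full.
 */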
static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: once LT_SAVECOUNT records are in use we stop
	 * recording new causes; in the future old entries should be recycled.
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

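/*
 * seq_file "show" callback for /proc/latency_stats: print the version
 * header, then one line per used record: count, total time, max time,
 * followed by the symbolized backtrace.
 */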
static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;

			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

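/* Any write to /proc/latency_stats flushes the global records. */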
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct proc_ops lstats_proc_ops = {
	.proc_open	= lstats_open,
	.proc_read	= seq_read,
	.proc_write	= lstats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

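/*
 * Create /proc/latency_stats and, when CONFIG_SYSCTL is set, register the
 * kernel.latencytop sysctl. Runs at device initcall time.
 */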
static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", latencytop_sysctl);
#endif
	return 0;
}
device_initcall(init_lstats_procfs);