/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks two levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed-size data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. The count, the total accumulated latency and the
 * maximum latency are all tracked in this data structure. When the fixed-size
 * structure is full, no new causes are tracked until the buffer is flushed by
 * writing to the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
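
/*
 * Usage sketch, for orientation. This assumes the usual sysctl wiring of
 * latencytop_enabled under CONFIG_LATENCYTOP; exact paths may differ on a
 * given system:
 *
 *	# echo 1 > /proc/sys/kernel/latencytop		(enable collection)
 *	# cat /proc/latency_stats			(read global stats)
 */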

#include <linux/latencytop.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

static DEFINE_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];
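
/*
 * For reference, a rough sketch of the record this file accumulates into.
 * The authoritative definition lives in <linux/latencytop.h>; exact types
 * and field order may differ there:
 *
 *	struct latency_record {
 *		unsigned long	backtrace[LT_BACKTRACEDEPTH];
 *		unsigned int	count;	(times this cause was hit)
 *		unsigned long	time;	(accumulated latency, microseconds)
 *		unsigned long	max;	(worst latency seen, microseconds)
 *	};
 */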

int latencytop_enabled;

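/*
 * Reset the per-task records of @p. This is typically invoked on the
 * freshly copied task during fork (see copy_process() in kernel/fork.c),
 * so a child does not inherit its parent's latency records.
 */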
void clear_all_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	if (!latencytop_enabled)
		return;

	spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	spin_unlock_irqrestore(&latency_lock, flags);
}

static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR;
	int i;

	if (!latencytop_enabled)
		return;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	/* No match found; take the first free slot, if any is left: */
	i = firstnonnull;
	if (i >= MAXLR)
		return;

	/* Allocate a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}
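
/*
 * Note: the matching above is a linear scan; each recorded latency compares
 * against up to MAXLR records of up to LT_BACKTRACEDEPTH frames each, all
 * while holding latency_lock. That is cheap at the current sizes but worth
 * keeping in mind before growing MAXLR.
 */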

/*
 * Helper to store a backtrace for this task into a latency record entry
 */
static inline void store_stacktrace(struct task_struct *tsk,
					struct latency_record *lat)
{
	struct stack_trace trace;

	memset(&trace, 0, sizeof(trace));
	trace.max_entries = LT_BACKTRACEDEPTH;
	trace.entries = &lat->backtrace[0];
	save_stack_trace_tsk(tsk, &trace);
}
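
/*
 * Note on the end-of-backtrace markers used throughout this file:
 * save_stack_trace_tsk() fills lat->backtrace[] in place and, with the
 * stacktrace API of this era, terminates a short trace with ULONG_MAX,
 * while untouched tail entries stay 0 (the caller zeroed the record).
 * That is why the comparison loops treat both 0 and ULONG_MAX as "end".
 */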

/**
 * __account_scheduler_latency - record a latency that occurred
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;
	store_stacktrace(tsk, &lat);

	spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	/* Try to merge with an existing per-task record first: */
	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 and ULONG_MAX entries mean end of backtrace: */
			if (record == 0 || record == ULONG_MAX)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: once all LT_SAVECOUNT slots are taken we stop;
	 * in the future we should recycle old entries:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocate a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	spin_unlock_irqrestore(&latency_lock, flags);
}
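
/*
 * For context: the scheduler does not call __account_scheduler_latency()
 * directly but goes through a static inline wrapper, so the common
 * (disabled) case costs only one branch. A sketch of that wrapper, as
 * found in <linux/latencytop.h> (reproduced from memory; the real header
 * is authoritative):
 *
 *	static inline void
 *	account_scheduler_latency(struct task_struct *task, int usecs, int inter)
 *	{
 *		if (unlikely(latencytop_enabled))
 *			__account_scheduler_latency(task, usecs, inter);
 *	}
 */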

static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		if (latency_record[i].backtrace[0]) {
			int q;

			seq_printf(m, "%i %lu %lu ",
				latency_record[i].count,
				latency_record[i].time,
				latency_record[i].max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				char sym[KSYM_SYMBOL_LEN];
				char *c;

				if (!latency_record[i].backtrace[q])
					break;
				if (latency_record[i].backtrace[q] == ULONG_MAX)
					break;
				sprint_symbol(sym, latency_record[i].backtrace[q]);
				c = strchr(sym, '+');
				if (c)
					*c = 0;
				seq_printf(m, "%s ", sym);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct file_operations lstats_fops = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
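
/*
 * The resulting /proc/latency_stats semantics: reading yields a snapshot
 * formatted by lstats_show(), and writing anything at all (the buffer
 * content is ignored) clears the global records. A userspace consumer
 * would typically alternate the two, e.g.:
 *
 *	# cat /proc/latency_stats		(consume current stats)
 *	# echo clear > /proc/latency_stats	(reset for the next interval)
 */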

static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_fops);
	return 0;
}
device_initcall(init_lstats_procfs);
299