xref: /openbmc/linux/kernel/trace/trace_stack.c (revision 7490ca1e)
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
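
	/*
	 * Illustrative example (hypothetical numbers, assuming
	 * THREAD_SIZE = 8192 and a downward-growing stack in a
	 * THREAD_SIZE-aligned region): with &this_size = 0x...8e60,
	 * the mask yields an offset of 0x0e60 (3680) bytes from the
	 * base of the stack region, so this_size becomes
	 * 8192 - 3680 = 4512 bytes in use between this frame and the
	 * top of the stack.
	 */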

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. For some reason an entry may
	 * be missing from the stack, so we have to account for that.
	 * If all entries are found, this loop will only run once.
	 * This code only executes on a new max, so it is far from
	 * a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}
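
	/*
	 * Illustrative sketch (all symbols and numbers hypothetical):
	 * after this loop, stack_dump_trace[] and stack_dump_index[]
	 * line up so that entry i was found index[i] bytes below the
	 * top of the stack:
	 *
	 *   i   stack_dump_trace[i]   stack_dump_index[i]
	 *   0   sys_foo+0x1c          4512   (deepest, nearest sp)
	 *   1   do_bar+0x48           4360
	 *   2   baz_handler+0x10      4104
	 *
	 * t_show() later prints index[i] - index[i+1] as the size of
	 * each frame.
	 */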

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed; this variable is only modified by this CPU */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};
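
/*
 * Note: once this ops is registered via register_ftrace_function(),
 * stack_trace_call() runs from the ftrace hook at the entry of every
 * traced kernel function (or only the functions selected through
 * stack_trace_filter), which is why the callback guards so carefully
 * against recursion.
 */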

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}
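
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug;
 * the value shown is hypothetical):
 *
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   4512
 *
 * This reports the largest stack usage seen so far, in bytes.
 */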

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If we trace a function inside arch_spin_lock(), or an NMI
	 * arrives after the lock is taken, check_stack() would try to
	 * take the lock again and deadlock. Bump the per-CPU
	 * trace_active count here as well to prevent that recursion.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}
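
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo 0 > /sys/kernel/debug/tracing/stack_max_size
 *
 * Writing 0 resets the recorded maximum so a new worst case can be
 * captured.
 */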

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
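
/*
 * seq_file iteration: the core calls t_start(), then t_show()/t_next()
 * for each record, and finally t_stop(). t_start()/t_stop() take and
 * release max_stack_lock (with IRQs off and trace_active bumped), so
 * the recorded snapshot stays consistent while it is being printed.
 */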

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_regex_release,
};
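
/*
 * Usage sketch (example function name; debugfs assumed mounted at
 * /sys/kernel/debug):
 *
 *   # echo do_sys_open > /sys/kernel/debug/tracing/stack_trace_filter
 *
 * This limits the stack checks to the listed functions, following the
 * same conventions as the regular ftrace filter files.
 */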

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
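
/*
 * Usage sketch: the sysctl handler above backs
 * /proc/sys/kernel/stack_tracer_enabled, so the tracer can be toggled
 * at run time:
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   # echo 0 > /proc/sys/kernel/stack_tracer_enabled
 */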

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
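
/*
 * Boot-time usage sketch: __setup("stacktrace", ...) matches kernel
 * command-line parameters beginning with "stacktrace", so both of the
 * following enable the tracer at boot (the second also seeds the
 * filter; the function name shown is only an example):
 *
 *   stacktrace
 *   stacktrace_filter=do_sys_open
 */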

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
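
/*
 * Illustrative output sketch for /sys/kernel/debug/tracing/stack_trace,
 * derived from the t_show() format above (all symbols and numbers are
 * hypothetical):
 *
 *         Depth    Size   Location    (3 entries)
 *         -----    ----   --------
 *   0)     4512     152   sys_foo+0x1c/0x90
 *   1)     4360     256   do_bar+0x48/0x1a0
 *   2)     4104    4104   baz_handler+0x10/0x30
 */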