xref: /openbmc/linux/kernel/trace/trace_stack.c (revision 3932b9ca)
1 /*
2  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
3  *
4  */
5 #include <linux/stacktrace.h>
6 #include <linux/kallsyms.h>
7 #include <linux/seq_file.h>
8 #include <linux/spinlock.h>
9 #include <linux/uaccess.h>
10 #include <linux/debugfs.h>
11 #include <linux/ftrace.h>
12 #include <linux/module.h>
13 #include <linux/sysctl.h>
14 #include <linux/init.h>
15 #include <linux/fs.h>
16 #include <linux/magic.h>
17 
18 #include <asm/setup.h>
19 
20 #include "trace.h"
21 
22 #define STACK_TRACE_ENTRIES 500
23 
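/*
 * CC_USING_FENTRY is defined when the kernel is built with gcc's
 * -mfentry, which places the tracing call at the very start of a
 * function, before its stack frame is set up (a traditional mcount
 * call sits after the frame setup). The fentry flag below lets
 * stack_trace_call() pick the right ip to search for on the stack.
 */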
24 #ifdef CC_USING_FENTRY
25 # define fentry		1
26 #else
27 # define fentry		0
28 #endif
29 
30 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
31 	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
32 static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
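/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far; stack_dump_index[] holds, for each of those entries,
 * the stack depth in bytes at which the address was found (the
 * "Depth" column of the stack_trace output).
 */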
33 
34 /*
35  * Reserve one entry for the passed-in ip. This allows us to
36  * remove most or all of the stack-size overhead added by the
37  * stack tracer itself.
38  */
39 static struct stack_trace max_stack_trace = {
40 	.max_entries		= STACK_TRACE_ENTRIES - 1,
41 	.entries		= &stack_dump_trace[1],
42 };
43 
44 static unsigned long max_stack_size;
45 static arch_spinlock_t max_stack_lock =
46 	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
47 
48 static DEFINE_PER_CPU(int, trace_active);
49 static DEFINE_MUTEX(stack_sysctl_mutex);
50 
51 int stack_tracer_enabled;
52 static int last_stack_tracer_enabled;
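/*
 * stack_tracer_enabled is the knob behind the
 * kernel.stack_tracer_enabled sysctl; last_stack_tracer_enabled
 * caches its previous value so stack_trace_sysctl() only registers
 * or unregisters the ftrace callback when the setting really changes.
 */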
53 
54 static inline void print_max_stack(void)
55 {
56 	long i;
57 	int size;
58 
59 	pr_emerg("        Depth    Size   Location    (%d entries)\n"
60 			   "        -----    ----   --------\n",
61 			   max_stack_trace.nr_entries - 1);
62 
63 	for (i = 0; i < max_stack_trace.nr_entries; i++) {
64 		if (stack_dump_trace[i] == ULONG_MAX)
65 			break;
66 		if (i+1 == max_stack_trace.nr_entries ||
67 				stack_dump_trace[i+1] == ULONG_MAX)
68 			size = stack_dump_index[i];
69 		else
70 			size = stack_dump_index[i] - stack_dump_index[i+1];
71 
72 		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_dump_index[i],
73 				size, (void *)stack_dump_trace[i]);
74 	}
75 }
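/*
 * The format above matches the debugfs stack_trace file; a typical
 * line looks like this (depth, size, and symbol are illustrative):
 *
 *   0)     4344      80   _raw_spin_lock_irqsave+0x2b/0x70
 */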
76 
77 static inline void
78 check_stack(unsigned long ip, unsigned long *stack)
79 {
80 	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
81 	static int tracer_frame;
82 	int frame_size = ACCESS_ONCE(tracer_frame);
83 	int i;
84 
85 	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
86 	this_size = THREAD_SIZE - this_size;
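	/*
	 * Example (illustrative, THREAD_SIZE is arch-dependent): with
	 * THREAD_SIZE of 16K (0x4000) and the passed-in stack pointer
	 * at offset 0x3a00 within the thread stack, the stack has so
	 * far grown down by 0x4000 - 0x3a00 = 0x600 (1536) bytes.
	 */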
87 	/* Remove the frame of the tracer */
88 	this_size -= frame_size;
89 
90 	if (this_size <= max_stack_size)
91 		return;
92 
93 	/* we do not handle interrupt stacks yet */
94 	if (!object_is_on_stack(stack))
95 		return;
96 
97 	local_irq_save(flags);
98 	arch_spin_lock(&max_stack_lock);
99 
100 	/* In case another CPU set tracer_frame after we sampled it */
101 	if (unlikely(!frame_size))
102 		this_size -= tracer_frame;
103 
104 	/* a racing CPU may have already updated max_stack_size */
105 	if (this_size <= max_stack_size)
106 		goto out;
107 
108 	max_stack_size = this_size;
109 
110 	max_stack_trace.nr_entries = 0;
111 
112 	if (using_ftrace_ops_list_func())
113 		max_stack_trace.skip = 4;
114 	else
115 		max_stack_trace.skip = 3;
116 
117 	save_stack_trace(&max_stack_trace);
118 
119 	/*
120 	 * Add the passed-in ip from the function tracer.
121 	 * Searching for this on the stack will skip over
122 	 * most of the overhead from the stack tracer itself.
123 	 */
124 	stack_dump_trace[0] = ip;
125 	max_stack_trace.nr_entries++;
126 
127 	/*
128 	 * Now find where on the stack each of these entries resides.
129 	 */
130 	i = 0;
131 	start = stack;
132 	top = (unsigned long *)
133 		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
134 
135 	/*
136 	 * Loop through all the entries. Some entries may not be
137 	 * found on the stack for one reason or another, and we
138 	 * have to account for those. If every entry is found, this
139 	 * loop makes only a single pass. This code only runs when
140 	 * a new max is recorded, so it is far from a fast path.
141 	 */
142 	while (i < max_stack_trace.nr_entries) {
143 		int found = 0;
144 
145 		stack_dump_index[i] = this_size;
146 		p = start;
147 
148 		for (; p < top && i < max_stack_trace.nr_entries; p++) {
149 			if (*p == stack_dump_trace[i]) {
150 				this_size = stack_dump_index[i++] =
151 					(top - p) * sizeof(unsigned long);
152 				found = 1;
153 				/* Start the search from here */
154 				start = p + 1;
155 				/*
156 				 * We do not want the stack tracer's
157 				 * own stack usage to show up in the
158 				 * max stack. If we haven't figured
159 				 * out how big that overhead is yet,
160 				 * figure it out now.
161 				 */
162 				if (unlikely(!tracer_frame) && i == 1) {
163 					tracer_frame = (p - stack) *
164 						sizeof(unsigned long);
165 					max_stack_size -= tracer_frame;
166 				}
167 			}
168 		}
169 
170 		if (!found)
171 			i++;
172 	}
173 
174 	if ((current != &init_task &&
175 		*(end_of_stack(current)) != STACK_END_MAGIC)) {
176 		print_max_stack();
177 		BUG();
178 	}
179 
180  out:
181 	arch_spin_unlock(&max_stack_lock);
182 	local_irq_restore(flags);
183 }
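/*
 * The search above maps each return address reported by
 * save_stack_trace() to the word on the stack that holds it, which
 * turns the saved trace into per-function frame sizes: entry i's
 * size is the difference between its depth and entry i+1's depth
 * (see print_max_stack() and t_show()).
 */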
184 
185 static void
186 stack_trace_call(unsigned long ip, unsigned long parent_ip,
187 		 struct ftrace_ops *op, struct pt_regs *pt_regs)
188 {
189 	unsigned long stack;
190 	int cpu;
191 
192 	preempt_disable_notrace();
193 
194 	cpu = raw_smp_processor_id();
195 	/* no atomic needed, this variable is only modified by this CPU */
196 	if (per_cpu(trace_active, cpu)++ != 0)
197 		goto out;
198 
199 	/*
200 	 * When fentry is used, the traced function does not get
201 	 * its stack frame set up, and we lose the parent.
202 	 * The ip is pretty useless because the function tracer
203 	 * was called before that function set up its stack frame.
204 	 * In this case, we use the parent ip.
205 	 *
206 	 * By adding the return address of either the parent ip
207 	 * or the current ip we can disregard most of the stack usage
208 	 * caused by the stack tracer itself.
209 	 *
210 	 * The function tracer always reports the address of where the
211 	 * mcount call was, but the stack will hold the return address.
212 	 */
213 	if (fentry)
214 		ip = parent_ip;
215 	else
216 		ip += MCOUNT_INSN_SIZE;
217 
218 	check_stack(ip, &stack);
219 
220  out:
221 	per_cpu(trace_active, cpu)--;
222 	/* prevent recursion in schedule */
223 	preempt_enable_notrace();
224 }
225 
226 static struct ftrace_ops trace_ops __read_mostly =
227 {
228 	.func = stack_trace_call,
229 	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
230 };
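/*
 * FTRACE_OPS_FL_RECURSION_SAFE tells ftrace that stack_trace_call()
 * provides its own recursion protection (the per-CPU trace_active
 * counter), so ftrace need not wrap the callback with its own.
 */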
231 
232 static ssize_t
233 stack_max_size_read(struct file *filp, char __user *ubuf,
234 		    size_t count, loff_t *ppos)
235 {
236 	unsigned long *ptr = filp->private_data;
237 	char buf[64];
238 	int r;
239 
240 	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
241 	if (r > sizeof(buf))
242 		r = sizeof(buf);
243 	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
244 }
245 
246 static ssize_t
247 stack_max_size_write(struct file *filp, const char __user *ubuf,
248 		     size_t count, loff_t *ppos)
249 {
250 	long *ptr = filp->private_data;
251 	unsigned long val, flags;
252 	int ret;
253 	int cpu;
254 
255 	ret = kstrtoul_from_user(ubuf, count, 10, &val);
256 	if (ret)
257 		return ret;
258 
259 	local_irq_save(flags);
260 
261 	/*
262 	 * In case we trace inside arch_spin_lock() or after it (from
263 	 * an NMI), we would deadlock on max_stack_lock, so we also
264 	 * need to increment the per-CPU trace_active counter here.
265 	 */
266 	cpu = smp_processor_id();
267 	per_cpu(trace_active, cpu)++;
268 
269 	arch_spin_lock(&max_stack_lock);
270 	*ptr = val;
271 	arch_spin_unlock(&max_stack_lock);
272 
273 	per_cpu(trace_active, cpu)--;
274 	local_irq_restore(flags);
275 
276 	return count;
277 }
278 
279 static const struct file_operations stack_max_size_fops = {
280 	.open		= tracing_open_generic,
281 	.read		= stack_max_size_read,
282 	.write		= stack_max_size_write,
283 	.llseek		= default_llseek,
284 };
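/*
 * Typical use from user space, assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *   echo 0 > /sys/kernel/debug/tracing/stack_max_size   # reset the max
 *   cat /sys/kernel/debug/tracing/stack_trace           # dump deepest stack
 */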
285 
286 static void *
287 __next(struct seq_file *m, loff_t *pos)
288 {
289 	long n = *pos - 1;
290 
291 	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
292 		return NULL;
293 
294 	m->private = (void *)n;
295 	return &m->private;
296 }
297 
298 static void *
299 t_next(struct seq_file *m, void *v, loff_t *pos)
300 {
301 	(*pos)++;
302 	return __next(m, pos);
303 }
304 
305 static void *t_start(struct seq_file *m, loff_t *pos)
306 {
307 	int cpu;
308 
309 	local_irq_disable();
310 
311 	cpu = smp_processor_id();
312 	per_cpu(trace_active, cpu)++;
313 
314 	arch_spin_lock(&max_stack_lock);
315 
316 	if (*pos == 0)
317 		return SEQ_START_TOKEN;
318 
319 	return __next(m, pos);
320 }
321 
322 static void t_stop(struct seq_file *m, void *p)
323 {
324 	int cpu;
325 
326 	arch_spin_unlock(&max_stack_lock);
327 
328 	cpu = smp_processor_id();
329 	per_cpu(trace_active, cpu)--;
330 
331 	local_irq_enable();
332 }
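/*
 * t_start()/t_stop() bracket the whole seq_file walk: interrupts stay
 * off and max_stack_lock stays held for the duration, so the recorded
 * snapshot cannot change (and the stack tracer cannot recurse on this
 * CPU) while it is being printed.
 */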
333 
334 static int trace_lookup_stack(struct seq_file *m, long i)
335 {
336 	unsigned long addr = stack_dump_trace[i];
337 
338 	return seq_printf(m, "%pS\n", (void *)addr);
339 }
340 
341 static void print_disabled(struct seq_file *m)
342 {
343 	seq_puts(m, "#\n"
344 		 "#  Stack tracer disabled\n"
345 		 "#\n"
346 		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
347 		 "# kernel command line\n"
348 		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
349 		 "#\n");
350 }
351 
352 static int t_show(struct seq_file *m, void *v)
353 {
354 	long i;
355 	int size;
356 
357 	if (v == SEQ_START_TOKEN) {
358 		seq_printf(m, "        Depth    Size   Location"
359 			   "    (%d entries)\n"
360 			   "        -----    ----   --------\n",
361 			   max_stack_trace.nr_entries - 1);
362 
363 		if (!stack_tracer_enabled && !max_stack_size)
364 			print_disabled(m);
365 
366 		return 0;
367 	}
368 
369 	i = *(long *)v;
370 
371 	if (i >= max_stack_trace.nr_entries ||
372 	    stack_dump_trace[i] == ULONG_MAX)
373 		return 0;
374 
375 	if (i+1 == max_stack_trace.nr_entries ||
376 	    stack_dump_trace[i+1] == ULONG_MAX)
377 		size = stack_dump_index[i];
378 	else
379 		size = stack_dump_index[i] - stack_dump_index[i+1];
380 
381 	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);
382 
383 	trace_lookup_stack(m, i);
384 
385 	return 0;
386 }
387 
388 static const struct seq_operations stack_trace_seq_ops = {
389 	.start		= t_start,
390 	.next		= t_next,
391 	.stop		= t_stop,
392 	.show		= t_show,
393 };
394 
395 static int stack_trace_open(struct inode *inode, struct file *file)
396 {
397 	return seq_open(file, &stack_trace_seq_ops);
398 }
399 
400 static const struct file_operations stack_trace_fops = {
401 	.open		= stack_trace_open,
402 	.read		= seq_read,
403 	.llseek		= seq_lseek,
404 	.release	= seq_release,
405 };
406 
407 static int
408 stack_trace_filter_open(struct inode *inode, struct file *file)
409 {
410 	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
411 				 inode, file);
412 }
413 
414 static const struct file_operations stack_trace_filter_fops = {
415 	.open = stack_trace_filter_open,
416 	.read = seq_read,
417 	.write = ftrace_filter_write,
418 	.llseek = tracing_lseek,
419 	.release = ftrace_regex_release,
420 };
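/*
 * stack_trace_filter uses the same machinery (and glob syntax) as
 * set_ftrace_filter and limits which functions the stack tracer
 * checks, e.g. (illustrative):
 *
 *   echo 'vfs_*' > /sys/kernel/debug/tracing/stack_trace_filter
 */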
421 
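/*
 * stack_trace_sysctl() backs /proc/sys/kernel/stack_tracer_enabled:
 * writing 1 registers the stack tracer's ftrace callback, writing 0
 * removes it, and nothing happens unless the value actually flips.
 */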
422 int
423 stack_trace_sysctl(struct ctl_table *table, int write,
424 		   void __user *buffer, size_t *lenp,
425 		   loff_t *ppos)
426 {
427 	int ret;
428 
429 	mutex_lock(&stack_sysctl_mutex);
430 
431 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
432 
433 	if (ret || !write ||
434 	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
435 		goto out;
436 
437 	last_stack_tracer_enabled = !!stack_tracer_enabled;
438 
439 	if (stack_tracer_enabled)
440 		register_ftrace_function(&trace_ops);
441 	else
442 		unregister_ftrace_function(&trace_ops);
443 
444  out:
445 	mutex_unlock(&stack_sysctl_mutex);
446 	return ret;
447 }
448 
449 static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
450 
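/*
 * Boot-time equivalents of the sysctl: "stacktrace" on the kernel
 * command line enables the tracer at boot, and
 * "stacktrace_filter=<globs>" (illustrative: stacktrace_filter=vfs_*)
 * seeds the filter, which stack_trace_init() applies before
 * registering the callback.
 */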
451 static __init int enable_stacktrace(char *str)
452 {
453 	if (strncmp(str, "_filter=", 8) == 0)
454 		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);
455 
456 	stack_tracer_enabled = 1;
457 	last_stack_tracer_enabled = 1;
458 	return 1;
459 }
460 __setup("stacktrace", enable_stacktrace);
461 
462 static __init int stack_trace_init(void)
463 {
464 	struct dentry *d_tracer;
465 
466 	d_tracer = tracing_init_dentry();
467 	if (!d_tracer)
468 		return 0;
469 
470 	trace_create_file("stack_max_size", 0644, d_tracer,
471 			&max_stack_size, &stack_max_size_fops);
472 
473 	trace_create_file("stack_trace", 0444, d_tracer,
474 			NULL, &stack_trace_fops);
475 
476 	trace_create_file("stack_trace_filter", 0444, d_tracer,
477 			NULL, &stack_trace_filter_fops);
478 
479 	if (stack_trace_filter_buf[0])
480 		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);
481 
482 	if (stack_tracer_enabled)
483 		register_ftrace_function(&trace_ops);
484 
485 	return 0;
486 }
487 
488 device_initcall(stack_trace_init);
489