xref: /openbmc/linux/kernel/trace/trace_stack.c (revision abfbd895)
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
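
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug
 * (the files themselves are created in stack_trace_init() below):
 *
 *   # echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *   # cat /sys/kernel/debug/tracing/stack_max_size
 *   # cat /sys/kernel/debug/tracing/stack_trace
 */
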
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

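/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by a ULONG_MAX sentinel. For each entry,
 * stack_trace_index[] records how deep (in bytes, measured from the
 * top of the thread stack) that entry's frame sits.
 */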
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

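/*
 * trace_active is a per-CPU recursion guard: while it is non-zero on
 * a CPU, stack_trace_call() will not re-enter check_stack(), and the
 * readers/writers below raise it so that taking stack_trace_max_lock
 * cannot deadlock against the tracer firing on the same CPU.
 */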
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

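/*
 * Dump the saved max stack trace (depth, frame size and symbol per
 * entry) to the console; used from check_stack() when the stack end
 * canary has been overwritten.
 */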
void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

/*
 * When arch-specific code overrides this function, the following
 * data should be filled in, assuming stack_trace_max_lock is held to
 * prevent concurrent updates.
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
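	/*
	 * Skip the first few saved entries; they are the stack
	 * tracer's own call path into save_stack_trace() rather
	 * than part of the stack being measured.
	 */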
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may, for
	 * some reason, be missing from the stack, so we may have to
	 * account for that. If they are all there, this loop will
	 * only happen once. This code only takes place on a new max,
	 * so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			if (*p == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

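	/*
	 * Keep only the entries that were matched to a stack slot and
	 * re-terminate the trace after them.
	 */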
	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

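	/* The stack-end canary was clobbered: report it and crash hard */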
	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

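/*
 * ftrace callback, invoked on every traced function entry. The
 * per-CPU trace_active counter keeps it from recursing into itself
 * before it hands the (adjusted) ip and a stack anchor to
 * check_stack().
 */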
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

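	/*
	 * ip points at the mcount/fentry call site inside the traced
	 * function; step past it so it matches the return address the
	 * saved stack trace will record for that call.
	 */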
	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

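/*
 * debugfs handlers for stack_max_size: reads format the current
 * maximum; writes update it under stack_trace_max_lock, with
 * trace_active raised so a trace hit on this CPU cannot take the
 * same lock again and deadlock.
 */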
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If the tracer fires while we hold arch_spin_lock() (or from
	 * NMI), it would deadlock on the same lock, so raise the
	 * percpu trace_active here as well.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

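/*
 * seq_file iterator for the stack_trace file. t_start() takes
 * stack_trace_max_lock and raises trace_active, so the recorded max
 * trace cannot change (and the tracer cannot recurse) while it is
 * being dumped; t_stop() undoes both.
 */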
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&stack_trace_max_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

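/*
 * Emit one row of the stack_trace file: index, depth from the top of
 * the stack, the frame size (difference to the next entry's depth)
 * and the symbol.
 */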
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

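/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled: only
 * (un)registers the ftrace callback when the value actually changed,
 * tracked via last_stack_tracer_enabled.
 */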
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

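/*
 * "stacktrace" on the command line enables the tracer at boot;
 * "stacktrace_filter=<funcs>" (seen here as the "_filter=" suffix,
 * since __setup("stacktrace") hands us the string past that prefix)
 * stashes a filter to apply before registration.
 */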
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);