xref: /openbmc/linux/kernel/trace/trace_stack.c (revision 4f6cce39)
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

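/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far, terminated by ULONG_MAX.  stack_trace_index[] is a
 * parallel array recording, for each entry, its depth: the number of
 * bytes between the top of the stack and the location where that
 * address was found.  Together they form the Depth/Size/Location
 * table shown in the stack_trace file.
 */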
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};

unsigned long stack_trace_max_size;
arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

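/*
 * Per-CPU recursion guard: trace_active is bumped in the ftrace
 * callback and around every other use of stack_trace_max_lock, so the
 * stack tracer neither traces its own code paths nor deadlocks on its
 * own lock.
 */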
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
				stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
				size, (void *)stack_dump_trace[i]);
	}
}

/*
 * When arch-specific code overrides this function, the following
 * data should be filled in, with stack_trace_max_lock held to
 * prevent concurrent updates:
 *     stack_trace_index[]
 *     stack_trace_max
 *     stack_trace_max_size
 */
void __weak
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i, x;

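	/*
	 * The stack grows down, so the bytes in use are the distance from
	 * the current stack address to the top of the THREAD_SIZE-aligned
	 * stack area: THREAD_SIZE minus the offset of 'stack' within it.
	 */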
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/*
	 * RCU may not be watching, make it see us.
	 * The stack trace code uses rcu_sched.
	 */
	rcu_irq_enter();

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

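	/*
	 * Take a fresh stack dump.  The top-most entries at this point are
	 * the stack tracer's own frames (save_stack_trace(), this function
	 * and its caller), so tell the unwinder to skip them.
	 */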
	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_max.nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_max.nr_entries; p++) {
			if (stack_dump_trace[i] == ULONG_MAX)
				break;
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

	stack_trace_max.nr_entries = x;
	for (; x < i; x++)
		stack_dump_trace[x] = ULONG_MAX;

	if (task_stack_end_corrupted(current)) {
		stack_trace_print();
		BUG();
	}

 out:
	rcu_irq_exit();
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

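	/*
	 * ip is the address of the mcount/fentry call site within the
	 * traced function; step past that instruction so check_stack() is
	 * handed an address inside the traced function body.
	 */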
	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

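/*
 * When the stack tracer is enabled this ops is registered with ftrace,
 * so stack_trace_call() runs at the entry of every traced function.
 * FTRACE_OPS_FL_RECURSION_SAFE tells ftrace that the callback provides
 * its own recursion protection (the per-CPU trace_active counter).
 */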
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * If the stack tracer fires while we hold arch_spin_lock() (or an
	 * NMI traces on top of it), it would try to take the same lock and
	 * deadlock, so bump the per-CPU trace_active count here as well.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

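/*
 * seq_file iterator for the stack_trace file.  __next() maps a seq_file
 * position to an index into stack_dump_trace[] (position 0 is the header
 * line, so entry n lives at *pos - 1).  t_start()/t_stop() bump the
 * per-CPU trace_active count and hold stack_trace_max_lock so the
 * snapshot cannot change, or be re-entered by the tracer, while it is
 * being printed.
 */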
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&stack_trace_max_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_max.nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_max.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == stack_trace_max.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

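/*
 * Handler for /proc/sys/kernel/stack_tracer_enabled.  When the value
 * actually changes, register or unregister the ftrace callback so the
 * stack tracer only adds overhead while it is enabled.
 */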
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

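/*
 * "stacktrace" on the kernel command line enables the tracer at boot.
 * "stacktrace_filter=<funcs>" (matched here as the "_filter=" suffix)
 * additionally saves a filter list that stack_trace_init() applies
 * before the callback is registered.
 */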
static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

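/*
 * Create the tracefs control files (stack_max_size, stack_trace and
 * stack_trace_filter), apply any boot-time filter, and register the
 * callback if the tracer was enabled on the command line.
 */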
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);