xref: /openbmc/linux/kernel/trace/trace_stack.c (revision c4ee0af3)
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

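/*
 * With CC_USING_FENTRY the compiler places the profiling call
 * (__fentry__) at the very start of the function, before the stack
 * frame is set up. The fentry flag is used in stack_trace_call()
 * below to decide whether to search for the traced function's ip or
 * its parent's ip on the stack.
 */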
#ifdef CC_USING_FENTRY
# define fentry		1
#else
# define fentry		0
#endif

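/*
 * stack_dump_trace[] holds the return addresses of the deepest stack
 * seen so far; stack_dump_index[i] holds the depth, in bytes from the
 * top of the thread stack, at which entry i was found. t_show() uses
 * the difference between consecutive depths to print per-frame sizes.
 */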
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[1],
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

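/*
 * trace_active is a per-CPU recursion guard. It is incremented around
 * any section that may recurse into the stack tracer or take
 * max_stack_lock (the function-trace callback, the stack_max_size
 * write path and the seq_file iterator), so a nested invocation bails
 * out instead of deadlocking.
 */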
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

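/*
 * check_stack() is called from the function-trace callback below. It
 * measures how much of the current thread stack is in use and, on a
 * new maximum, saves the stack trace and works out the depth of each
 * entry by searching for its return address on the actual stack.
 */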
static inline void
check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = ACCESS_ONCE(tracer_frame);
	int i;

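	/*
	 * The thread stack is THREAD_SIZE aligned, so masking the
	 * current stack pointer with (THREAD_SIZE - 1) gives the offset
	 * into the stack area; THREAD_SIZE minus that offset is the
	 * number of bytes of stack currently in use.
	 */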
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Add the passed in ip from the function tracer.
	 * Searching for this on the stack will skip over
	 * most of the overhead from the stack tracer itself.
	 */
	stack_dump_trace[0] = ip;
	max_stack_trace.nr_entries++;

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. Some of the entries may not be
	 * found on the stack, so we have to account for that. If they
	 * are all there, this loop will only run once. This code only
	 * runs on a new max, so it is far from a fast path.
	 */
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame) && i == 1) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					max_stack_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

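/*
 * stack_trace_call() is the ftrace callback registered through
 * trace_ops. It runs for every traced function, uses trace_active to
 * avoid recursing into itself, and hands the adjusted ip and the
 * current stack pointer to check_stack().
 */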
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	unsigned long stack;
	int cpu;

	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	/*
	 * When fentry is used, the traced function does not get
	 * its stack frame set up, and we lose the parent.
	 * The ip is pretty useless because the function tracer
	 * was called before that function set up its stack frame.
	 * In this case, we use the parent ip.
	 *
	 * By adding the return address of either the parent ip
	 * or the current ip we can disregard most of the stack usage
	 * caused by the stack tracer itself.
	 *
	 * The function tracer always reports the address of where the
	 * mcount call was, but the stack will hold the return address.
	 */
	if (fentry)
		ip = parent_ip;
	else
		ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

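/*
 * trace_ops hooks stack_trace_call() into the function tracer. It is
 * registered and unregistered from the sysctl handler and from
 * stack_trace_init() below.
 */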
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

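/*
 * debugfs "stack_max_size": reading reports the deepest stack usage
 * seen so far; writing (typically 0) resets it so a new maximum can be
 * recorded.
 */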
static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;
	int cpu;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we would deadlock on max_stack_lock, so we also need to
	 * increase the percpu trace_active here.
	 */
	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);

	per_cpu(trace_active, cpu)--;
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
	.llseek		= default_llseek,
};

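/*
 * seq_file iterator for debugfs "stack_trace". Position 0 produces the
 * header (SEQ_START_TOKEN); position n + 1 maps to saved entry n. The
 * iteration runs with max_stack_lock held (and trace_active bumped) so
 * the snapshot cannot change while it is being printed.
 */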
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	int cpu;

	local_irq_disable();

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)++;

	arch_spin_lock(&max_stack_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	int cpu;

	arch_spin_unlock(&max_stack_lock);

	cpu = smp_processor_id();
	per_cpu(trace_active, cpu)--;

	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

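/*
 * t_show() prints one line per saved entry. The "Size" column is the
 * difference between this entry's recorded depth and the next entry's
 * depth; the last entry is charged its full depth.
 */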
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

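/*
 * debugfs "stack_trace_filter" reuses the ftrace regex filter on
 * trace_ops, limiting which functions trigger the stack check.
 */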
static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(&trace_ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_filter_lseek,
	.release = ftrace_regex_release,
};

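/*
 * Handler for the kernel.stack_tracer_enabled sysctl. The ftrace
 * callback is registered or unregistered only when the value actually
 * changes.
 */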
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

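/*
 * Boot-time setup. __setup("stacktrace", ...) matches by prefix, so
 * "stacktrace_filter=<functions>" also lands here with str pointing at
 * "_filter=..."; the filter list is stashed until ftrace is ready.
 * Either form enables the stack tracer at boot.
 */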
static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

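/*
 * Create the debugfs files, apply any boot-time filter, and register
 * the callback if the stack tracer was enabled on the command line.
 */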
static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	trace_create_file("stack_trace_filter", 0444, d_tracer,
			NULL, &stack_trace_filter_fops);

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);