1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
2e5a81b62SSteven Rostedt /*
3e5a81b62SSteven Rostedt * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
4e5a81b62SSteven Rostedt *
5e5a81b62SSteven Rostedt */
668db0cf1SIngo Molnar #include <linux/sched/task_stack.h>
7e5a81b62SSteven Rostedt #include <linux/stacktrace.h>
817911ff3SSteven Rostedt (VMware) #include <linux/security.h>
9e5a81b62SSteven Rostedt #include <linux/kallsyms.h>
10e5a81b62SSteven Rostedt #include <linux/seq_file.h>
11e5a81b62SSteven Rostedt #include <linux/spinlock.h>
12e5a81b62SSteven Rostedt #include <linux/uaccess.h>
13e5a81b62SSteven Rostedt #include <linux/ftrace.h>
14e5a81b62SSteven Rostedt #include <linux/module.h>
15f38f1d2aSSteven Rostedt #include <linux/sysctl.h>
16e5a81b62SSteven Rostedt #include <linux/init.h>
17762e1207SSteven Rostedt
18762e1207SSteven Rostedt #include <asm/setup.h>
19762e1207SSteven Rostedt
20e5a81b62SSteven Rostedt #include "trace.h"
21e5a81b62SSteven Rostedt
/* Maximum number of stack-trace entries kept for the deepest stack seen. */
#define STACK_TRACE_ENTRIES 500

/* Return addresses of the deepest stack trace captured so far. */
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
/* Stack depth in bytes (from the top of the stack) at each entry above. */
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

/* Number of valid entries in stack_dump_trace[]/stack_trace_index[]. */
static unsigned int stack_trace_nr_entries;
/* Size in bytes of the deepest stack recorded so far. */
static unsigned long stack_trace_max_size;
/* Protects the max-stack snapshot; raw arch lock so it is usable from the ftrace callback. */
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

/* Per-CPU recursion guard: non-zero disables the stack tracer on this CPU. */
DEFINE_PER_CPU(int, disable_stack_tracer);
/* Serializes enable/disable through the stack_tracer_enabled sysctl. */
static DEFINE_MUTEX(stack_sysctl_mutex);

/* Set by the "stacktrace" boot parameter or the sysctl; non-zero = tracer active. */
int stack_tracer_enabled;
36e5a81b62SSteven Rostedt
/*
 * Dump the recorded maximum-stack table via printk.  Called (while holding
 * stack_trace_max_lock) when the task's stack end marker is found
 * corrupted, right before BUG()ing.
 */
static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg(" Depth Size Location (%d entries)\n"
	" ----- ---- --------\n",
	stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		/* The last entry's frame size is its full recorded depth. */
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d %5d %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
56e3172181SMinchan Kim
5758fe7a87SSteven Rostedt (VMware) /*
5858fe7a87SSteven Rostedt (VMware) * The stack tracer looks for a maximum stack at each call from a function. It
5958fe7a87SSteven Rostedt (VMware) * registers a callback from ftrace, and in that callback it examines the stack
6058fe7a87SSteven Rostedt (VMware) * size. It determines the stack size from the variable passed in, which is the
6158fe7a87SSteven Rostedt (VMware) * address of a local variable in the stack_trace_call() callback function.
6258fe7a87SSteven Rostedt (VMware) * The stack size is calculated by the address of the local variable to the top
6358fe7a87SSteven Rostedt (VMware) * of the current stack. If that size is smaller than the currently saved max
6458fe7a87SSteven Rostedt (VMware) * stack size, nothing more is done.
6558fe7a87SSteven Rostedt (VMware) *
6658fe7a87SSteven Rostedt (VMware) * If the size of the stack is greater than the maximum recorded size, then the
6758fe7a87SSteven Rostedt (VMware) * following algorithm takes place.
6858fe7a87SSteven Rostedt (VMware) *
6958fe7a87SSteven Rostedt (VMware) * For architectures (like x86) that store the function's return address before
7058fe7a87SSteven Rostedt (VMware) * saving the function's local variables, the stack will look something like
7158fe7a87SSteven Rostedt (VMware) * this:
7258fe7a87SSteven Rostedt (VMware) *
7358fe7a87SSteven Rostedt (VMware) * [ top of stack ]
7458fe7a87SSteven Rostedt (VMware) * 0: sys call entry frame
7558fe7a87SSteven Rostedt (VMware) * 10: return addr to entry code
7658fe7a87SSteven Rostedt (VMware) * 11: start of sys_foo frame
7758fe7a87SSteven Rostedt (VMware) * 20: return addr to sys_foo
7858fe7a87SSteven Rostedt (VMware) * 21: start of kernel_func_bar frame
7958fe7a87SSteven Rostedt (VMware) * 30: return addr to kernel_func_bar
8058fe7a87SSteven Rostedt (VMware) * 31: [ do trace stack here ]
8158fe7a87SSteven Rostedt (VMware) *
8258fe7a87SSteven Rostedt (VMware) * The save_stack_trace() is called returning all the functions it finds in the
8358fe7a87SSteven Rostedt (VMware) * current stack. Which would be (from the bottom of the stack to the top):
8458fe7a87SSteven Rostedt (VMware) *
8558fe7a87SSteven Rostedt (VMware) * return addr to kernel_func_bar
8658fe7a87SSteven Rostedt (VMware) * return addr to sys_foo
8758fe7a87SSteven Rostedt (VMware) * return addr to entry code
8858fe7a87SSteven Rostedt (VMware) *
8958fe7a87SSteven Rostedt (VMware) * Now to figure out how much each of these functions' local variable size is,
9058fe7a87SSteven Rostedt (VMware) * a search of the stack is made to find these values. When a match is made, it
9158fe7a87SSteven Rostedt (VMware) * is added to the stack_dump_trace[] array. The offset into the stack is saved
9258fe7a87SSteven Rostedt (VMware) * in the stack_trace_index[] array. The above example would show:
9358fe7a87SSteven Rostedt (VMware) *
9458fe7a87SSteven Rostedt (VMware) * stack_dump_trace[] | stack_trace_index[]
9558fe7a87SSteven Rostedt (VMware) * ------------------ + -------------------
9658fe7a87SSteven Rostedt (VMware) * return addr to kernel_func_bar | 30
9758fe7a87SSteven Rostedt (VMware) * return addr to sys_foo | 20
9858fe7a87SSteven Rostedt (VMware) * return addr to entry | 10
9958fe7a87SSteven Rostedt (VMware) *
10058fe7a87SSteven Rostedt (VMware) * The print_max_stack() function above, uses these values to print the size of
10158fe7a87SSteven Rostedt (VMware) * each function's portion of the stack.
10258fe7a87SSteven Rostedt (VMware) *
10358fe7a87SSteven Rostedt (VMware) * for (i = 0; i < nr_entries; i++) {
10458fe7a87SSteven Rostedt (VMware) * size = i == nr_entries - 1 ? stack_trace_index[i] :
10558fe7a87SSteven Rostedt (VMware) * stack_trace_index[i] - stack_trace_index[i+1]
10658fe7a87SSteven Rostedt (VMware) * print "%d %d %d %s\n", i, stack_trace_index[i], size, stack_dump_trace[i]);
10758fe7a87SSteven Rostedt (VMware) * }
10858fe7a87SSteven Rostedt (VMware) *
10958fe7a87SSteven Rostedt (VMware) * The above shows
11058fe7a87SSteven Rostedt (VMware) *
11158fe7a87SSteven Rostedt (VMware) * depth size location
11258fe7a87SSteven Rostedt (VMware) * ----- ---- --------
11358fe7a87SSteven Rostedt (VMware) * 0 30 10 kernel_func_bar
11458fe7a87SSteven Rostedt (VMware) * 1 20 10 sys_foo
11558fe7a87SSteven Rostedt (VMware) * 2 10 10 entry code
11658fe7a87SSteven Rostedt (VMware) *
11758fe7a87SSteven Rostedt (VMware) * Now for architectures that might save the return address after the functions
11858fe7a87SSteven Rostedt (VMware) * local variables (saving the link register before calling nested functions),
11958fe7a87SSteven Rostedt (VMware) * this will cause the stack to look a little different:
12058fe7a87SSteven Rostedt (VMware) *
12158fe7a87SSteven Rostedt (VMware) * [ top of stack ]
12258fe7a87SSteven Rostedt (VMware) * 0: sys call entry frame
12358fe7a87SSteven Rostedt (VMware) * 10: start of sys_foo_frame
12458fe7a87SSteven Rostedt (VMware) * 19: return addr to entry code << lr saved before calling kernel_func_bar
12558fe7a87SSteven Rostedt (VMware) * 20: start of kernel_func_bar frame
12658fe7a87SSteven Rostedt (VMware) * 29: return addr to sys_foo_frame << lr saved before calling next function
12758fe7a87SSteven Rostedt (VMware) * 30: [ do trace stack here ]
12858fe7a87SSteven Rostedt (VMware) *
12958fe7a87SSteven Rostedt (VMware) * Although the functions returned by save_stack_trace() may be the same, the
13058fe7a87SSteven Rostedt (VMware) * placement in the stack will be different. Using the same algorithm as above
13158fe7a87SSteven Rostedt (VMware) * would yield:
13258fe7a87SSteven Rostedt (VMware) *
13358fe7a87SSteven Rostedt (VMware) * stack_dump_trace[] | stack_trace_index[]
13458fe7a87SSteven Rostedt (VMware) * ------------------ + -------------------
13558fe7a87SSteven Rostedt (VMware) * return addr to kernel_func_bar | 30
13658fe7a87SSteven Rostedt (VMware) * return addr to sys_foo | 29
13758fe7a87SSteven Rostedt (VMware) * return addr to entry | 19
13858fe7a87SSteven Rostedt (VMware) *
13958fe7a87SSteven Rostedt (VMware) * Where the mapping is off by one:
14058fe7a87SSteven Rostedt (VMware) *
14158fe7a87SSteven Rostedt (VMware) * kernel_func_bar stack frame size is 29 - 19 not 30 - 29!
14258fe7a87SSteven Rostedt (VMware) *
14358fe7a87SSteven Rostedt (VMware) * To fix this, if the architecture sets ARCH_RET_ADDR_AFTER_LOCAL_VARS the
14458fe7a87SSteven Rostedt (VMware) * values in stack_trace_index[] are shifted by one to and the number of
14558fe7a87SSteven Rostedt (VMware) * stack trace entries is decremented by one.
14658fe7a87SSteven Rostedt (VMware) *
14758fe7a87SSteven Rostedt (VMware) * stack_dump_trace[] | stack_trace_index[]
14858fe7a87SSteven Rostedt (VMware) * ------------------ + -------------------
14958fe7a87SSteven Rostedt (VMware) * return addr to kernel_func_bar | 29
15058fe7a87SSteven Rostedt (VMware) * return addr to sys_foo | 19
15158fe7a87SSteven Rostedt (VMware) *
15258fe7a87SSteven Rostedt (VMware) * Although the entry function is not displayed, the first function (sys_foo)
15358fe7a87SSteven Rostedt (VMware) * will still include the stack size of it.
15458fe7a87SSteven Rostedt (VMware) */
/*
 * Measure the current kernel-stack usage and, if it is a new maximum,
 * record a snapshot of the stack trace together with per-frame depths
 * into stack_dump_trace[]/stack_trace_index[].  See the long comment
 * above for the algorithm.
 *
 * @ip:    caller's instruction pointer, used to skip the tracer's own frames
 * @stack: address of a local variable in stack_trace_call(), which marks
 *         how deep the stack currently is
 */
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	/* Bytes in use between the top of the stack and @stack. */
	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	/* Fast path: not a new maximum, nothing to record. */
	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	/* Keep one slot spare so the index search below can't overrun. */
	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
					       ARRAY_SIZE(stack_dump_trace) - 1,
					       0);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		/* Entry never found on the stack: skip it and keep going. */
		if (!found)
			i++;
	}

#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
	/*
	 * Some archs will store the link register before calling
	 * nested functions. This means the saved return address
	 * comes after the local storage, and we need to shift
	 * for that.
	 */
	if (x > 1) {
		memmove(&stack_trace_index[0], &stack_trace_index[1],
			sizeof(stack_trace_index[0]) * (x - 1));
		x--;
	}
#endif

	stack_trace_nr_entries = x;

	/* Stack overran its end marker: dump what we have and die loudly. */
	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}
285e5a81b62SSteven Rostedt
/* Some archs may not define MCOUNT_INSN_SIZE */
#ifndef MCOUNT_INSN_SIZE
# define MCOUNT_INSN_SIZE 0
#endif

/*
 * ftrace callback, invoked at every traced function entry.  Guards against
 * recursion with the per-CPU disable_stack_tracer counter, then hands the
 * address of a local variable to check_stack() as the stack-depth marker.
 */
static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If rcu is not watching, then save stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	/* Skip past the mcount call site so @ip points into the caller. */
	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}
317e5a81b62SSteven Rostedt
/* ftrace_ops that hooks stack_trace_call() into every traced function. */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};
322e5a81b62SSteven Rostedt
323e5a81b62SSteven Rostedt static ssize_t
stack_max_size_read(struct file * filp,char __user * ubuf,size_t count,loff_t * ppos)324e5a81b62SSteven Rostedt stack_max_size_read(struct file *filp, char __user *ubuf,
325e5a81b62SSteven Rostedt size_t count, loff_t *ppos)
326e5a81b62SSteven Rostedt {
327e5a81b62SSteven Rostedt unsigned long *ptr = filp->private_data;
328e5a81b62SSteven Rostedt char buf[64];
329e5a81b62SSteven Rostedt int r;
330e5a81b62SSteven Rostedt
331e5a81b62SSteven Rostedt r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
332e5a81b62SSteven Rostedt if (r > sizeof(buf))
333e5a81b62SSteven Rostedt r = sizeof(buf);
334e5a81b62SSteven Rostedt return simple_read_from_buffer(ubuf, count, ppos, buf, r);
335e5a81b62SSteven Rostedt }
336e5a81b62SSteven Rostedt
/*
 * Write handler for the tracefs "stack_max_size" file: parse a decimal
 * value from user space and store it as the new recorded maximum while
 * holding the max-stack lock.
 */
static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * In case we trace inside arch_spin_lock() or after (NMI),
	 * we will cause circular lock, so we also need to increase
	 * the percpu disable_stack_tracer here.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}
367e5a81b62SSteven Rostedt
/* File operations for the tracefs "stack_max_size" control file. */
static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};
374e5a81b62SSteven Rostedt
375e5a81b62SSteven Rostedt static void *
__next(struct seq_file * m,loff_t * pos)3762fc5f0cfSLi Zefan __next(struct seq_file *m, loff_t *pos)
377e5a81b62SSteven Rostedt {
3782fc5f0cfSLi Zefan long n = *pos - 1;
379e5a81b62SSteven Rostedt
3809f50c91bSThomas Gleixner if (n >= stack_trace_nr_entries)
381e5a81b62SSteven Rostedt return NULL;
382e5a81b62SSteven Rostedt
3832fc5f0cfSLi Zefan m->private = (void *)n;
3841b6cced6SSteven Rostedt return &m->private;
385e5a81b62SSteven Rostedt }
386e5a81b62SSteven Rostedt
3872fc5f0cfSLi Zefan static void *
t_next(struct seq_file * m,void * v,loff_t * pos)3882fc5f0cfSLi Zefan t_next(struct seq_file *m, void *v, loff_t *pos)
3892fc5f0cfSLi Zefan {
3902fc5f0cfSLi Zefan (*pos)++;
3912fc5f0cfSLi Zefan return __next(m, pos);
3922fc5f0cfSLi Zefan }
3932fc5f0cfSLi Zefan
/*
 * seq_file start: disable the stack tracer on this CPU and take the
 * max-stack lock so the snapshot cannot change while being printed.
 * Balanced, in exact reverse order, by t_stop().
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	/* Position 0 prints the table header. */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}
407e5a81b62SSteven Rostedt
/* seq_file stop: undo t_start() in exact reverse order. */
static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}
416e5a81b62SSteven Rostedt
trace_lookup_stack(struct seq_file * m,long i)417962e3707SJoe Perches static void trace_lookup_stack(struct seq_file *m, long i)
418e5a81b62SSteven Rostedt {
4191b6cced6SSteven Rostedt unsigned long addr = stack_dump_trace[i];
420e5a81b62SSteven Rostedt
421962e3707SJoe Perches seq_printf(m, "%pS\n", (void *)addr);
422e5a81b62SSteven Rostedt }
423e5a81b62SSteven Rostedt
/* Tell the user how to enable the stack tracer when the table is empty. */
static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "# Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
434e447e1dfSSteven Rostedt
/*
 * seq_file show: print the table header (for SEQ_START_TOKEN) or one row
 * of the max-stack table: index, depth, frame size and symbolized location.
 */
static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, " Depth Size Location"
			   " (%d entries)\n"
			   " ----- ---- --------\n",
			   stack_trace_nr_entries);

		/* Explain the empty table when the tracer never ran. */
		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	/* The last entry's frame size is its full recorded depth. */
	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d %5d ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}
468e5a81b62SSteven Rostedt
/* seq_file iterator for the tracefs "stack_trace" file. */
static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
475e5a81b62SSteven Rostedt
stack_trace_open(struct inode * inode,struct file * file)476e5a81b62SSteven Rostedt static int stack_trace_open(struct inode *inode, struct file *file)
477e5a81b62SSteven Rostedt {
47817911ff3SSteven Rostedt (VMware) int ret;
47917911ff3SSteven Rostedt (VMware)
48017911ff3SSteven Rostedt (VMware) ret = security_locked_down(LOCKDOWN_TRACEFS);
48117911ff3SSteven Rostedt (VMware) if (ret)
48217911ff3SSteven Rostedt (VMware) return ret;
48317911ff3SSteven Rostedt (VMware)
484d8cc1ab7SLi Zefan return seq_open(file, &stack_trace_seq_ops);
485e5a81b62SSteven Rostedt }
486e5a81b62SSteven Rostedt
/* File operations for the read-only tracefs "stack_trace" file. */
static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
493e5a81b62SSteven Rostedt
494bbd1d27dSSteven Rostedt (VMware) #ifdef CONFIG_DYNAMIC_FTRACE
495bbd1d27dSSteven Rostedt (VMware)
496d2d45c7aSSteven Rostedt static int
stack_trace_filter_open(struct inode * inode,struct file * file)497d2d45c7aSSteven Rostedt stack_trace_filter_open(struct inode *inode, struct file *file)
498d2d45c7aSSteven Rostedt {
4990f179765SSteven Rostedt (VMware) struct ftrace_ops *ops = inode->i_private;
5000f179765SSteven Rostedt (VMware)
50117911ff3SSteven Rostedt (VMware) /* Checks for tracefs lockdown */
5020f179765SSteven Rostedt (VMware) return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
503d2d45c7aSSteven Rostedt inode, file);
504d2d45c7aSSteven Rostedt }
505d2d45c7aSSteven Rostedt
/* File operations for the tracefs "stack_trace_filter" file. */
static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};
513d2d45c7aSSteven Rostedt
514bbd1d27dSSteven Rostedt (VMware) #endif /* CONFIG_DYNAMIC_FTRACE */
515bbd1d27dSSteven Rostedt (VMware)
/*
 * sysctl handler for /proc/sys/kernel/stack_tracer_enabled: lets
 * proc_dointvec() update the flag, then registers or unregisters the
 * ftrace callback only when the value actually changed.
 */
int
stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		   size_t *lenp, loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Nothing to do on read, on error, or when the value is unchanged. */
	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
539f38f1d2aSSteven Rostedt
540762e1207SSteven Rostedt static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;
541762e1207SSteven Rostedt
enable_stacktrace(char * str)542f38f1d2aSSteven Rostedt static __init int enable_stacktrace(char *str)
543f38f1d2aSSteven Rostedt {
5443d739c1fSSteven Rostedt (VMware) int len;
5453d739c1fSSteven Rostedt (VMware)
5463d739c1fSSteven Rostedt (VMware) if ((len = str_has_prefix(str, "_filter=")))
5473d739c1fSSteven Rostedt (VMware) strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);
548762e1207SSteven Rostedt
549e05a43b7SSteven Rostedt stack_tracer_enabled = 1;
550f38f1d2aSSteven Rostedt return 1;
551f38f1d2aSSteven Rostedt }
552f38f1d2aSSteven Rostedt __setup("stacktrace", enable_stacktrace);
553f38f1d2aSSteven Rostedt
/*
 * Boot-time setup: create the tracefs control files and, if "stacktrace"
 * was given on the kernel command line, register the ftrace callback.
 */
static __init int stack_trace_init(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("stack_max_size", TRACE_MODE_WRITE, NULL,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", TRACE_MODE_READ, NULL,
			NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", TRACE_MODE_WRITE, NULL,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	/* Apply any "stacktrace_filter=" saved from the command line. */
	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
583