1 /*
2  * Copyright (C) 2012 ARM Ltd.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License version 2 as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
15  */
16 #ifndef __ASM_STACKTRACE_H
17 #define __ASM_STACKTRACE_H
18 
19 #include <linux/percpu.h>
20 #include <linux/sched.h>
21 #include <linux/sched/task_stack.h>
22 
23 #include <asm/memory.h>
24 #include <asm/ptrace.h>
25 #include <asm/sdei.h>
26 
/*
 * A single unwind step: the frame pointer and return address recorded
 * for one frame, advanced in place by unwind_frame().
 */
struct stackframe {
	unsigned long fp;	/* frame pointer of this frame */
	unsigned long pc;	/* return address for this frame */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/*
	 * Index into the task's return-address shadow stack, used to
	 * recover original return addresses rewritten by the function
	 * graph tracer — presumably consumed by unwind_frame(); the
	 * implementation is not visible here.
	 */
	int graph;
#endif
};
34 
/*
 * Identifies which of the arm64 stacks a given stack pointer falls on;
 * reported via stack_info::type by the on_*_stack() helpers below.
 */
enum stack_type {
	STACK_TYPE_UNKNOWN,		/* not on any recognised stack */
	STACK_TYPE_TASK,		/* the task's own kernel stack */
	STACK_TYPE_IRQ,			/* per-cpu IRQ stack */
	STACK_TYPE_OVERFLOW,		/* per-cpu overflow stack (CONFIG_VMAP_STACK) */
	STACK_TYPE_SDEI_NORMAL,		/* SDEI normal-priority event stack */
	STACK_TYPE_SDEI_CRITICAL,	/* SDEI critical-priority event stack */
};
43 
/*
 * Bounds and classification of a stack, filled in by the on_*_stack()
 * helpers. The range is [low, high): low is the lowest valid address,
 * high is one past the last.
 */
struct stack_info {
	unsigned long low;	/* inclusive lower bound */
	unsigned long high;	/* exclusive upper bound */
	enum stack_type type;	/* which stack [low, high) describes */
};
49 
/*
 * Step @frame one level up the call chain of @tsk; implementation not
 * visible here — presumably returns 0 on success, negative on failure
 * (TODO confirm against arch/arm64/kernel/stacktrace.c).
 */
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
/*
 * Walk @tsk's stack from @frame, invoking @fn with @data for each frame;
 * NOTE(review): a non-zero return from @fn presumably stops the walk —
 * verify against the implementation.
 */
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);

/* Per-cpu base address of the IRQ stack; NULL until the stack is set up. */
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
56 
57 static inline bool on_irq_stack(unsigned long sp,
58 				struct stack_info *info)
59 {
60 	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
61 	unsigned long high = low + IRQ_STACK_SIZE;
62 
63 	if (!low)
64 		return false;
65 
66 	if (sp < low || sp >= high)
67 		return false;
68 
69 	if (info) {
70 		info->low = low;
71 		info->high = high;
72 		info->type = STACK_TYPE_IRQ;
73 	}
74 
75 	return true;
76 }
77 
78 static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
79 				struct stack_info *info)
80 {
81 	unsigned long low = (unsigned long)task_stack_page(tsk);
82 	unsigned long high = low + THREAD_SIZE;
83 
84 	if (sp < low || sp >= high)
85 		return false;
86 
87 	if (info) {
88 		info->low = low;
89 		info->high = high;
90 		info->type = STACK_TYPE_TASK;
91 	}
92 
93 	return true;
94 }
95 
96 #ifdef CONFIG_VMAP_STACK
97 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
98 
99 static inline bool on_overflow_stack(unsigned long sp,
100 				struct stack_info *info)
101 {
102 	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
103 	unsigned long high = low + OVERFLOW_STACK_SIZE;
104 
105 	if (sp < low || sp >= high)
106 		return false;
107 
108 	if (info) {
109 		info->low = low;
110 		info->high = high;
111 		info->type = STACK_TYPE_OVERFLOW;
112 	}
113 
114 	return true;
115 }
116 #else
117 static inline bool on_overflow_stack(unsigned long sp,
118 			struct stack_info *info) { return false; }
119 #endif
120 
121 
122 /*
123  * We can only safely access per-cpu stacks from current in a non-preemptible
124  * context.
125  */
126 static inline bool on_accessible_stack(struct task_struct *tsk,
127 					unsigned long sp,
128 					struct stack_info *info)
129 {
130 	if (on_task_stack(tsk, sp, info))
131 		return true;
132 	if (tsk != current || preemptible())
133 		return false;
134 	if (on_irq_stack(sp, info))
135 		return true;
136 	if (on_overflow_stack(sp, info))
137 		return true;
138 	if (on_sdei_stack(sp, info))
139 		return true;
140 
141 	return false;
142 }
143 
144 #endif	/* __ASM_STACKTRACE_H */
145