/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/types.h>
#include <linux/llist.h>

#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/*
 * A snapshot of a frame record or fp/lr register values, along with some
 * accounting information necessary for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and which it is no
 *               longer valid to unwind to.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
 *               of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at an increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
 *               replacement lr value in the ftrace graph stack.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 */
struct stackframe {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph;
#endif
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
};
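
/*
 * Note (editorial, per AAPCS64 rather than anything defined in this file):
 * an AArch64 frame record is a pair of 64-bit values that the frame pointer
 * (x29) points at, so a single naive unwind step would read:
 *
 *	next_fp = *(unsigned long *)(frame->fp);	// saved x29
 *	next_pc = *(unsigned long *)(frame->fp + 8);	// saved x30 (lr)
 *
 * unwind_frame() below performs this step with the extra validation that
 * @stacks_done, @prev_fp and @prev_type make possible.
 */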

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    bool (*fn)(void *, unsigned long), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
			   const char *loglvl);
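
/*
 * Example (illustrative sketch, not part of this header): walk_stackframe()
 * invokes @fn once per unwound frame with that frame's pc, and stops early
 * if @fn returns false. A minimal consumer that prints each return address
 * could look like the following; print_entry is a hypothetical name.
 *
 *	static bool print_entry(void *data, unsigned long pc)
 *	{
 *		pr_info("%pS\n", (void *)pc);
 *		return true;
 *	}
 */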

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

static inline bool on_stack(unsigned long sp, unsigned long size,
			    unsigned long low, unsigned long high,
			    enum stack_type type, struct stack_info *info)
{
	/* A zero base means this stack has not been set up. */
	if (!low)
		return false;

	/*
	 * The object at [sp, sp + size) must lie entirely within
	 * [low, high); the middle test rejects wraparound of sp + size.
	 */
	if (sp < low || sp + size < sp || sp + size > high)
		return false;

	if (info) {
		info->low = low;
		info->high = high;
		info->type = type;
	}
	return true;
}

static inline bool on_irq_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}

static inline bool on_task_stack(const struct task_struct *tsk,
				 unsigned long sp, unsigned long size,
				 struct stack_info *info)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}
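
/*
 * Example (illustrative sketch, not part of this header): on_task_stack()
 * can validate that an object which should live on a task's stack really
 * lies within it, e.g. before dereferencing a saved struct pt_regs:
 *
 *	if (!on_task_stack(tsk, (unsigned long)regs, sizeof(*regs), NULL))
 *		return -EINVAL;
 */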

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
			struct stack_info *info) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	/* The task stack is not per-cpu, so it can be checked for any task. */
	if (on_task_stack(tsk, sp, size, info))
		return true;
	/* The remaining stacks are per-cpu; see the comment above. */
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp, size, info))
		return true;
	if (on_overflow_stack(sp, size, info))
		return true;
	if (on_sdei_stack(sp, size, info))
		return true;

	return false;
}

void start_backtrace(struct stackframe *frame, unsigned long fp,
		     unsigned long pc);
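
/*
 * Example (illustrative sketch, not part of this header): a typical unwind
 * of the current task primes a stackframe with start_backtrace() and then
 * walks it. The function and callback names here are hypothetical.
 *
 *	static bool count_entry(void *data, unsigned long pc)
 *	{
 *		(*(unsigned int *)data)++;
 *		return true;
 *	}
 *
 *	static unsigned int count_frames(void)
 *	{
 *		struct stackframe frame;
 *		unsigned int nr = 0;
 *
 *		start_backtrace(&frame,
 *				(unsigned long)__builtin_frame_address(0),
 *				(unsigned long)count_frames);
 *		walk_stackframe(current, &frame, count_entry, &nr);
 *		return nr;
 *	}
 */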

#endif	/* __ASM_STACKTRACE_H */