/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Common arm64 stack unwinder code.
 *
 * See: arch/arm64/kernel/stacktrace.c for the reference implementation.
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_STACKTRACE_COMMON_H
#define __ASM_STACKTRACE_COMMON_H

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kprobes.h>
#include <linux/types.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_TASK,
	STACK_TYPE_IRQ,
	STACK_TYPE_OVERFLOW,
	STACK_TYPE_SDEI_NORMAL,
	STACK_TYPE_SDEI_CRITICAL,
	STACK_TYPE_HYP,
	__NR_STACK_TYPES
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

/**
 * struct unwind_state - state used for robust unwinding.
 *
 * @fp:          The fp value in the frame record (or the real fp)
 * @pc:          The lr value in the frame record (or the real lr)
 *
 * @stacks_done: Stacks which have been entirely unwound, and to which it is
 *               no longer valid to unwind.
 *
 * @prev_fp:     The fp that pointed to this frame record, or a synthetic
 *               value of 0. This is used to ensure that within a stack, each
 *               subsequent frame record is at a strictly increasing address.
 * @prev_type:   The type of stack this frame record was on, or a synthetic
 *               value of STACK_TYPE_UNKNOWN. This is used to detect a
 *               transition from one stack to another.
 *
 * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
 *               associated with the most recently encountered replacement lr
 *               value.
 *
 * @task:        The task being unwound.
 */
struct unwind_state {
	unsigned long fp;
	unsigned long pc;
	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
	unsigned long prev_fp;
	enum stack_type prev_type;
#ifdef CONFIG_KRETPROBES
	struct llist_node *kr_cur;
#endif
	struct task_struct *task;
};

static inline struct stack_info stackinfo_get_unknown(void)
{
	return (struct stack_info) {
		.low = 0,
		.high = 0,
		.type = STACK_TYPE_UNKNOWN,
	};
}

static inline bool stackinfo_on_stack(const struct stack_info *info,
				      unsigned long sp, unsigned long size)
{
	if (!info->low)
		return false;

	/*
	 * Reject ranges which fall below the stack, wrap past the end of the
	 * address space (sp + size < sp), or run past the top of the stack.
	 */
	if (sp < info->low || sp + size < sp || sp + size > info->high)
		return false;

	return true;
}

static inline void unwind_init_common(struct unwind_state *state,
				      struct task_struct *task)
{
	state->task = task;
#ifdef CONFIG_KRETPROBES
	state->kr_cur = NULL;
#endif

	/*
	 * Prime the first unwind.
	 *
	 * In unwind_next_frame_record() we'll check that the FP points to a
	 * valid stack, which can't be STACK_TYPE_UNKNOWN, and the first
	 * unwind will be treated as a transition to whichever stack that
	 * happens to be.
	 *
	 * The prev_fp value won't be used, but we set it to 0 such that it is
	 * definitely not an accessible stack address.
	 */
	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
	state->prev_fp = 0;
	state->prev_type = STACK_TYPE_UNKNOWN;
}
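/*
 * Example (an illustrative sketch, not part of this header's API): building
 * a stack_info for a task's stack, in the style of the per-stack helpers
 * that arch code layers on top of this header. task_stack_page() and
 * THREAD_SIZE are the usual kernel helpers (<linux/sched/task_stack.h>);
 * the name example_stackinfo_get_task is hypothetical.
 */
static inline struct stack_info
example_stackinfo_get_task(const struct task_struct *tsk)
{
	/* The task stack spans [low, low + THREAD_SIZE) and grows downward. */
	unsigned long low = (unsigned long)task_stack_page(tsk);

	return (struct stack_info) {
		.low = low,
		.high = low + THREAD_SIZE,
		.type = STACK_TYPE_TASK,
	};
}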
/**
 * typedef stack_trace_translate_fp_fn() - Translates a non-kernel frame
 * pointer to a kernel address.
 *
 * @fp: the frame pointer to be updated to its kernel address.
 *
 * Return: true if the VA can be translated, false otherwise.
 *
 * Upon success @fp is updated to the corresponding kernel virtual address.
 */
typedef bool (*stack_trace_translate_fp_fn)(unsigned long *fp);

/**
 * typedef on_accessible_stack_fn() - Check whether a stack range is on any of
 * the possible stacks.
 *
 * @tsk:  task whose stack is being unwound
 * @sp:   stack address being checked
 * @size: size of the stack range being checked
 * @info: stack unwinding context
 *
 * Return: true if the stack range is accessible, false otherwise.
 *
 * Upon success @info is updated with information for the relevant stack.
 *
 * Upon failure @info is updated with the UNKNOWN stack.
 */
typedef bool (*on_accessible_stack_fn)(const struct task_struct *tsk,
				       unsigned long sp, unsigned long size,
				       struct stack_info *info);

/**
 * unwind_next_frame_record() - Unwind to the next frame record.
 *
 * @state:        the current unwind state.
 * @accessible:   determines whether the frame record is accessible
 * @translate_fp: translates the fp prior to access (may be NULL)
 *
 * Return: 0 upon success, an error code otherwise.
 */
static inline int
unwind_next_frame_record(struct unwind_state *state,
			 on_accessible_stack_fn accessible,
			 stack_trace_translate_fp_fn translate_fp)
{
	struct stack_info info;
	unsigned long fp = state->fp, kern_fp = fp;
	struct task_struct *tsk = state->task;

	/* Frame records must be 8-byte aligned. */
	if (fp & 0x7)
		return -EINVAL;

	/* A frame record is two 8-byte entries: the saved fp and lr. */
	if (!accessible(tsk, fp, 16, &info))
		return -EINVAL;

	if (test_bit(info.type, state->stacks_done))
		return -EINVAL;

	/*
	 * If fp is not from the current address space perform the necessary
	 * translation before dereferencing it to get the next fp.
	 */
	if (translate_fp && !translate_fp(&kern_fp))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 * HYP -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == state->prev_type) {
		if (fp <= state->prev_fp)
			return -EINVAL;
	} else {
		__set_bit(state->prev_type, state->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_next_frame_record()
	 * invocation.
	 */
	state->fp = READ_ONCE(*(unsigned long *)(kern_fp));
	state->pc = READ_ONCE(*(unsigned long *)(kern_fp + 8));
	state->prev_fp = fp;
	state->prev_type = info.type;

	return 0;
}

#endif /* __ASM_STACKTRACE_COMMON_H */
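/*
 * Usage sketch (illustrative, not part of this header): a caller-side unwind
 * loop in the style of arch/arm64/kernel/stacktrace.c. The accessible
 * callback, consume_entry(), and cookie are assumed to be supplied by the
 * caller; translate_fp is NULL because in-kernel fp values need no
 * translation.
 *
 *	unwind_init_common(&state, task);
 *	state.fp = fp;
 *	state.pc = pc;
 *
 *	while (consume_entry(cookie, state.pc)) {
 *		if (unwind_next_frame_record(&state, accessible, NULL))
 *			break;
 *	}
 */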