// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/irq.h>
#include <asm/pointer_auth.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * AArch64 PCS assigns the frame pointer to x29.
 *
 * A simple function prologue looks like this:
 *	sub	sp, sp, #0x10
 *	stp	x29, x30, [sp]
 *	mov	x29, sp
 *
 * A simple function epilogue looks like this:
 *	mov	sp, x29
 *	ldp	x29, x30, [sp]
 *	add	sp, sp, #0x10
 */

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
	unsigned long fp = frame->fp;
	struct stack_info info;

	/* Terminal record; nothing to unwind */
	if (!fp)
		return -ENOENT;

	if (fp & 0xf)
		return -EINVAL;

	if (!tsk)
		tsk = current;

	if (!on_accessible_stack(tsk, fp, &info))
		return -EINVAL;

	if (test_bit(info.type, frame->stacks_done))
		return -EINVAL;

	/*
	 * As stacks grow downward, any valid record on the same stack must be
	 * at a strictly higher address than the prior record.
	 *
	 * Stacks can nest in several valid orders, e.g.
	 *
	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
	 *
	 * ... but the nesting itself is strict. Once we transition from one
	 * stack to another, it's never valid to unwind back to that first
	 * stack.
	 */
	if (info.type == frame->prev_type) {
		if (fp <= frame->prev_fp)
			return -EINVAL;
	} else {
		set_bit(frame->prev_type, frame->stacks_done);
	}

	/*
	 * Record this frame record's values and location. The prev_fp and
	 * prev_type are only meaningful to the next unwind_frame() invocation.
	 */
	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
	frame->prev_fp = fp;
	frame->prev_type = info.type;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (tsk->ret_stack &&
	    (ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
		struct ftrace_ret_stack *ret_stack;
		/*
		 * This is a case where the function graph tracer has
		 * modified a return address (LR) in a stack frame
		 * to hook a function return.
		 * So replace it with the original value.
		 */
		ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
		if (WARN_ON_ONCE(!ret_stack))
			return -EINVAL;
		frame->pc = ret_stack->ret;
	}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	frame->pc = ptrauth_strip_insn_pac(frame->pc);

	return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
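
/*
 * Illustrative sketch (not part of the kernel build): each AArch64 frame
 * record read above is a pair of 64-bit values saved by the function
 * prologue: the caller's frame pointer at [x29, #0] and the saved link
 * register at [x29, #8]. Assuming hypothetical read_u64() and emit()
 * helpers, the core of the walk reduces to:
 *
 *	unsigned long fp = initial_fp;
 *
 *	while (fp && !(fp & 0xf)) {
 *		unsigned long next_fp = read_u64(fp);		// saved x29
 *		unsigned long pc      = read_u64(fp + 8);	// saved x30
 *
 *		emit(pc);
 *		if (next_fp && next_fp <= fp)	// same-stack records must be
 *			break;			// at strictly higher addresses
 *		fp = next_fp;
 *	}
 *
 * unwind_frame() above additionally validates each record against
 * on_accessible_stack(), uses stacks_done to forbid unwinding back onto a
 * stack that has already been left, and strips pointer authentication bits
 * from the saved PC.
 */
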
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			     bool (*fn)(void *, unsigned long), void *data)
{
	while (1) {
		int ret;

		if (!fn(data, frame->pc))
			break;
		ret = unwind_frame(tsk, frame);
		if (ret < 0)
			break;
	}
}
NOKPROBE_SYMBOL(walk_stackframe);

static void dump_backtrace_entry(unsigned long where, const char *loglvl)
{
	printk("%s %pS\n", loglvl, (void *)where);
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	struct stackframe frame;
	int skip = 0;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs) {
		if (user_mode(regs))
			return;
		skip = 1;
	}

	if (!tsk)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)dump_backtrace);
	} else {
		/*
		 * task blocked in __switch_to
		 */
		start_backtrace(&frame,
				thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	}

	printk("%sCall trace:\n", loglvl);
	do {
		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc, loglvl);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As the exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc, loglvl);
		}
	} while (!unwind_frame(tsk, &frame));

	put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}

#ifdef CONFIG_STACKTRACE

noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
			      void *cookie, struct task_struct *task,
			      struct pt_regs *regs)
{
	struct stackframe frame;

	if (regs)
		start_backtrace(&frame, regs->regs[29], regs->pc);
	else if (task == current)
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(1),
				(unsigned long)__builtin_return_address(0));
	else
		start_backtrace(&frame, thread_saved_fp(task),
				thread_saved_pc(task));

	walk_stackframe(task, &frame, consume_entry, cookie);
}

#endif
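
/*
 * Usage sketch (illustrative only): callers of arch_stack_walk() pass a
 * stack_trace_consume_fn that returns true to continue the walk and false
 * to stop it, matching the fn(data, frame->pc) check in walk_stackframe()
 * above. The struct my_trace, MAX_DEPTH, and store_one() names below are
 * hypothetical; the generic stacktrace code supplies the real consumers.
 *
 *	static bool store_one(void *cookie, unsigned long pc)
 *	{
 *		struct my_trace *t = cookie;
 *
 *		if (t->nr >= MAX_DEPTH)
 *			return false;		// stop unwinding
 *		t->entries[t->nr++] = pc;
 *		return true;			// keep going
 *	}
 *
 *	struct my_trace trace = { };
 *
 *	arch_stack_walk(store_one, &trace, current, NULL);
 */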