// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 ARM Limited
 * Copyright (C) 2014 Regents of the University of California
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

#include <asm/stacktrace.h>

/* The current stack pointer, bound directly to the "sp" register. */
register const unsigned long sp_in_global __asm__("sp");

#ifdef CONFIG_FRAME_POINTER

void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		fp = (unsigned long)__builtin_frame_address(0);
		sp = sp_in_global;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
			break;

		/* Validate frame pointer */
		low = sp + sizeof(struct stackframe);
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x7))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		fp = frame->fp;
		/*
		 * Resolve the real return address in case the function
		 * graph tracer has replaced it with a trampoline.
		 */
		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
					   (unsigned long *)(fp - 8));
	}
}

#else /* !CONFIG_FRAME_POINTER */

void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		sp = sp_in_global;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	if (unlikely(sp & 0x7))
		return;

	/*
	 * Without frame pointers, conservatively scan the stack for
	 * values that look like kernel return addresses.
	 */
	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
			break;
		pc = (*ksp++) - 0x4;
	}
}

#endif /* CONFIG_FRAME_POINTER */

static bool print_trace_address(void *arg, unsigned long pc)
{
	const char *loglvl = arg;

	print_ip_sym(loglvl, pc);
	return true;
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	pr_cont("Call Trace:\n");
	walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}

/* Record the first PC outside the scheduler and stop the walk. */
static bool save_wchan(void *arg, unsigned long pc)
{
	if (!in_sched_functions(pc)) {
		unsigned long *p = arg;
		*p = pc;
		return false;
	}
	return true;
}

unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	if (likely(task && task != current && task->state != TASK_RUNNING))
		walk_stackframe(task, NULL, save_wchan, &pc);
	return pc;
}

#ifdef CONFIG_STACKTRACE

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	walk_stackframe(task, regs, consume_entry, cookie);
}

#endif /* CONFIG_STACKTRACE */