/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_STACKTRACE_H
#define __ASM_STACKTRACE_H

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>

#include <asm/memory.h>
#include <asm/ptrace.h>

struct stackframe {
	unsigned long fp;	/* frame pointer (x29) of the current frame */
	unsigned long pc;	/* return address saved with that frame */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned int graph;	/* index into the function graph return stack */
#endif
};

extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
			    int (*fn)(struct stackframe *, void *), void *data);
extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk);

DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);

static inline bool on_irq_stack(unsigned long sp)
{
	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
	unsigned long high = low + IRQ_STACK_SIZE;

	/* The IRQ stack may not have been allocated this early in boot. */
	if (!low)
		return false;

	return (low <= sp && sp < high);
}

static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp)
{
	unsigned long low = (unsigned long)task_stack_page(tsk);
	unsigned long high = low + THREAD_SIZE;

	return (low <= sp && sp < high);
}

#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);

static inline bool on_overflow_stack(unsigned long sp)
{
	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return (low <= sp && sp < high);
}
#else
static inline bool on_overflow_stack(unsigned long sp) { return false; }
#endif

/*
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
 */
static inline bool on_accessible_stack(struct task_struct *tsk, unsigned long sp)
{
	if (on_task_stack(tsk, sp))
		return true;
	if (tsk != current || preemptible())
		return false;
	if (on_irq_stack(sp))
		return true;
	if (on_overflow_stack(sp))
		return true;

	return false;
}

#endif	/* __ASM_STACKTRACE_H */
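
/*
 * Usage sketch (illustrative only, not part of this header): a caller
 * typically primes a stackframe from a task's saved register state and then
 * walks the call chain via walk_stackframe(), much as
 * arch/arm64/kernel/stacktrace.c does. The consume_entry() callback and
 * "cookie" argument below are hypothetical names; the callback must match
 * int (*)(struct stackframe *, void *) and return non-zero to stop the walk.
 *
 *	struct stackframe frame;
 *
 *	frame.fp = thread_saved_fp(tsk);
 *	frame.pc = thread_saved_pc(tsk);
 *	#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 *	frame.graph = tsk->curr_ret_stack;
 *	#endif
 *
 *	walk_stackframe(tsk, &frame, consume_entry, cookie);
 */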