/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

static void save_stack_warning(void *data, char *msg)
{
}

static void
save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}

static int save_stack_stack(void *data, char *name)
{
	return 0;
}

static void
__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
{
	struct stack_trace *trace = data;
#ifdef CONFIG_FRAME_POINTER
	if (!reliable)
		return;
#endif
	if (nosched && in_sched_functions(addr))
		return;
	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, false);
}

static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, true);
}

static const struct stacktrace_ops save_stack_ops = {
	.warning	= save_stack_warning,
	.warning_symbol	= save_stack_warning_symbol,
	.stack		= save_stack_stack,
	.address	= save_stack_address,
	.walk_stack	= print_context_stack,
};

static const struct stacktrace_ops save_stack_ops_nosched = {
	.warning	= save_stack_warning,
	.warning_symbol	= save_stack_warning_symbol,
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
	.walk_stack	= print_context_stack,
};

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
{
	dump_trace(current, regs, NULL, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
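
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a caller owns the entries[] buffer and fills in max_entries and skip
 * before handing the struct stack_trace to save_stack_trace() above.
 * The wrapper name, the config guard, the buffer size and the skip value
 * below are arbitrary choices made for the example; save_stack_trace(),
 * print_stack_trace() and ARRAY_SIZE() are the existing kernel interfaces
 * from <linux/stacktrace.h> and <linux/kernel.h>.
 */
#ifdef STACKTRACE_USAGE_EXAMPLE		/* hypothetical guard, never defined */
#include <linux/kernel.h>

static void example_dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 2,	/* drop the innermost frames */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}
#endif /* STACKTRACE_USAGE_EXAMPLE */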