/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES];

static struct stack_trace max_stack_trace = {
	.max_entries		= STACK_TRACE_ENTRIES,
	.entries		= stack_dump_trace,
};

static unsigned long max_stack_size;
static arch_spinlock_t max_stack_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int stack_trace_disabled __read_mostly;
static DEFINE_PER_CPU(int, trace_active);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

/*
 * Measure how much of the current thread's stack is in use; if it
 * is a new maximum, save a stack trace and record, for each traced
 * function, the stack depth at which its return address was found.
 */
static inline void check_stack(void)
{
	unsigned long this_size, flags;
	unsigned long *p, *top, *start;
	int i;

	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;

	if (this_size <= max_stack_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(&this_size))
		return;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);

	/* a race could have already updated it */
	if (this_size <= max_stack_size)
		goto out;

	max_stack_size = this_size;

	max_stack_trace.nr_entries	= 0;
	max_stack_trace.skip		= 3;

	save_stack_trace(&max_stack_trace);

	/*
	 * Now find where in the stack these are.
	 */
	i = 0;
	start = &this_size;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for it. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
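	/*
	 * Worked example (hypothetical functions and offsets): if the
	 * saved trace is [vfs_write, sys_write, ...] and the scan below
	 * finds vfs_write's return address 80 longs below the top of
	 * the stack and sys_write's 40 longs below it, then on a
	 * 64-bit kernel:
	 *
	 *	stack_dump_index[0] = 80 * 8 = 640
	 *	stack_dump_index[1] = 40 * 8 = 320
	 *
	 * Each entry thus records how deep the stack was when that
	 * function was entered; t_show() later prints per-function
	 * sizes as the difference between adjacent entries.
	 */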
	while (i < max_stack_trace.nr_entries) {
		int found = 0;

		stack_dump_index[i] = this_size;
		p = start;

		for (; p < top && i < max_stack_trace.nr_entries; p++) {
			if (*p == stack_dump_trace[i]) {
				this_size = stack_dump_index[i++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
			}
		}

		if (!found)
			i++;
	}

 out:
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);
}

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu, resched;

	if (unlikely(!ftrace_enabled || stack_trace_disabled))
		return;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();
	/* no atomic needed, we only modify this variable by this cpu */
	if (per_cpu(trace_active, cpu)++ != 0)
		goto out;

	check_stack();

 out:
	per_cpu(trace_active, cpu)--;
	/* prevent recursion in schedule */
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	unsigned long val, flags;
	char buf[64];
	int ret;

	if (count >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	local_irq_save(flags);
	arch_spin_lock(&max_stack_lock);
	*ptr = val;
	arch_spin_unlock(&max_stack_lock);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open		= tracing_open_generic,
	.read		= stack_max_size_read,
	.write		= stack_max_size_write,
};

/* map *pos (1-based past the header) to a trace entry, or end */
static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= max_stack_trace.nr_entries || stack_dump_trace[n] == ULONG_MAX)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();
	arch_spin_lock(&max_stack_lock);

	/* position 0 is the header line */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&max_stack_lock);
	local_irq_enable();
}

static int trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	return seq_printf(m, "%pF\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}
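/*
 * Illustrative output of the stack_trace file (hypothetical values;
 * the real contents depend on the deepest call chain observed):
 *
 *        Depth    Size   Location    (2 entries)
 *        -----    ----   --------
 *  0)      640     320   vfs_write+0x6b/0x140
 *  1)      320     320   sys_write+0x45/0x90
 */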
static int
t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   max_stack_trace.nr_entries - 1);

		if (!stack_tracer_enabled && !max_stack_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= max_stack_trace.nr_entries ||
	    stack_dump_trace[i] == ULONG_MAX)
		return 0;

	if (i+1 == max_stack_trace.nr_entries ||
	    stack_dump_trace[i+1] == ULONG_MAX)
		size = stack_dump_index[i];
	else
		size = stack_dump_index[i] - stack_dump_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_dump_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open		= stack_trace_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * Handle reads and writes of /proc/sys/kernel/stack_tracer_enabled,
 * registering or unregistering the ftrace callback when the value
 * actually changes.
 */
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static __init int enable_stacktrace(char *str)
{
	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();

	trace_create_file("stack_max_size", 0644, d_tracer,
			&max_stack_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);
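/*
 * Typical usage, assuming debugfs is mounted at /sys/kernel/debug
 * (the files above are created under the tracing directory):
 *
 *	# echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 *	# cat /sys/kernel/debug/tracing/stack_trace
 */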