/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>

#include "trace.h"
#include "trace_stat.h"
#include "trace_output.h"

#ifdef CONFIG_BRANCH_TRACER

static struct tracer branch_trace;
static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);

static struct trace_array *branch_tracer;

static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct ftrace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct trace_array_cpu *data;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	struct ring_buffer *buffer;
	unsigned long flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (atomic_inc_return(&data->disabled) != 1)
		goto out;

	pc = preempt_count();
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	entry = ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}
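
/*
 * For context, a rough sketch of how annotated branches reach
 * probe_likely_condition(): with CONFIG_TRACE_BRANCH_PROFILING,
 * include/linux/compiler.h redefines likely()/unlikely() so that each
 * evaluation records its outcome in a per-callsite struct placed in the
 * _ftrace_annotated_branch section (the region bracketed by the
 * __start/__stop_annotated_branch_profile symbols used below). Abridged
 * from compiler.h (the real macro also keeps the __builtin_expect()
 * hint for the compiler):
 *
 *	#define __branch_check__(x, expect) ({				\
 *		int ______r;						\
 *		static struct ftrace_branch_data ______f		\
 *			__attribute__((section("_ftrace_annotated_branch"))) \
 *			= { .func = __func__, .file = __FILE__,		\
 *			    .line = __LINE__ };				\
 *		______r = !!(x);					\
 *		ftrace_likely_update(&______f, ______r, expect);	\
 *		______r;						\
 *	})
 *
 *	#define likely(x)	(__branch_check__(x, 1))
 *	#define unlikely(x)	(__branch_check__(x, 0))
 */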

int enable_branch_tracing(struct trace_array *tr)
{
	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * Must be seen before enabling. The reader is a condition
	 * where we do not need a matching rmb()
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return 0;
}

void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}

static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}

static int branch_trace_init(struct trace_array *tr)
{
	start_branch_trace(tr);
	return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}

static enum print_line_t trace_branch_print(struct trace_iterator *iter,
					    int flags, struct trace_event *event)
{
	struct trace_branch *field;

	trace_assign_type(field, iter->ent);

	/*
	 * trace_seq_printf() returns nonzero on success, so a successful
	 * print means the line is complete; only report a partial line
	 * when the seq buffer filled up.
	 */
	if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
			     field->correct ? "  ok  " : " MISS ",
			     field->func,
			     field->file,
			     field->line))
		return TRACE_TYPE_HANDLED;

	return TRACE_TYPE_PARTIAL_LINE;
}

static void branch_print_header(struct seq_file *s)
{
	seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
		    "  FUNC:FILE:LINE\n");
	seq_puts(s, "#              | |       |          |         |"
		    "        |\n");
}

static struct trace_event_functions trace_branch_funcs = {
	.trace		= trace_branch_print,
};

static struct trace_event trace_branch_event = {
	.type		= TRACE_BRANCH,
	.funcs		= &trace_branch_funcs,
};

static struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
	.print_header	= branch_print_header,
};

__init static int init_branch_tracer(void)
{
	int ret;

	/* register_ftrace_event() returns 0 on failure */
	ret = register_ftrace_event(&trace_branch_event);
	if (!ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "branch events\n");
		return 1;
	}
	return register_tracer(&branch_trace);
}
core_initcall(init_branch_tracer);

#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
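
/*
 * Usage sketch (assuming CONFIG_BRANCH_TRACER and debugfs mounted at
 * /sys/kernel/debug):
 *
 *	# echo branch > /sys/kernel/debug/tracing/current_tracer
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * Each event is rendered by trace_branch_print() above; an output line
 * looks roughly like (illustrative values only):
 *
 *	bash-2044  [000]  1211.1234: [ MISS ] do_fork:fork.c:1234
 *
 * where "ok" means the likely()/unlikely() annotation matched the
 * branch actually taken and "MISS" means it did not.
 */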

void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);

extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static int annotated_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, " correct incorrect  %% ");
	seq_printf(m, "       Function                "
		      "  File              Line\n"
		      " ------- ---------  - "
		      "       --------                "
		      "  ----              ----\n");
	return 0;
}

static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
	long percent;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : -1;

	return percent;
}

static int branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	/*
	 * The miss is overlaid on correct, and hit on incorrect.
	 */
	percent = get_incorrect_percent(p);

	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
	if (percent < 0)
		seq_printf(m, "  X ");
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}

static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
	return __start_annotated_branch_profile;
}

static void *
annotated_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_annotated_branch_profile)
		return NULL;

	return p;
}

static int annotated_branch_stat_cmp(void *p1, void *p2)
{
	struct ftrace_branch_data *a = p1;
	struct ftrace_branch_data *b = p2;

	long percent_a, percent_b;

	percent_a = get_incorrect_percent(a);
	percent_b = get_incorrect_percent(b);

	if (percent_a < percent_b)
		return -1;
	if (percent_a > percent_b)
		return 1;

	if (a->incorrect < b->incorrect)
		return -1;
	if (a->incorrect > b->incorrect)
		return 1;

	/*
	 * Since the above shows worse (incorrect) cases
	 * first, we continue that by showing best (correct)
	 * cases last.
	 */
	if (a->correct > b->correct)
		return -1;
	if (a->correct < b->correct)
		return 1;

	return 0;
}
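
/*
 * Worked example of the sort key above (values are hypothetical): a
 * site with correct = 163, incorrect = 37 yields
 * get_incorrect_percent() = 37 * 100 / (163 + 37) = 18 (integer
 * division), while a never-executed site with correct = incorrect = 0
 * yields -1, which branch_stat_show() prints as "X". Ties on the
 * percentage are broken by the raw incorrect count, then by the
 * correct count, so the worst-predicted branches sort to the top of
 * the stats file.
 */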

static struct tracer_stat annotated_branch_stats = {
	.name = "branch_annotated",
	.stat_start = annotated_branch_stat_start,
	.stat_next = annotated_branch_stat_next,
	.stat_cmp = annotated_branch_stat_cmp,
	.stat_headers = annotated_branch_stat_headers,
	.stat_show = branch_stat_show
};

__init static int init_annotated_branch_stats(void)
{
	int ret;

	/* register_stat_tracer() returns 0 on success */
	ret = register_stat_tracer(&annotated_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "annotated branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(init_annotated_branch_stats);

#ifdef CONFIG_PROFILE_ALL_BRANCHES

extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static int all_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, "   miss      hit    %% ");
	seq_printf(m, "       Function                "
		      "  File              Line\n"
		      " ------- ---------  - "
		      "       --------                "
		      "  ----              ----\n");
	return 0;
}

static void *all_branch_stat_start(struct tracer_stat *trace)
{
	return __start_branch_profile;
}

static void *
all_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_branch_profile)
		return NULL;

	return p;
}

static struct tracer_stat all_branch_stats = {
	.name = "branch_all",
	.stat_start = all_branch_stat_start,
	.stat_next = all_branch_stat_next,
	.stat_headers = all_branch_stat_headers,
	.stat_show = branch_stat_show
};

__init static int all_annotated_branch_stats(void)
{
	int ret;

	/* register_stat_tracer() returns 0 on success */
	ret = register_stat_tracer(&all_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "all branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(all_annotated_branch_stats);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
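
/*
 * Usage sketch (paths assume debugfs mounted at /sys/kernel/debug):
 * the stat tracers registered above appear under tracing/trace_stat/,
 * one file per table:
 *
 *	# cat /sys/kernel/debug/tracing/trace_stat/branch_annotated
 *	 correct incorrect  %        Function    File    Line
 *	 ...
 *
 * branch_all exists only with CONFIG_PROFILE_ALL_BRANCHES; its columns
 * are labeled miss/hit rather than correct/incorrect because every
 * if () is profiled there, annotated or not.
 */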