xref: /openbmc/linux/kernel/trace/trace_branch.c (revision e8e0929d)
/*
 * unlikely profiler
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/hash.h>
#include <linux/fs.h>
#include <asm/local.h>

#include "trace.h"
#include "trace_stat.h"
#include "trace_output.h"

#ifdef CONFIG_BRANCH_TRACER

static struct tracer branch_trace;
static int branch_tracing_enabled __read_mostly;
static DEFINE_MUTEX(branch_tracing_mutex);

static struct trace_array *branch_tracer;
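/*
 * Record a single annotated-branch event in the branch tracer's ring
 * buffer. Interrupts are disabled and a per-cpu "disabled" counter is
 * taken first so the probe cannot recurse into itself while tracing.
 */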
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct ftrace_event_call *call = &event_branch;
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer.  This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	pc = preempt_count();
	event = trace_buffer_lock_reserve(tr, TRACE_BRANCH,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out;

	entry	= ring_buffer_event_data(event);

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	if (!filter_check_discard(call, entry, tr->buffer, event))
		ring_buffer_unlock_commit(tr->buffer, event);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	local_irq_restore(flags);
}
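/*
 * Fast-path wrapper: bail out cheaply when branch tracing is not
 * enabled, otherwise hand the branch off to the probe above.
 */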
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	if (!branch_tracing_enabled)
		return;

	probe_likely_condition(f, val, expect);
}
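/*
 * enable_branch_tracing()/disable_branch_tracing() are called when the
 * "branch" tracer is started and stopped; the mutex serializes updates
 * to the enabled count and the trace_array pointer.
 */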
int enable_branch_tracing(struct trace_array *tr)
{
	mutex_lock(&branch_tracing_mutex);
	branch_tracer = tr;
	/*
	 * Must be seen before enabling. The reader is a condition
	 * where we do not need a matching rmb()
	 */
	smp_wmb();
	branch_tracing_enabled++;
	mutex_unlock(&branch_tracing_mutex);

	return 0;
}

void disable_branch_tracing(void)
{
	mutex_lock(&branch_tracing_mutex);

	if (!branch_tracing_enabled)
		goto out_unlock;

	branch_tracing_enabled--;

 out_unlock:
	mutex_unlock(&branch_tracing_mutex);
}
static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}

static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}

static int branch_trace_init(struct trace_array *tr)
{
	start_branch_trace(tr);
	return 0;
}

static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}
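/*
 * Render one TRACE_BRANCH entry, e.g. "[ MISS ] foo_bar:foo.c:123"
 * (the function, file and line here are only illustrative).
 */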
static enum print_line_t trace_branch_print(struct trace_iterator *iter,
					    int flags)
{
	struct trace_branch *field;

	trace_assign_type(field, iter->ent);

	/* trace_seq_printf() returns 0 when the output buffer is full */
	if (!trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
			      field->correct ? "  ok  " : " MISS ",
			      field->func,
			      field->file,
			      field->line))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static void branch_print_header(struct seq_file *s)
{
	seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
		"  FUNC:FILE:LINE\n");
	seq_puts(s, "#              | |       |          |         |   "
		"    |\n");
}

static struct trace_event trace_branch_event = {
	.type		= TRACE_BRANCH,
	.trace		= trace_branch_print,
};

static struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif /* CONFIG_FTRACE_SELFTEST */
	.print_header	= branch_print_header,
};
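/*
 * register_ftrace_event() returns the assigned event type id on success
 * and 0 on failure, hence the "if (!ret)" check below.
 */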
__init static int init_branch_tracer(void)
{
	int ret;

	ret = register_ftrace_event(&trace_branch_event);
	if (!ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "branch events\n");
		return 1;
	}
	return register_tracer(&branch_trace);
}
device_initcall(init_branch_tracer);
#else
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
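/*
 * With CONFIG_TRACE_BRANCH_PROFILING, the likely()/unlikely() wrappers
 * (see __branch_check__() in include/linux/compiler.h) build a static
 * ftrace_branch_data entry in the _ftrace_annotated_branch section and
 * call into this function with the branch outcome.
 */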
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
{
	/*
	 * I would love to have a trace point here instead, but the
	 * trace point code is so inundated with unlikely and likely
	 * conditions that the recursive nightmare that exists is too
	 * much to try to get working. At least for now.
	 */
	trace_likely_condition(f, val, expect);

	/* FIXME: Make this atomic! */
	if (val == expect)
		f->correct++;
	else
		f->incorrect++;
}
EXPORT_SYMBOL(ftrace_likely_update);
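/*
 * Bounds of the _ftrace_annotated_branch section; the symbols are laid
 * down by the linker script (include/asm-generic/vmlinux.lds.h).
 */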
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

static int annotated_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, " correct incorrect  %% ");
	seq_printf(m, "       Function                "
			      "  File              Line\n"
			      " ------- ---------  - "
			      "       --------                "
			      "  ----              ----\n");
	return 0;
}
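/*
 * Percentage of times the branch went against its annotation, or -1
 * when the branch was never hit at all (printed as "X").
 */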
static inline long get_incorrect_percent(struct ftrace_branch_data *p)
{
	long percent;

	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		percent = p->incorrect ? 100 : -1;

	return percent;
}
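/*
 * Emit one stat line: counts, percentage, then function, file (path
 * stripped) and line, matching the header printed above.
 */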
static int branch_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	/*
	 * The "miss" count is overlaid on "correct" and "hit" on
	 * "incorrect", so this callback also serves the all-branches
	 * statistics below.
	 */
	percent = get_incorrect_percent(p);

	seq_printf(m, "%8lu %8lu ",  p->correct, p->incorrect);
	if (percent < 0)
		seq_printf(m, "  X ");
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}
static void *annotated_branch_stat_start(struct tracer_stat *trace)
{
	return __start_annotated_branch_profile;
}

static void *
annotated_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_annotated_branch_profile)
		return NULL;

	return p;
}
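/*
 * Order the annotated report by the incorrect-prediction percentage.
 */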
static int annotated_branch_stat_cmp(void *p1, void *p2)
{
	struct ftrace_branch_data *a = p1;
	struct ftrace_branch_data *b = p2;

	long percent_a, percent_b;

	percent_a = get_incorrect_percent(a);
	percent_b = get_incorrect_percent(b);

	if (percent_a < percent_b)
		return -1;
	if (percent_a > percent_b)
		return 1;
	else
		return 0;
}

static struct tracer_stat annotated_branch_stats = {
	.name = "branch_annotated",
	.stat_start = annotated_branch_stat_start,
	.stat_next = annotated_branch_stat_next,
	.stat_cmp = annotated_branch_stat_cmp,
	.stat_headers = annotated_branch_stat_headers,
	.stat_show = branch_stat_show
};
__init static int init_annotated_branch_stats(void)
{
	int ret;

	/* register_stat_tracer() returns 0 on success */
	ret = register_stat_tracer(&annotated_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "annotated branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(init_annotated_branch_stats);
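/*
 * With CONFIG_PROFILE_ALL_BRANCHES, every if() is additionally profiled
 * (via the if() wrapper in include/linux/compiler.h) into the separate
 * _ftrace_branch section, reported through "branch_all" below.
 */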
#ifdef CONFIG_PROFILE_ALL_BRANCHES

extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

static int all_branch_stat_headers(struct seq_file *m)
{
	seq_printf(m, "   miss      hit    %% ");
	seq_printf(m, "       Function                "
			      "  File              Line\n"
			      " ------- ---------  - "
			      "       --------                "
			      "  ----              ----\n");
	return 0;
}

static void *all_branch_stat_start(struct tracer_stat *trace)
{
	return __start_branch_profile;
}

static void *
all_branch_stat_next(void *v, int idx)
{
	struct ftrace_branch_data *p = v;

	++p;

	if ((void *)p >= (void *)__stop_branch_profile)
		return NULL;

	return p;
}

static struct tracer_stat all_branch_stats = {
	.name = "branch_all",
	.stat_start = all_branch_stat_start,
	.stat_next = all_branch_stat_next,
	.stat_headers = all_branch_stat_headers,
	.stat_show = branch_stat_show
};
__init static int all_annotated_branch_stats(void)
{
	int ret;

	/* register_stat_tracer() returns 0 on success */
	ret = register_stat_tracer(&all_branch_stats);
	if (ret) {
		printk(KERN_WARNING "Warning: could not register "
				    "all branches stats\n");
		return 1;
	}
	return 0;
}
fs_initcall(all_annotated_branch_stats);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */