xref: /openbmc/linux/kernel/trace/trace_irqsoff.c (revision 384740dc)
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

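/*
 * Per-CPU flag: non-zero while a critical (irqs/preempt off) section is
 * being timed on this CPU.  The function trace callback below checks it
 * before logging anything.
 */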
static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

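/*
 * trace_type selects what the current tracer measures: irqs-off sections,
 * preempt-off sections, or both (preemptirqsoff).
 */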
enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};

static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * recorded a new maximum and could disturb our measurement with serial
 * console printouts, etc. Truly coinciding maximum latencies should be
 * rare and what happens together happens separately as well, so this
 * doesn't decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, data, ip, parent_ip, flags);

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

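/*
 * Called when a critical section ends: compute how long irqs/preemption
 * were disabled and, if this beats the current maximum (or exceeds
 * tracing_thresh), snapshot the trace as the new worst case, then re-arm
 * the per-CPU state for the next section.
 */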
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	unsigned long latency, t0, t1;
	cycle_t T0, T1, delta;
	unsigned long flags;

	/*
	 * usecs conversion is slow so we try to delay the conversion
	 * as long as possible:
	 */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);

	latency = nsecs_to_usecs(delta);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	tracing_max_latency = delta;
	t0 = nsecs_to_usecs(T0);
	t1 = nsecs_to_usecs(T1);

	data->critical_end = parent_ip;

	update_max_tr_single(tr, current, cpu);

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_reset(data);
	trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
}

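/*
 * Begin timing a critical section on this CPU: record when and where
 * irqs/preemption were disabled, reset the per-CPU buffer, log the entry
 * point and mark tracing_cpu so the function tracer starts logging.
 */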
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;
	tracing_reset(data);

	local_save_flags(flags);

	trace_function(tr, data, ip, parent_ip, flags);

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

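/*
 * End of a critical section: clear tracing_cpu, log the exit point and
 * let check_critical_timing() decide whether this was a new maximum.
 */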
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || unlikely(!head_page(data)) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, data, ip, parent_ip, flags);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to exclude known stoppage (e.g. idle) from the measurement */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
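/*
 * With CONFIG_PROVE_LOCKING, lockdep already hooks the hardirq on/off
 * events and calls time_hardirqs_on/off() for us; without it, this file
 * supplies its own trace_hardirqs_*() implementations further below.
 */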
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /*  CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
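/*
 * Preempt-off tracer entry points: a critical section begins when
 * preemption is disabled and ends when it is re-enabled.
 */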
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

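/*
 * Register/unregister the function trace callback and flip tracer_enabled.
 */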
static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	tracer_enabled = 1;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

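/*
 * Common init for the irqsoff/preemptoff/preemptirqsoff flavours:
 * publish the trace array and start tracing if it is enabled.
 */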
static void __irqsoff_tracer_init(struct trace_array *tr)
{
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	if (tr->ctrl)
		start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
	if (tr->ctrl)
		start_irqsoff_tracer(tr);
	else
		stop_irqsoff_tracer(tr);
}

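/*
 * open/close: pause the tracer while the trace buffer is being read and
 * restart it when the reader is done.
 */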
static void irqsoff_tracer_open(struct trace_iterator *iter)
{
	/* stop the trace while dumping */
	if (iter->tr->ctrl)
		stop_irqsoff_tracer(iter->tr);
}

static void irqsoff_tracer_close(struct trace_iterator *iter)
{
	if (iter->tr->ctrl)
		start_irqsoff_tracer(iter->tr);
}

#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.open		= irqsoff_tracer_open,
	.close		= irqsoff_tracer_close,
	.ctrl_update	= irqsoff_tracer_ctrl_update,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

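/*
 * Register whichever tracer flavours were configured; runs at boot via
 * device_initcall().  Rough usage sketch (interface provided by the
 * tracing core; paths assume debugfs is mounted at /sys/kernel/debug):
 *
 *	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/trace
 */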
__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);