xref: /openbmc/linux/kernel/trace/trace_irqsoff.c (revision a8fe58ce)
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>

#include "trace.h"

static struct trace_array		*irqsoff_trace __read_mostly;
static int				tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_RAW_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};
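
/*
 * trace_type (below) records which critical sections the selected tracer
 * tracks: the irqsoff tracer sets TRACER_IRQS_OFF, preemptoff sets
 * TRACER_PREEMPT_OFF, and preemptirqsoff sets both bits (see the
 * *_tracer_init() functions at the bottom of this file).
 */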

static int trace_type __read_mostly;

static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
# define is_graph(tr) false
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * recorded a new maximum and could disturb our measurement with
 * serial console printouts, etc. Truly coinciding maximum latencies
 * should be rare and what happens together happens separately as
 * well, so this doesn't decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp	unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(*flags))
		return 0;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	atomic_dec(&(*data)->disabled);

	return 0;
}
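
/*
 * The callers below all follow the same pattern around this prologue:
 * bail out unless func_prolog_dec() returned 1, emit the event, then
 * drop the data->disabled reference the prologue took, e.g.:
 *
 *	if (!func_prolog_dec(tr, &data, &flags))
 *		return;
 *	trace_function(tr, ip, parent_ip, flags, preempt_count());
 *	atomic_dec(&data->disabled);
 */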

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	if (!(is_graph(tr) ^ set))
		return 0;

	stop_irqsoff_tracer(irqsoff_trace, !set);

	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int ret;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	pc = preempt_count();
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_dec(tr, &data, &flags))
		return;

	pc = preempt_count();
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
			    TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
	struct trace_array *tr = irqsoff_trace;

	if (is_graph(tr))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

#ifdef CONFIG_FUNCTION_TRACER
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
#endif

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
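
/*
 * Two reporting modes fall out of the check above: with a nonzero
 * tracing_thresh (normally set in microseconds via the tracing_thresh
 * file in tracefs) every critical section at least that long is
 * recorded; otherwise only sections that beat the current per-instance
 * maximum are. For example, with tracing_thresh unset and a current
 * max_latency of 100us, a 90us section is dropped while a 120us
 * section becomes the new maximum.
 */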

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check again under the lock that we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, flags, 5, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	__trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->trace_buffer.data, cpu);

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	__trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
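
/*
 * start_critical_timing() and stop_critical_timing() bracket one
 * critical section per CPU: the start records the timestamp and marks
 * tracing_cpu, the stop clears tracing_cpu and hands the elapsed time
 * to check_critical_timing(), which uses report_latency() to decide
 * whether the section is worth keeping.
 */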

/*
 * start and stop critical timings, used to exclude known stoppage
 * (in idle) from the measured latency
 */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
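
/*
 * These two exports let code that legitimately runs with interrupts off
 * for long, uninteresting stretches suppress the measurement. The usual
 * caller is the idle loop, which (in kernels of this vintage) calls
 * stop_critical_timings() before entering an idle state and
 * start_critical_timings() after waking, so idle time is not reported
 * as an irqs-off latency.
 */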

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace() && !irq_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
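
/*
 * trace_preempt_on()/trace_preempt_off() are expected to be called from
 * the scheduler's preempt_count bookkeeping (preempt_count_add()/
 * preempt_count_sub() in kernel/sched/core.c) when preemption is
 * disabled or re-enabled. The "&& !irq_trace()" guard above avoids
 * double-counting: while interrupts are already being timed, a preempt
 * transition does not open or close a section of its own.
 */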

#ifdef CONFIG_FUNCTION_TRACER
static bool function_enabled;

static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&irqsoff_graph_return,
					    &irqsoff_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
#else
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_irqsoff_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}

static bool irqsoff_busy;

static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	save_flags = tr->trace_flags;

	/* non-overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(&tr->trace_buffer);

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);

	irqsoff_busy = false;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	return __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_irqsoff,
#endif
	.open           = irqsoff_trace_open,
	.close          = irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	return __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= true,
	.print_header   = irqsoff_print_header,
	.print_line     = irqsoff_print_line,
	.flag_changed	= irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_preemptirqsoff,
#endif
	.open		= irqsoff_trace_open,
	.close		= irqsoff_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
core_initcall(init_irqsoff_tracer);
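
/*
 * Typical usage from user space via tracefs (a sketch; paths assume the
 * usual /sys/kernel/debug/tracing mount point, not something defined in
 * this file):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	# echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	# ... run the workload ...
 *	# cat /sys/kernel/debug/tracing/tracing_max_latency
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * current_tracer also accepts "preemptoff" and "preemptirqsoff",
 * matching the tracers registered by init_irqsoff_tracer() above.
 */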
769