// SPDX-License-Identifier: GPL-2.0
/*
 * preemptoff and irqoff tracepoints
 *
 * Copyright (C) Joel Fernandes (Google) <joel@joelfernandes.org>
 */

#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include "trace.h"

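/*
 * CREATE_TRACE_POINTS makes this file the translation unit that emits the
 * actual tracepoint definitions declared in <trace/events/preemptirq.h>.
 */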
#define CREATE_TRACE_POINTS
#include <trace/events/preemptirq.h>

/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, use the rcuidle tracing methods (which
 * aren't NMI-safe - so exclude NMI contexts):
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point)	trace_##point
#else
#define trace(point)	if (!in_nmi()) trace_##point##_rcuidle
#endif
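
/*
 * Example: trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1) expands to
 * trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1) on noinstr architectures,
 * and to "if (!in_nmi()) trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1)"
 * everywhere else.
 */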

#ifdef CONFIG_TRACE_IRQFLAGS
/* Per-cpu variable to prevent redundant calls when IRQs already off */
static DEFINE_PER_CPU(int, tracing_irq_cpu);

/*
 * Like trace_hardirqs_on() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into an RCU-on and an RCU-off section.
 */
void trace_hardirqs_on_prepare(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}
}
EXPORT_SYMBOL(trace_hardirqs_on_prepare);
NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);

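/*
 * Full irq-enable hook: emit the irq_enable tracepoint and notify the
 * irqsoff tracer (only if an irq-disable was traced on this CPU), then
 * let lockdep know that hardirqs are being enabled.
 */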
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace(irq_enable)(CALLER_ADDR0, CALLER_ADDR1);
		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on_prepare();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
NOKPROBE_SYMBOL(trace_hardirqs_on);

/*
 * Like trace_hardirqs_off() but without the lockdep invocation. This is
 * used in the low level entry code where the ordering vs. RCU is important
 * and lockdep uses a staged approach which splits the lockdep hardirq
 * tracking into an RCU-on and an RCU-off section.
 */
void trace_hardirqs_off_finish(void)
{
	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off_finish);
NOKPROBE_SYMBOL(trace_hardirqs_off_finish);

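/*
 * Full irq-disable hook: tell lockdep first that hardirqs are being
 * disabled, then emit the irq_disable tracepoint and notify the irqsoff
 * tracer (only for the outermost disable seen on this CPU, tracked via
 * tracing_irq_cpu).
 */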
void trace_hardirqs_off(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);

	if (!this_cpu_read(tracing_irq_cpu)) {
		this_cpu_write(tracing_irq_cpu, 1);
		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
		trace(irq_disable)(CALLER_ADDR0, CALLER_ADDR1);
	}
}
EXPORT_SYMBOL(trace_hardirqs_off);
NOKPROBE_SYMBOL(trace_hardirqs_off);
#endif /* CONFIG_TRACE_IRQFLAGS */

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE

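/*
 * Called when preemption is re-enabled (typically from the scheduler's
 * preempt_count accounting): emit the preempt_enable tracepoint and
 * notify the preemptoff tracer, passing the supplied caller addresses
 * (a0/a1) through to both.
 */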
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	trace(preempt_enable)(a0, a1);
	tracer_preempt_on(a0, a1);
}

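/*
 * Counterpart of trace_preempt_on(): emit the preempt_disable tracepoint
 * and notify the preemptoff tracer when preemption gets disabled.
 */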
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	trace(preempt_disable)(a0, a1);
	tracer_preempt_off(a0, a1);
}
#endif /* CONFIG_TRACE_PREEMPT_TOGGLE */