/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
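/*
 * Usage sketch (the debugfs mount point below is an assumption; adjust
 * to wherever debugfs is mounted on your system):
 *
 *	# mount -t debugfs nodev /sys/kernel/debug
 *	# echo sched_switch > /sys/kernel/debug/tracing/current_tracer
 *	# echo 1 > /sys/kernel/debug/tracing/tracing_enabled
 *	# cat /sys/kernel/debug/tracing/trace
 */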
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static atomic_t			sched_ref;	/* users of the tracepoint probes */

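/*
 * Tracepoint probe attached to sched_switch: record the comms of both
 * tasks and, if the tracer is enabled, log the context switch into the
 * per-CPU trace buffer with interrupts disabled.
 */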
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (!atomic_read(&sched_ref))
		return;

	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

	local_irq_restore(flags);
}

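/*
 * Tracepoint probe shared by sched_wakeup and sched_wakeup_new: log a
 * wakeup event (@wakee woken by current) into the per-CPU trace buffer.
 */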
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!tracer_enabled))
		return;

	pc = preempt_count();
	tracing_record_cmdline(current);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}

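/*
 * Stamp a new start time and clear every online CPU's buffer so a
 * fresh trace starts out empty.
 */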
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}

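/*
 * Attach the probes to the scheduler tracepoints, unwinding any
 * partial registration on failure.
 */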
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

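/* Detach the probes in the reverse order of registration. */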
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}

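/*
 * sched_ref counts the users of the probes (this tracer plus anyone
 * recording cmdlines), so the probes are registered only for the first
 * user and unregistered only when the last one goes away.
 */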
static void tracing_start_sched_switch(void)
{
	/* Only the first user registers the probes */
	if (atomic_inc_return(&sched_ref) == 1)
		tracing_sched_register();
}

static void tracing_stop_sched_switch(void)
{
	/* atomic_dec_and_test() returns true when the last user is gone */
	if (atomic_dec_and_test(&sched_ref))
		tracing_sched_unregister();
}

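/*
 * Public hooks: other tracers call these to keep the pid -> comm
 * mapping recorded while they run, without enabling this tracer.
 */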
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

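/*
 * Enable/disable this tracer: reset the buffers and flip
 * tracer_enabled around the cmdline-recording refcount.
 */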
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}

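/*
 * Tracer callbacks: invoked by the tracing core when this tracer is
 * selected, deselected, or when tracing is toggled via tr->ctrl.
 */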
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}

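/* The tracer as seen by the tracing core, selectable as "sched_switch" */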
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};

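/*
 * If someone already asked for cmdline recording before this initcall
 * ran, sched_ref is non-zero and the probes must be registered now.
 */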
static int __init init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);