135e8e302SSteven Rostedt /*
235e8e302SSteven Rostedt  * trace context switch
335e8e302SSteven Rostedt  *
435e8e302SSteven Rostedt  * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
535e8e302SSteven Rostedt  *
635e8e302SSteven Rostedt  */
735e8e302SSteven Rostedt #include <linux/module.h>
835e8e302SSteven Rostedt #include <linux/fs.h>
935e8e302SSteven Rostedt #include <linux/debugfs.h>
1035e8e302SSteven Rostedt #include <linux/kallsyms.h>
1135e8e302SSteven Rostedt #include <linux/uaccess.h>
1235e8e302SSteven Rostedt #include <linux/ftrace.h>
13b07c3f19SMathieu Desnoyers #include <trace/sched.h>
1435e8e302SSteven Rostedt 
1535e8e302SSteven Rostedt #include "trace.h"
1635e8e302SSteven Rostedt 
/* Trace array this tracer writes into; set by sched_switch_trace_init(). */
static struct trace_array	*ctx_trace;
/* Nonzero while the sched_switch tracer itself is emitting entries. */
static int __read_mostly	tracer_enabled;
/* Count of users of the scheduler tracepoint probes (tracer and/or
 * cmdline recording); probes are registered while this is nonzero. */
static atomic_t			sched_ref;
2035e8e302SSteven Rostedt 
/*
 * Tracepoint probe fired on every context switch.  Always records the
 * pid->comm mapping of both tasks; additionally emits a sched_switch
 * trace entry when the tracer is enabled.
 */
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	/* No users hold a sched_ref: the probes are effectively off. */
	if (!atomic_read(&sched_ref))
		return;

	/*
	 * Record cmdlines even when the tracer is disabled so later
	 * output can still resolve task names.
	 */
	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	/* Capture the preempt count before IRQs are disabled below. */
	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* Per-cpu disable flag guards against recursive tracing. */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

	local_irq_restore(flags);
}
4935e8e302SSteven Rostedt 
/*
 * Tracepoint probe fired when a task is woken up.  Emits a wakeup trace
 * entry (wakee + waker) into the per-cpu buffer when the tracer is on.
 */
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (!likely(tracer_enabled))
		return;

	/* Capture the preempt count before IRQs are disabled below. */
	pc = preempt_count();
	/* current is the waker; make sure its comm is recorded. */
	tracing_record_cmdline(current);

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* Per-cpu disable flag guards against recursive tracing. */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}
7357422797SIngo Molnar 
74e309b41dSIngo Molnar static void sched_switch_reset(struct trace_array *tr)
7535e8e302SSteven Rostedt {
7635e8e302SSteven Rostedt 	int cpu;
7735e8e302SSteven Rostedt 
78750ed1a4SIngo Molnar 	tr->time_start = ftrace_now(tr->cpu);
7935e8e302SSteven Rostedt 
8035e8e302SSteven Rostedt 	for_each_online_cpu(cpu)
813928a8a2SSteven Rostedt 		tracing_reset(tr, cpu);
8235e8e302SSteven Rostedt }
8335e8e302SSteven Rostedt 
845b82a1b0SMathieu Desnoyers static int tracing_sched_register(void)
855b82a1b0SMathieu Desnoyers {
865b82a1b0SMathieu Desnoyers 	int ret;
875b82a1b0SMathieu Desnoyers 
88b07c3f19SMathieu Desnoyers 	ret = register_trace_sched_wakeup(probe_sched_wakeup);
895b82a1b0SMathieu Desnoyers 	if (ret) {
90b07c3f19SMathieu Desnoyers 		pr_info("wakeup trace: Couldn't activate tracepoint"
915b82a1b0SMathieu Desnoyers 			" probe to kernel_sched_wakeup\n");
925b82a1b0SMathieu Desnoyers 		return ret;
935b82a1b0SMathieu Desnoyers 	}
945b82a1b0SMathieu Desnoyers 
95b07c3f19SMathieu Desnoyers 	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
965b82a1b0SMathieu Desnoyers 	if (ret) {
97b07c3f19SMathieu Desnoyers 		pr_info("wakeup trace: Couldn't activate tracepoint"
985b82a1b0SMathieu Desnoyers 			" probe to kernel_sched_wakeup_new\n");
995b82a1b0SMathieu Desnoyers 		goto fail_deprobe;
1005b82a1b0SMathieu Desnoyers 	}
1015b82a1b0SMathieu Desnoyers 
102b07c3f19SMathieu Desnoyers 	ret = register_trace_sched_switch(probe_sched_switch);
1035b82a1b0SMathieu Desnoyers 	if (ret) {
104b07c3f19SMathieu Desnoyers 		pr_info("sched trace: Couldn't activate tracepoint"
1055b82a1b0SMathieu Desnoyers 			" probe to kernel_sched_schedule\n");
1065b82a1b0SMathieu Desnoyers 		goto fail_deprobe_wake_new;
1075b82a1b0SMathieu Desnoyers 	}
1085b82a1b0SMathieu Desnoyers 
1095b82a1b0SMathieu Desnoyers 	return ret;
1105b82a1b0SMathieu Desnoyers fail_deprobe_wake_new:
111b07c3f19SMathieu Desnoyers 	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
1125b82a1b0SMathieu Desnoyers fail_deprobe:
113b07c3f19SMathieu Desnoyers 	unregister_trace_sched_wakeup(probe_sched_wakeup);
1145b82a1b0SMathieu Desnoyers 	return ret;
1155b82a1b0SMathieu Desnoyers }
1165b82a1b0SMathieu Desnoyers 
/* Detach all probes, in reverse order of registration. */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
1235b82a1b0SMathieu Desnoyers 
124f2252935SIngo Molnar static void tracing_start_sched_switch(void)
1255b82a1b0SMathieu Desnoyers {
1265b82a1b0SMathieu Desnoyers 	long ref;
1275b82a1b0SMathieu Desnoyers 
1285b82a1b0SMathieu Desnoyers 	ref = atomic_inc_return(&sched_ref);
1295b82a1b0SMathieu Desnoyers 	if (ref == 1)
1305b82a1b0SMathieu Desnoyers 		tracing_sched_register();
1315b82a1b0SMathieu Desnoyers }
1325b82a1b0SMathieu Desnoyers 
133f2252935SIngo Molnar static void tracing_stop_sched_switch(void)
1345b82a1b0SMathieu Desnoyers {
1355b82a1b0SMathieu Desnoyers 	long ref;
1365b82a1b0SMathieu Desnoyers 
1375b82a1b0SMathieu Desnoyers 	ref = atomic_dec_and_test(&sched_ref);
1385b82a1b0SMathieu Desnoyers 	if (ref)
1395b82a1b0SMathieu Desnoyers 		tracing_sched_unregister();
1405b82a1b0SMathieu Desnoyers }
1415b82a1b0SMathieu Desnoyers 
/* Public wrapper: start recording pid->comm mappings via the probes. */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
14641bc8144SSteven Rostedt 
/* Public wrapper: stop recording pid->comm mappings. */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
15141bc8144SSteven Rostedt 
/*
 * Begin a trace run: clear old data and start cmdline recording before
 * flipping tracer_enabled on, so probes never see stale buffers.
 */
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracing_start_cmdline_record();
	tracer_enabled = 1;
}
15835e8e302SSteven Rostedt 
/*
 * End a trace run: disable first so in-flight probes stop writing,
 * then drop the cmdline-recording reference.
 */
static void stop_sched_trace(struct trace_array *tr)
{
	tracer_enabled = 0;
	tracing_stop_cmdline_record();
}
16435e8e302SSteven Rostedt 
/* Tracer init callback: remember the trace array and start if enabled. */
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}
17235e8e302SSteven Rostedt 
/* Tracer reset callback: stop tracing if it is currently running. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}
17835e8e302SSteven Rostedt 
17935e8e302SSteven Rostedt static void sched_switch_trace_ctrl_update(struct trace_array *tr)
18035e8e302SSteven Rostedt {
18135e8e302SSteven Rostedt 	/* When starting a new trace, reset the buffers */
18235e8e302SSteven Rostedt 	if (tr->ctrl)
18335e8e302SSteven Rostedt 		start_sched_trace(tr);
18435e8e302SSteven Rostedt 	else
18535e8e302SSteven Rostedt 		stop_sched_trace(tr);
18635e8e302SSteven Rostedt }
18735e8e302SSteven Rostedt 
/* The "sched_switch" tracer registered with the ftrace core. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
19835e8e302SSteven Rostedt 
/*
 * Boot-time setup: if someone (e.g. cmdline recording) already took a
 * sched_ref before this initcall ran, attach the probes now, then
 * register the tracer with the ftrace core.
 */
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
21135e8e302SSteven Rostedt device_initcall(init_sched_switch_trace);
212