135e8e302SSteven Rostedt /*
235e8e302SSteven Rostedt  * trace context switch
335e8e302SSteven Rostedt  *
435e8e302SSteven Rostedt  * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
535e8e302SSteven Rostedt  *
635e8e302SSteven Rostedt  */
735e8e302SSteven Rostedt #include <linux/module.h>
835e8e302SSteven Rostedt #include <linux/fs.h>
935e8e302SSteven Rostedt #include <linux/debugfs.h>
1035e8e302SSteven Rostedt #include <linux/kallsyms.h>
1135e8e302SSteven Rostedt #include <linux/uaccess.h>
1235e8e302SSteven Rostedt #include <linux/marker.h>
1335e8e302SSteven Rostedt #include <linux/ftrace.h>
1435e8e302SSteven Rostedt 
1535e8e302SSteven Rostedt #include "trace.h"
1635e8e302SSteven Rostedt 
/* the trace_array this tracer writes into; set by sched_switch_trace_init() */
static struct trace_array	*ctx_trace;
/* non-zero while the sched_switch tracer itself is recording events */
static int __read_mostly	tracer_enabled;
/* reference count of marker-probe users (tracer and cmdline recording) */
static atomic_t			sched_ref;
2035e8e302SSteven Rostedt 
/*
 * Probe body for the "kernel_sched_schedule" marker: record one
 * context-switch event into the trace buffer.
 *
 * @private: really a struct trace_array ** (registered with &ctx_trace
 *           as probe private data)
 * @__rq:    runqueue pointer decoded from the marker; unused here
 * @prev:    task being switched out
 * @next:    task being switched in
 *
 * Runs from the scheduler path, so it must not sleep.
 */
static void
sched_switch_func(void *private, void *__rq, struct task_struct *prev,
			struct task_struct *next)
{
	struct trace_array **ptr = private;
	struct trace_array *tr = *ptr;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Record the pid <-> comm mapping even when the tracer itself
	 * is off: cmdline recording only needs the probes registered
	 * (see tracing_start_cmdline_record()).
	 */
	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled)
		return;

	/* keep the cpu id and its per-cpu buffer stable while we log */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* only write an entry if we are not recursing on this cpu */
	if (likely(disabled == 1))
		tracing_sched_switch_trace(tr, data, prev, next, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
4935e8e302SSteven Rostedt 
/*
 * Marker callback for "kernel_sched_schedule": decode the vararg
 * payload (which must match the format string passed to
 * marker_probe_register()) and forward it to sched_switch_func().
 *
 * notrace: this runs inside the scheduler path being traced and must
 * not itself be traced.
 */
static notrace void
sched_switch_callback(void *probe_data, void *call_data,
		      const char *format, va_list *args)
{
	struct task_struct *prev;
	struct task_struct *next;
	struct rq *__rq;

	/* bail out quickly unless someone holds a sched tracing reference */
	if (!atomic_read(&sched_ref))
		return;

	/* skip prev_pid %d next_pid %d prev_state %ld */
	(void)va_arg(*args, int);
	(void)va_arg(*args, int);
	(void)va_arg(*args, long);
	/* the va_arg order below must match the marker format exactly */
	__rq = va_arg(*args, typeof(__rq));
	prev = va_arg(*args, typeof(prev));
	next = va_arg(*args, typeof(next));

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	sched_switch_func(probe_data, __rq, prev, next);
}
755b82a1b0SMathieu Desnoyers 
765b82a1b0SMathieu Desnoyers static void
775b82a1b0SMathieu Desnoyers wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct
785b82a1b0SMathieu Desnoyers 			task_struct *curr)
795b82a1b0SMathieu Desnoyers {
805b82a1b0SMathieu Desnoyers 	struct trace_array **ptr = private;
815b82a1b0SMathieu Desnoyers 	struct trace_array *tr = *ptr;
8257422797SIngo Molnar 	struct trace_array_cpu *data;
8357422797SIngo Molnar 	unsigned long flags;
8457422797SIngo Molnar 	long disabled;
8557422797SIngo Molnar 	int cpu;
8657422797SIngo Molnar 
8757422797SIngo Molnar 	if (!tracer_enabled)
8857422797SIngo Molnar 		return;
8957422797SIngo Molnar 
90d9af56fbSIngo Molnar 	tracing_record_cmdline(curr);
91d9af56fbSIngo Molnar 
9257422797SIngo Molnar 	local_irq_save(flags);
9357422797SIngo Molnar 	cpu = raw_smp_processor_id();
9457422797SIngo Molnar 	data = tr->data[cpu];
9557422797SIngo Molnar 	disabled = atomic_inc_return(&data->disabled);
9657422797SIngo Molnar 
974d9493c9SIngo Molnar 	if (likely(disabled == 1))
9857422797SIngo Molnar 		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
9957422797SIngo Molnar 
10057422797SIngo Molnar 	atomic_dec(&data->disabled);
10157422797SIngo Molnar 	local_irq_restore(flags);
10257422797SIngo Molnar }
10357422797SIngo Molnar 
1045b82a1b0SMathieu Desnoyers static notrace void
1055b82a1b0SMathieu Desnoyers wake_up_callback(void *probe_data, void *call_data,
1065b82a1b0SMathieu Desnoyers 		 const char *format, va_list *args)
10735e8e302SSteven Rostedt {
1085b82a1b0SMathieu Desnoyers 	struct task_struct *curr;
1095b82a1b0SMathieu Desnoyers 	struct task_struct *task;
1105b82a1b0SMathieu Desnoyers 	struct rq *__rq;
11125b0b44aSSteven Rostedt 
1125b82a1b0SMathieu Desnoyers 	if (likely(!tracer_enabled))
1135b82a1b0SMathieu Desnoyers 		return;
11435e8e302SSteven Rostedt 
1155b82a1b0SMathieu Desnoyers 	/* Skip pid %d state %ld */
1165b82a1b0SMathieu Desnoyers 	(void)va_arg(*args, int);
1175b82a1b0SMathieu Desnoyers 	(void)va_arg(*args, long);
1185b82a1b0SMathieu Desnoyers 	/* now get the meat: "rq %p task %p rq->curr %p" */
1195b82a1b0SMathieu Desnoyers 	__rq = va_arg(*args, typeof(__rq));
1205b82a1b0SMathieu Desnoyers 	task = va_arg(*args, typeof(task));
1215b82a1b0SMathieu Desnoyers 	curr = va_arg(*args, typeof(curr));
12235e8e302SSteven Rostedt 
1235b82a1b0SMathieu Desnoyers 	tracing_record_cmdline(task);
1245b82a1b0SMathieu Desnoyers 	tracing_record_cmdline(curr);
12557422797SIngo Molnar 
1265b82a1b0SMathieu Desnoyers 	wakeup_func(probe_data, __rq, task, curr);
12757422797SIngo Molnar }
12857422797SIngo Molnar 
/* Restart the trace time base and clear the buffers of all online cpus. */
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}
13835e8e302SSteven Rostedt 
/*
 * Attach the probe callbacks to the three scheduler markers.
 *
 * Returns 0 on success or the first marker_probe_register() error; on
 * failure every probe registered so far is unregistered again, so the
 * operation is all-or-nothing.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = marker_probe_register("kernel_sched_wakeup",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = marker_probe_register("kernel_sched_wakeup_new",
			"pid %d state %ld ## rq %p task %p rq->curr %p",
			wake_up_callback,
			&ctx_trace);
	if (ret) {
		pr_info("wakeup trace: Couldn't add marker"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = marker_probe_register("kernel_sched_schedule",
		"prev_pid %d next_pid %d prev_state %ld "
		"## rq %p prev %p next %p",
		sched_switch_callback,
		&ctx_trace);
	if (ret) {
		pr_info("sched trace: Couldn't add marker"
			" probe to kernel_sched_schedule\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
	/* error unwind: undo earlier registrations in reverse order */
fail_deprobe_wake_new:
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
fail_deprobe:
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
	return ret;
}
1855b82a1b0SMathieu Desnoyers 
/*
 * Detach all three scheduler marker probes, in the reverse of the
 * order tracing_sched_register() attached them.
 */
static void tracing_sched_unregister(void)
{
	marker_probe_unregister("kernel_sched_schedule",
				sched_switch_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup_new",
				wake_up_callback,
				&ctx_trace);
	marker_probe_unregister("kernel_sched_wakeup",
				wake_up_callback,
				&ctx_trace);
}
1985b82a1b0SMathieu Desnoyers 
1995b82a1b0SMathieu Desnoyers void tracing_start_sched_switch(void)
2005b82a1b0SMathieu Desnoyers {
2015b82a1b0SMathieu Desnoyers 	long ref;
2025b82a1b0SMathieu Desnoyers 
2035b82a1b0SMathieu Desnoyers 	ref = atomic_inc_return(&sched_ref);
2045b82a1b0SMathieu Desnoyers 	if (ref == 1)
2055b82a1b0SMathieu Desnoyers 		tracing_sched_register();
2065b82a1b0SMathieu Desnoyers }
2075b82a1b0SMathieu Desnoyers 
2085b82a1b0SMathieu Desnoyers void tracing_stop_sched_switch(void)
2095b82a1b0SMathieu Desnoyers {
2105b82a1b0SMathieu Desnoyers 	long ref;
2115b82a1b0SMathieu Desnoyers 
2125b82a1b0SMathieu Desnoyers 	ref = atomic_dec_and_test(&sched_ref);
2135b82a1b0SMathieu Desnoyers 	if (ref)
2145b82a1b0SMathieu Desnoyers 		tracing_sched_unregister();
2155b82a1b0SMathieu Desnoyers }
2165b82a1b0SMathieu Desnoyers 
/*
 * Start recording pid <-> comm mappings: takes a sched_ref so the
 * scheduler marker probes are registered even when no tracer is
 * enabled (the probes record cmdlines unconditionally).
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
22141bc8144SSteven Rostedt 
/* Stop recording pid <-> comm mappings: drops the sched_ref taken above. */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
22641bc8144SSteven Rostedt 
/*
 * Enable the sched_switch tracer on @tr: clear stale buffer contents
 * first, then flip tracer_enabled and take the cmdline/probe reference.
 */
static void start_sched_trace(struct trace_array *tr)
{
	sched_switch_reset(tr);
	tracer_enabled = 1;
	tracing_start_cmdline_record();
}
23335e8e302SSteven Rostedt 
/*
 * Disable the sched_switch tracer: drop the cmdline/probe reference
 * first, then flip tracer_enabled off.
 */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	tracer_enabled = 0;
}
23935e8e302SSteven Rostedt 
/* Tracer .init hook: remember the trace array and start if tr->ctrl is set. */
static void sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;

	if (tr->ctrl)
		start_sched_trace(tr);
}
24735e8e302SSteven Rostedt 
/* Tracer .reset hook: stop tracing if it is currently running. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (tr->ctrl)
		stop_sched_trace(tr);
}
25335e8e302SSteven Rostedt 
/* Tracer .ctrl_update hook: follow the new tr->ctrl on/off state. */
static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
	/* When starting a new trace, reset the buffers */
	if (tr->ctrl)
		start_sched_trace(tr);
	else
		stop_sched_trace(tr);
}
26235e8e302SSteven Rostedt 
/* The sched_switch tracer plugin, registered from the initcall below. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};
27335e8e302SSteven Rostedt 
/*
 * Boot-time init: register the marker probes if a sched_ref is already
 * held, then register the tracer itself.
 */
__init static int init_sched_switch_trace(void)
{
	int ret = 0;

	/*
	 * NOTE(review): presumably this covers callers that took a
	 * sched_ref before this initcall ran and therefore could not
	 * register the probes themselves — confirm against
	 * tracing_start_sched_switch() callers.
	 */
	if (atomic_read(&sched_ref))
		ret = tracing_sched_register();
	if (ret) {
		pr_info("error registering scheduler trace\n");
		return ret;
	}
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
287