/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

/* Trace array the sched probes write into; set by assign_trace/init. */
static struct trace_array *ctx_trace;
/* Count of tracers that want events recorded (see *_sched_switch_record). */
static int __read_mostly tracer_enabled;
/* Reference count of users of the sched tracepoint probes. */
static int			sched_ref;
/* Serializes probe (un)registration and tracer_enabled updates. */
static DEFINE_MUTEX(sched_register_mutex);
/* Non-zero while this tracer is administratively stopped (trace_stop). */
static int			sched_stopped;


/*
 * tracing_sched_switch_trace - record one context-switch event
 * @tr:    trace array to write the event to
 * @prev:  task being switched out
 * @next:  task being switched in
 * @flags: irq flags captured by the caller
 * @pc:    preempt count captured by the caller
 *
 * Reserves a TRACE_CTX entry in @tr's ring buffer, fills it with the
 * pid/prio/state of both tasks, and commits it unless the event filter
 * decides to discard it.  Silently drops the event if the ring buffer
 * cannot reserve space.
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;		/* ring buffer full or disabled: drop event */
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= prev->state;
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= next->state;
	entry->next_cpu	= task_cpu(next);

	/* Commit only if the per-event filter does not discard the entry. */
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}

/*
 * Tracepoint probe attached to sched_switch.  Records the cmdlines of
 * both tasks (so pid->comm resolution works later) and, if a tracer is
 * enabled and tracing is not stopped, logs the context switch.
 *
 * NOTE(review): @__rq is unused here; it is part of the tracepoint's
 * signature at this kernel version.
 */
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
		   struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	/* No user registered the probes; nothing to do. */
	if (unlikely(!sched_ref))
		return;

	/* Always record cmdlines, even when event recording is off. */
	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	/* Disable irqs so cpu and per-cpu data stay consistent. */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* Skip recording if this cpu's trace data is temporarily disabled. */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
/*
 * tracing_sched_wakeup_trace - record one wakeup event
 * @tr:    trace array to write the event to
 * @wakee: task being woken up
 * @curr:  task doing the waking (current)
 * @flags: irq flags captured by the caller
 * @pc:    preempt count captured by the caller
 *
 * Reserves a TRACE_WAKE entry, fills it with @curr as "prev" and @wakee
 * as "next", commits it unless the event filter discards it, and then
 * records kernel and user stack traces for the event.
 */
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;		/* ring buffer full or disabled: drop event */
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= curr->state;
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= wakee->state;
	entry->next_cpu			= task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
	/* Stack traces are recorded even if the event itself was filtered. */
	ftrace_trace_stack(tr->buffer, flags, 6, pc);
	ftrace_trace_userstack(tr->buffer, flags, pc);
}

/*
 * Tracepoint probe attached to sched_wakeup and sched_wakeup_new.
 * Mirrors probe_sched_switch(): record current's cmdline, then log the
 * wakeup if a tracer is enabled and tracing is not stopped.
 *
 * NOTE(review): @__rq and @success are unused by this probe; they are
 * part of the tracepoint's signature at this kernel version.
 */
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	/* No user registered the probes; nothing to do. */
	if (unlikely(!sched_ref))
		return;

	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	/* Disable irqs so cpu and per-cpu data stay consistent. */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* Skip recording if this cpu's trace data is temporarily disabled. */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}

/*
 * Register the three sched tracepoint probes (wakeup, wakeup_new,
 * switch).  On failure, unwinds the registrations already made and
 * returns the error from the failing register call; returns 0 on
 * success.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup);
	return ret;
}

/* Unregister all three sched tracepoint probes. */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}

/* Take a reference on the probes; register them on the 0 -> 1 edge. */
static void tracing_start_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(sched_ref++))
		tracing_sched_register();
	mutex_unlock(&sched_register_mutex);
}

/* Drop a reference on the probes; unregister them on the 1 -> 0 edge. */
static void tracing_stop_sched_switch(void)
{
	mutex_lock(&sched_register_mutex);
	if (!(--sched_ref))
		tracing_sched_unregister();
	mutex_unlock(&sched_register_mutex);
}

/*
 * Enable pid->comm recording only: takes a probe reference without
 * bumping tracer_enabled, so only tracing_record_cmdline() runs in the
 * probes.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}

/* Counterpart of tracing_start_cmdline_record(). */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
	/* A tracer must have assigned a trace array first. */
	if (unlikely(!ctx_trace)) {
		WARN_ON(1);
		return;
	}

	tracing_start_sched_switch();

	mutex_lock(&sched_register_mutex);
	tracer_enabled++;
	mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}

/* Stop recording for the sched_switch tracer itself. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}

/* tracer ->init: reset per-cpu buffers and start recording. */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}

/* tracer ->reset: stop recording, but only if probes are registered. */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}

/* tracer ->start: clear the administrative stop flag. */
static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}

/* tracer ->stop: set the administrative stop flag (probes stay attached). */
static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}

/* The "sched_switch" tracer: records context switches and wakeups. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_sched_switch,
#endif
};

/* Register the tracer with the ftrace core at boot. */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
