1bcea3f96SSteven Rostedt (VMware) // SPDX-License-Identifier: GPL-2.0
281d68a96SSteven Rostedt /*
373d8b8bcSWenji Huang * trace irqs off critical timings
481d68a96SSteven Rostedt *
581d68a96SSteven Rostedt * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
681d68a96SSteven Rostedt * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
781d68a96SSteven Rostedt *
881d68a96SSteven Rostedt * From code in the latency_tracer, that is:
981d68a96SSteven Rostedt *
1081d68a96SSteven Rostedt * Copyright (C) 2004-2006 Ingo Molnar
116d49e352SNadia Yvette Chambers * Copyright (C) 2004 Nadia Yvette Chambers
1281d68a96SSteven Rostedt */
1381d68a96SSteven Rostedt #include <linux/kallsyms.h>
1481d68a96SSteven Rostedt #include <linux/uaccess.h>
1581d68a96SSteven Rostedt #include <linux/module.h>
1681d68a96SSteven Rostedt #include <linux/ftrace.h>
17eeeb080bSMasami Hiramatsu #include <linux/kprobes.h>
1881d68a96SSteven Rostedt
1981d68a96SSteven Rostedt #include "trace.h"
2081d68a96SSteven Rostedt
21d5915816SJoel Fernandes #include <trace/events/preemptirq.h>
22d5915816SJoel Fernandes
23aaecaa0bSJoel Fernandes #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
/* The trace_array instance this tracer is bound to (set at init). */
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

/* Nonzero while the current CPU is inside a traced critical section. */
static DEFINE_PER_CPU(int, tracing_cpu);

/* Serializes checking and recording of a new maximum latency. */
static DEFINE_RAW_SPINLOCK(max_trace_lock);

/* Which kind(s) of critical section this tracer instance measures. */
enum {
	TRACER_IRQS_OFF = (1 << 1),
	TRACER_PREEMPT_OFF = (1 << 2),
};

static int trace_type __read_mostly;

/* Trace flags captured at init time, restored on tracer reset. */
static int save_flags;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);
4262b915f1SJiri Olsa
436cd8a4bbSSteven Rostedt #ifdef CONFIG_PREEMPT_TRACER
44e309b41dSIngo Molnar static inline int
preempt_trace(int pc)45f27107faSSteven Rostedt (VMware) preempt_trace(int pc)
466cd8a4bbSSteven Rostedt {
47f27107faSSteven Rostedt (VMware) return ((trace_type & TRACER_PREEMPT_OFF) && pc);
486cd8a4bbSSteven Rostedt }
496cd8a4bbSSteven Rostedt #else
50f27107faSSteven Rostedt (VMware) # define preempt_trace(pc) (0)
516cd8a4bbSSteven Rostedt #endif
526cd8a4bbSSteven Rostedt
536cd8a4bbSSteven Rostedt #ifdef CONFIG_IRQSOFF_TRACER
54e309b41dSIngo Molnar static inline int
irq_trace(void)556cd8a4bbSSteven Rostedt irq_trace(void)
566cd8a4bbSSteven Rostedt {
576cd8a4bbSSteven Rostedt return ((trace_type & TRACER_IRQS_OFF) &&
586cd8a4bbSSteven Rostedt irqs_disabled());
596cd8a4bbSSteven Rostedt }
606cd8a4bbSSteven Rostedt #else
616cd8a4bbSSteven Rostedt # define irq_trace() (0)
626cd8a4bbSSteven Rostedt #endif
636cd8a4bbSSteven Rostedt
6462b915f1SJiri Olsa #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6503905582SSteven Rostedt (Red Hat) static int irqsoff_display_graph(struct trace_array *tr, int set);
66983f938aSSteven Rostedt (Red Hat) # define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
6703905582SSteven Rostedt (Red Hat) #else
/* Graph tracer not compiled in: the display-graph option cannot be set. */
static inline int irqsoff_display_graph(struct trace_array *tr, int set)
{
	return -EINVAL;
}
72983f938aSSteven Rostedt (Red Hat) # define is_graph(tr) false
7362b915f1SJiri Olsa #endif
7462b915f1SJiri Olsa
7581d68a96SSteven Rostedt /*
7681d68a96SSteven Rostedt * Sequence count - we record it when starting a measurement and
7781d68a96SSteven Rostedt * skip the latency if the sequence has changed - some other section
7881d68a96SSteven Rostedt * did a maximum and could disturb our measurement with serial console
7981d68a96SSteven Rostedt * printouts, etc. Truly coinciding maximum latencies should be rare
8025985edcSLucas De Marchi * and what happens together happens separately as well, so this doesn't
8181d68a96SSteven Rostedt * decrease the validity of the maximum found:
8281d68a96SSteven Rostedt */
8381d68a96SSteven Rostedt static __cacheline_aligned_in_smp unsigned long max_sequence;
8481d68a96SSteven Rostedt
85606576ceSSteven Rostedt #ifdef CONFIG_FUNCTION_TRACER
8681d68a96SSteven Rostedt /*
875e6d2b9cSSteven Rostedt * Prologue for the preempt and irqs off function tracers.
885e6d2b9cSSteven Rostedt *
895e6d2b9cSSteven Rostedt * Returns 1 if it is OK to continue, and data->disabled is
905e6d2b9cSSteven Rostedt * incremented.
915e6d2b9cSSteven Rostedt * 0 if the trace is to be ignored, and data->disabled
925e6d2b9cSSteven Rostedt * is kept the same.
935e6d2b9cSSteven Rostedt *
945e6d2b9cSSteven Rostedt * Note, this function is also used outside this ifdef but
955e6d2b9cSSteven Rostedt * inside the #ifdef of the function graph tracer below.
965e6d2b9cSSteven Rostedt * This is OK, since the function graph tracer is
975e6d2b9cSSteven Rostedt * dependent on the function tracer.
9881d68a96SSteven Rostedt */
static int func_prolog_dec(struct trace_array *tr,
			   struct trace_array_cpu **data,
			   unsigned long *flags)
{
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	/* Only CPUs currently inside a critical section are traced. */
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return 0;

	local_save_flags(*flags);
	/*
	 * Slight chance to get a false positive on tracing_cpu,
	 * although I'm starting to think there isn't a chance.
	 * Leave this for now just to be paranoid.
	 */
	if (!irqs_disabled_flags(*flags) && !preempt_count())
		return 0;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	/* Recursion guard: only the outermost entry may trace. */
	disabled = atomic_inc_return(&(*data)->disabled);

	if (likely(disabled == 1))
		return 1;

	/* Nested entry: undo our increment and skip this event. */
	atomic_dec(&(*data)->disabled);

	return 0;
}
1355e6d2b9cSSteven Rostedt
1365e6d2b9cSSteven Rostedt /*
1375e6d2b9cSSteven Rostedt * irqsoff uses its own tracer function to keep the overhead down:
1385e6d2b9cSSteven Rostedt */
1395e6d2b9cSSteven Rostedt static void
irqsoff_tracer_call(unsigned long ip,unsigned long parent_ip,struct ftrace_ops * op,struct ftrace_regs * fregs)1402f5f6ad9SSteven Rostedt irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
141d19ad077SSteven Rostedt (VMware) struct ftrace_ops *op, struct ftrace_regs *fregs)
1425e6d2b9cSSteven Rostedt {
1435e6d2b9cSSteven Rostedt struct trace_array *tr = irqsoff_trace;
1445e6d2b9cSSteven Rostedt struct trace_array_cpu *data;
1455e6d2b9cSSteven Rostedt unsigned long flags;
14636590c50SSebastian Andrzej Siewior unsigned int trace_ctx;
1475e6d2b9cSSteven Rostedt
1485e6d2b9cSSteven Rostedt if (!func_prolog_dec(tr, &data, &flags))
1495e6d2b9cSSteven Rostedt return;
1505e6d2b9cSSteven Rostedt
15136590c50SSebastian Andrzej Siewior trace_ctx = tracing_gen_ctx_flags(flags);
15236590c50SSebastian Andrzej Siewior
15336590c50SSebastian Andrzej Siewior trace_function(tr, ip, parent_ip, trace_ctx);
15481d68a96SSteven Rostedt
15581d68a96SSteven Rostedt atomic_dec(&data->disabled);
15681d68a96SSteven Rostedt }
157606576ceSSteven Rostedt #endif /* CONFIG_FUNCTION_TRACER */
15881d68a96SSteven Rostedt
15962b915f1SJiri Olsa #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Toggle function-graph style output while the tracer is running.
 * The tracer is restarted so the buffers only contain one format.
 */
static int irqsoff_display_graph(struct trace_array *tr, int set)
{
	int cpu;

	/* Nothing to do if the option already matches the request. */
	if (!(is_graph(tr) ^ set))
		return 0;

	/* Stop the tracer in its current (old) mode. */
	stop_irqsoff_tracer(irqsoff_trace, !set);

	/* Forget any in-progress critical section on every CPU. */
	for_each_possible_cpu(cpu)
		per_cpu(tracing_cpu, cpu) = 0;

	/* Start measuring maximum latency from scratch. */
	tr->max_latency = 0;
	tracing_reset_online_cpus(&irqsoff_trace->array_buffer);

	return start_irqsoff_tracer(irqsoff_trace, set);
}
17762b915f1SJiri Olsa
/* Function-graph entry hook: record function entry inside critical sections. */
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	int ret;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_dec(tr, &data, &flags))
		return 0;

	trace_ctx = tracing_gen_ctx_flags(flags);
	ret = __trace_graph_entry(tr, trace, trace_ctx);
	/* Release the recursion guard taken by func_prolog_dec(). */
	atomic_dec(&data->disabled);

	return ret;
}
20762b915f1SJiri Olsa
irqsoff_graph_return(struct ftrace_graph_ret * trace)20862b915f1SJiri Olsa static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
20962b915f1SJiri Olsa {
21062b915f1SJiri Olsa struct trace_array *tr = irqsoff_trace;
21162b915f1SJiri Olsa struct trace_array_cpu *data;
21262b915f1SJiri Olsa unsigned long flags;
21336590c50SSebastian Andrzej Siewior unsigned int trace_ctx;
21462b915f1SJiri Olsa
2155cf99a0fSSteven Rostedt (VMware) ftrace_graph_addr_finish(trace);
2165cf99a0fSSteven Rostedt (VMware)
2175e6d2b9cSSteven Rostedt if (!func_prolog_dec(tr, &data, &flags))
21862b915f1SJiri Olsa return;
21962b915f1SJiri Olsa
22036590c50SSebastian Andrzej Siewior trace_ctx = tracing_gen_ctx_flags(flags);
22136590c50SSebastian Andrzej Siewior __trace_graph_return(tr, trace, trace_ctx);
22262b915f1SJiri Olsa atomic_dec(&data->disabled);
22362b915f1SJiri Olsa }
22462b915f1SJiri Olsa
/* Entry/return callbacks registered with the function graph tracer. */
static struct fgraph_ops fgraph_ops = {
	.entryfunc = &irqsoff_graph_entry,
	.retfunc = &irqsoff_graph_return,
};
229688f7089SSteven Rostedt (VMware)
irqsoff_trace_open(struct trace_iterator * iter)23062b915f1SJiri Olsa static void irqsoff_trace_open(struct trace_iterator *iter)
23162b915f1SJiri Olsa {
232983f938aSSteven Rostedt (Red Hat) if (is_graph(iter->tr))
23362b915f1SJiri Olsa graph_trace_open(iter);
234*eecb91b9SZheng Yejian else
235*eecb91b9SZheng Yejian iter->private = NULL;
23662b915f1SJiri Olsa }
23762b915f1SJiri Olsa
irqsoff_trace_close(struct trace_iterator * iter)23862b915f1SJiri Olsa static void irqsoff_trace_close(struct trace_iterator *iter)
23962b915f1SJiri Olsa {
24062b915f1SJiri Olsa if (iter->private)
24162b915f1SJiri Olsa graph_trace_close(iter);
24262b915f1SJiri Olsa }
24362b915f1SJiri Olsa
24462b915f1SJiri Olsa #define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
245321e68b0SJiri Olsa TRACE_GRAPH_PRINT_PROC | \
2469acd8de6SChangbin Du TRACE_GRAPH_PRINT_REL_TIME | \
247321e68b0SJiri Olsa TRACE_GRAPH_PRINT_DURATION)
24862b915f1SJiri Olsa
irqsoff_print_line(struct trace_iterator * iter)24962b915f1SJiri Olsa static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
25062b915f1SJiri Olsa {
25162b915f1SJiri Olsa /*
25262b915f1SJiri Olsa * In graph mode call the graph tracer output function,
25362b915f1SJiri Olsa * otherwise go with the TRACE_FN event handler
25462b915f1SJiri Olsa */
255983f938aSSteven Rostedt (Red Hat) if (is_graph(iter->tr))
2560a772620SJiri Olsa return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);
25762b915f1SJiri Olsa
25862b915f1SJiri Olsa return TRACE_TYPE_UNHANDLED;
25962b915f1SJiri Olsa }
26062b915f1SJiri Olsa
irqsoff_print_header(struct seq_file * s)26162b915f1SJiri Olsa static void irqsoff_print_header(struct seq_file *s)
26262b915f1SJiri Olsa {
263983f938aSSteven Rostedt (Red Hat) struct trace_array *tr = irqsoff_trace;
264983f938aSSteven Rostedt (Red Hat)
265983f938aSSteven Rostedt (Red Hat) if (is_graph(tr))
2660a772620SJiri Olsa print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
2670a772620SJiri Olsa else
26862b915f1SJiri Olsa trace_default_header(s);
26962b915f1SJiri Olsa }
27062b915f1SJiri Olsa
/* Record a function event using whichever output mode is active. */
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (!is_graph(tr)) {
		trace_function(tr, ip, parent_ip, trace_ctx);
		return;
	}
	trace_graph_function(tr, ip, parent_ip, trace_ctx);
}
28162b915f1SJiri Olsa
28262b915f1SJiri Olsa #else
28362b915f1SJiri Olsa #define __trace_function trace_function
28462b915f1SJiri Olsa
/* No graph tracer: always defer to the default TRACE_FN output. */
static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
28962b915f1SJiri Olsa
/* No graph tracer: there is no per-iterator state to open or close. */
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
2927e9a49efSJiri Olsa
2937e9a49efSJiri Olsa #ifdef CONFIG_FUNCTION_TRACER
/* Function tracer available: use the standard header. */
static void irqsoff_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
2987e9a49efSJiri Olsa #else
/* No function tracer either: show only the latency banner. */
static void irqsoff_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
3037e9a49efSJiri Olsa #endif /* CONFIG_FUNCTION_TRACER */
30462b915f1SJiri Olsa #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
30562b915f1SJiri Olsa
30681d68a96SSteven Rostedt /*
30781d68a96SSteven Rostedt * Should this new latency be reported/recorded?
30881d68a96SSteven Rostedt */
report_latency(struct trace_array * tr,u64 delta)309a5a1d1c2SThomas Gleixner static bool report_latency(struct trace_array *tr, u64 delta)
31081d68a96SSteven Rostedt {
31181d68a96SSteven Rostedt if (tracing_thresh) {
31281d68a96SSteven Rostedt if (delta < tracing_thresh)
31379851821SYaowei Bai return false;
31481d68a96SSteven Rostedt } else {
3156d9b3fa5SSteven Rostedt (Red Hat) if (delta <= tr->max_latency)
31679851821SYaowei Bai return false;
31781d68a96SSteven Rostedt }
31879851821SYaowei Bai return true;
31981d68a96SSteven Rostedt }
32081d68a96SSteven Rostedt
/*
 * A critical section just ended: compute its duration and, if it is a
 * new maximum, record the trace under max_trace_lock.
 */
static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	u64 T0, T1, delta;
	unsigned long flags;
	unsigned int trace_ctx;

	/* Duration of the section that just closed. */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	trace_ctx = tracing_gen_ctx();

	/* Fast path: cheap unlocked check before taking the lock. */
	if (!report_latency(tr, delta))
		goto out;

	raw_spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(tr, delta))
		goto out_unlock;

	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
	/* Skip 5 functions to get to the irq/preempt enable function */
	__trace_stack(tr, trace_ctx, 5);

	/* Another max was recorded meanwhile; our measurement is disturbed. */
	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tr->max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	raw_spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	/* Re-arm this CPU for the next measurement window. */
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	__trace_function(tr, CALLER_ADDR0, parent_ip, trace_ctx);
}
37081d68a96SSteven Rostedt
/*
 * Mark the start of an irqs/preempt-off critical section on this CPU:
 * timestamp it and flag the CPU so the function hooks begin tracing.
 */
static nokprobe_inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	cpu = raw_smp_processor_id();

	/* Already inside a critical section on this CPU. */
	if (per_cpu(tracing_cpu, cpu))
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	/* Prefer the caller's address as the start of the section. */
	data->critical_start = parent_ip ? : ip;

	__trace_function(tr, ip, parent_ip, tracing_gen_ctx());

	/* From here on the function hooks will trace this CPU. */
	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}
40381d68a96SSteven Rostedt
/*
 * Mark the end of a critical section on this CPU and check whether it
 * produced a new maximum latency.
 */
static nokprobe_inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled || !tracing_is_enabled())
		return;

	data = per_cpu_ptr(tr->array_buffer.data, cpu);

	/* Nothing to close if no section was started or tracing is off. */
	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	trace_ctx = tracing_gen_ctx();
	__trace_function(tr, ip, parent_ip, trace_ctx);
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
43681d68a96SSteven Rostedt
4376cd8a4bbSSteven Rostedt /* start and stop critical timings used to for stoppage (in idle) */
/* Resume timing: reopen the critical section if one is logically active. */
void start_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);
44581d68a96SSteven Rostedt
/* Pause timing: close the critical section if one is logically active. */
void stop_critical_timings(void)
{
	if (preempt_trace(preempt_count()) || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
45381d68a96SSteven Rostedt
4548179e8a1SSteven Rostedt (Red Hat) #ifdef CONFIG_FUNCTION_TRACER
4558179e8a1SSteven Rostedt (Red Hat) static bool function_enabled;
4568179e8a1SSteven Rostedt (Red Hat)
register_irqsoff_function(struct trace_array * tr,int graph,int set)4574104d326SSteven Rostedt (Red Hat) static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
45881d68a96SSteven Rostedt {
459328df475SSteven Rostedt (Red Hat) int ret;
46062b915f1SJiri Olsa
461328df475SSteven Rostedt (Red Hat) /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
462983f938aSSteven Rostedt (Red Hat) if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
463328df475SSteven Rostedt (Red Hat) return 0;
464328df475SSteven Rostedt (Red Hat)
465328df475SSteven Rostedt (Red Hat) if (graph)
466688f7089SSteven Rostedt (VMware) ret = register_ftrace_graph(&fgraph_ops);
467328df475SSteven Rostedt (Red Hat) else
4684104d326SSteven Rostedt (Red Hat) ret = register_ftrace_function(tr->ops);
469328df475SSteven Rostedt (Red Hat)
470328df475SSteven Rostedt (Red Hat) if (!ret)
471328df475SSteven Rostedt (Red Hat) function_enabled = true;
472328df475SSteven Rostedt (Red Hat)
473328df475SSteven Rostedt (Red Hat) return ret;
474328df475SSteven Rostedt (Red Hat) }
475328df475SSteven Rostedt (Red Hat)
/* Unhook the function (or function graph) tracer callbacks, if hooked. */
static void unregister_irqsoff_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}
488328df475SSteven Rostedt (Red Hat)
/*
 * Handle toggling of the TRACE_ITER_FUNCTION flag.
 * Returns 1 when the flag was handled here, 0 when it is not ours.
 */
static int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_irqsoff_function(tr, is_graph(tr), 1);
	else
		unregister_irqsoff_function(tr, is_graph(tr));
	return 1;
}
5008179e8a1SSteven Rostedt (Red Hat) #else
/* Function tracer not configured: all registration hooks are no-ops. */
static int register_irqsoff_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_irqsoff_function(struct trace_array *tr, int graph) { }
static inline int irqsoff_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
5108179e8a1SSteven Rostedt (Red Hat) #endif /* CONFIG_FUNCTION_TRACER */
511328df475SSteven Rostedt (Red Hat)
/* Called when a trace option flag changes while this tracer is current. */
static int irqsoff_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	/* TRACE_ITER_FUNCTION is fully handled by the function hooks. */
	if (irqsoff_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return irqsoff_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
526328df475SSteven Rostedt (Red Hat)
start_irqsoff_tracer(struct trace_array * tr,int graph)527328df475SSteven Rostedt (Red Hat) static int start_irqsoff_tracer(struct trace_array *tr, int graph)
528328df475SSteven Rostedt (Red Hat) {
529328df475SSteven Rostedt (Red Hat) int ret;
530328df475SSteven Rostedt (Red Hat)
5314104d326SSteven Rostedt (Red Hat) ret = register_irqsoff_function(tr, graph, 0);
53262b915f1SJiri Olsa
53362b915f1SJiri Olsa if (!ret && tracing_is_enabled())
53489b2f978SSteven Rostedt tracer_enabled = 1;
53594523e81SSteven Rostedt else
5369036990dSSteven Rostedt tracer_enabled = 0;
53762b915f1SJiri Olsa
53862b915f1SJiri Olsa return ret;
53981d68a96SSteven Rostedt }
54081d68a96SSteven Rostedt
/* Disable event recording, then tear down the function hooks. */
static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_irqsoff_function(tr, graph);
}
54781d68a96SSteven Rostedt
54802f2f764SSteven Rostedt (Red Hat) static bool irqsoff_busy;
54902f2f764SSteven Rostedt (Red Hat)
/*
 * Common init for the irqsoff-family tracers: override the trace flags
 * the latency tracers depend on, bind to @tr and start tracing.
 * Returns -EBUSY if another irqsoff-family tracer is already running.
 */
static int __irqsoff_tracer_init(struct trace_array *tr)
{
	if (irqsoff_busy)
		return -EBUSY;

	/* Remember the caller's flags so reset can restore them. */
	save_flags = tr->trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
	/* without pause, we will produce garbage if another latency occurs */
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, 1);

	tr->max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();

	ftrace_init_array_ops(tr, irqsoff_tracer_call);

	/* Only toplevel instance supports graph tracing */
	if (start_irqsoff_tracer(tr, (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
				      is_graph(tr))))
		printk(KERN_ERR "failed to start irqsoff tracer\n");

	irqsoff_busy = true;
	return 0;
}
57881d68a96SSteven Rostedt
/* Common teardown: stop tracing and restore the flags init overrode. */
static void __irqsoff_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
	int pause_flag = save_flags & TRACE_ITER_PAUSE_ON_TRACE;

	stop_irqsoff_tracer(tr, is_graph(tr));

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	set_tracer_flag(tr, TRACE_ITER_PAUSE_ON_TRACE, pause_flag);
	ftrace_reset_array_ops(tr);

	/* Allow another irqsoff-family tracer to be started. */
	irqsoff_busy = false;
}
59481d68a96SSteven Rostedt
irqsoff_tracer_start(struct trace_array * tr)5959036990dSSteven Rostedt static void irqsoff_tracer_start(struct trace_array *tr)
5969036990dSSteven Rostedt {
5979036990dSSteven Rostedt tracer_enabled = 1;
5989036990dSSteven Rostedt }
5999036990dSSteven Rostedt
irqsoff_tracer_stop(struct trace_array * tr)6009036990dSSteven Rostedt static void irqsoff_tracer_stop(struct trace_array *tr)
6019036990dSSteven Rostedt {
6029036990dSSteven Rostedt tracer_enabled = 0;
60381d68a96SSteven Rostedt }
60481d68a96SSteven Rostedt
6056cd8a4bbSSteven Rostedt #ifdef CONFIG_IRQSOFF_TRACER
/*
 * We are only interested in hardirq on/off events:
 */
void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
{
	/* Ignore the event while a preempt-off section is being traced. */
	if (preempt_trace(preempt_count()))
		return;
	if (irq_trace())
		stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);
615c3bc8fd6SJoel Fernandes (Google)
/* Hardirqs were just disabled: open a critical section, if applicable. */
void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
	/* Ignore the event while a preempt-off section is being traced. */
	if (preempt_trace(preempt_count()))
		return;
	if (irq_trace())
		start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);
622c3bc8fd6SJoel Fernandes (Google)
irqsoff_tracer_init(struct trace_array * tr)6231c80025aSFrederic Weisbecker static int irqsoff_tracer_init(struct trace_array *tr)
6246cd8a4bbSSteven Rostedt {
6256cd8a4bbSSteven Rostedt trace_type = TRACER_IRQS_OFF;
6266cd8a4bbSSteven Rostedt
62702f2f764SSteven Rostedt (Red Hat) return __irqsoff_tracer_init(tr);
6286cd8a4bbSSteven Rostedt }
6292b27ece6SJoel Fernandes (Google)
/* Teardown hook for the "irqsoff" tracer. */
static void irqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}
6342b27ece6SJoel Fernandes (Google)
63581d68a96SSteven Rostedt static struct tracer irqsoff_tracer __read_mostly =
63681d68a96SSteven Rostedt {
63781d68a96SSteven Rostedt .name = "irqsoff",
63881d68a96SSteven Rostedt .init = irqsoff_tracer_init,
63981d68a96SSteven Rostedt .reset = irqsoff_tracer_reset,
6409036990dSSteven Rostedt .start = irqsoff_tracer_start,
6419036990dSSteven Rostedt .stop = irqsoff_tracer_stop,
642f43c738bSHiraku Toyooka .print_max = true,
64362b915f1SJiri Olsa .print_header = irqsoff_print_header,
64462b915f1SJiri Olsa .print_line = irqsoff_print_line,
645328df475SSteven Rostedt (Red Hat) .flag_changed = irqsoff_flag_changed,
64660a11774SSteven Rostedt #ifdef CONFIG_FTRACE_SELFTEST
64760a11774SSteven Rostedt .selftest = trace_selftest_startup_irqsoff,
64860a11774SSteven Rostedt #endif
64962b915f1SJiri Olsa .open = irqsoff_trace_open,
65062b915f1SJiri Olsa .close = irqsoff_trace_close,
65102f2f764SSteven Rostedt (Red Hat) .allow_instances = true,
652f43c738bSHiraku Toyooka .use_max_tr = true,
65381d68a96SSteven Rostedt };
654c3bc8fd6SJoel Fernandes (Google) #endif /* CONFIG_IRQSOFF_TRACER */
6556cd8a4bbSSteven Rostedt
6566cd8a4bbSSteven Rostedt #ifdef CONFIG_PREEMPT_TRACER
/* Preemption re-enabled: close the critical section, if applicable. */
void tracer_preempt_on(unsigned long a0, unsigned long a1)
{
	/* Let irq-off tracing own the event when irqs are also disabled. */
	if (!preempt_trace(preempt_count()))
		return;
	if (!irq_trace())
		stop_critical_timing(a0, a1);
}
662c3bc8fd6SJoel Fernandes (Google)
/* Preemption disabled: open a critical section, if applicable. */
void tracer_preempt_off(unsigned long a0, unsigned long a1)
{
	/* Let irq-off tracing own the event when irqs are also disabled. */
	if (!preempt_trace(preempt_count()))
		return;
	if (!irq_trace())
		start_critical_timing(a0, a1);
}
668c3bc8fd6SJoel Fernandes (Google)
preemptoff_tracer_init(struct trace_array * tr)6691c80025aSFrederic Weisbecker static int preemptoff_tracer_init(struct trace_array *tr)
6706cd8a4bbSSteven Rostedt {
6716cd8a4bbSSteven Rostedt trace_type = TRACER_PREEMPT_OFF;
6726cd8a4bbSSteven Rostedt
67302f2f764SSteven Rostedt (Red Hat) return __irqsoff_tracer_init(tr);
6746cd8a4bbSSteven Rostedt }
6756cd8a4bbSSteven Rostedt
/* Teardown hook for the "preemptoff" tracer. */
static void preemptoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}
6802b27ece6SJoel Fernandes (Google)
6816cd8a4bbSSteven Rostedt static struct tracer preemptoff_tracer __read_mostly =
6826cd8a4bbSSteven Rostedt {
6836cd8a4bbSSteven Rostedt .name = "preemptoff",
6846cd8a4bbSSteven Rostedt .init = preemptoff_tracer_init,
6852b27ece6SJoel Fernandes (Google) .reset = preemptoff_tracer_reset,
6869036990dSSteven Rostedt .start = irqsoff_tracer_start,
6879036990dSSteven Rostedt .stop = irqsoff_tracer_stop,
688f43c738bSHiraku Toyooka .print_max = true,
68962b915f1SJiri Olsa .print_header = irqsoff_print_header,
69062b915f1SJiri Olsa .print_line = irqsoff_print_line,
691328df475SSteven Rostedt (Red Hat) .flag_changed = irqsoff_flag_changed,
69260a11774SSteven Rostedt #ifdef CONFIG_FTRACE_SELFTEST
69360a11774SSteven Rostedt .selftest = trace_selftest_startup_preemptoff,
69460a11774SSteven Rostedt #endif
69562b915f1SJiri Olsa .open = irqsoff_trace_open,
69662b915f1SJiri Olsa .close = irqsoff_trace_close,
69702f2f764SSteven Rostedt (Red Hat) .allow_instances = true,
698f43c738bSHiraku Toyooka .use_max_tr = true,
6996cd8a4bbSSteven Rostedt };
700c3bc8fd6SJoel Fernandes (Google) #endif /* CONFIG_PREEMPT_TRACER */
7016cd8a4bbSSteven Rostedt
702c3bc8fd6SJoel Fernandes (Google) #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
7036cd8a4bbSSteven Rostedt
preemptirqsoff_tracer_init(struct trace_array * tr)7041c80025aSFrederic Weisbecker static int preemptirqsoff_tracer_init(struct trace_array *tr)
7056cd8a4bbSSteven Rostedt {
7066cd8a4bbSSteven Rostedt trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
7076cd8a4bbSSteven Rostedt
70802f2f764SSteven Rostedt (Red Hat) return __irqsoff_tracer_init(tr);
7096cd8a4bbSSteven Rostedt }
7106cd8a4bbSSteven Rostedt
/* Teardown hook for the "preemptirqsoff" tracer. */
static void preemptirqsoff_tracer_reset(struct trace_array *tr)
{
	__irqsoff_tracer_reset(tr);
}
7152b27ece6SJoel Fernandes (Google)
7166cd8a4bbSSteven Rostedt static struct tracer preemptirqsoff_tracer __read_mostly =
7176cd8a4bbSSteven Rostedt {
7186cd8a4bbSSteven Rostedt .name = "preemptirqsoff",
7196cd8a4bbSSteven Rostedt .init = preemptirqsoff_tracer_init,
7202b27ece6SJoel Fernandes (Google) .reset = preemptirqsoff_tracer_reset,
7219036990dSSteven Rostedt .start = irqsoff_tracer_start,
7229036990dSSteven Rostedt .stop = irqsoff_tracer_stop,
723f43c738bSHiraku Toyooka .print_max = true,
72462b915f1SJiri Olsa .print_header = irqsoff_print_header,
72562b915f1SJiri Olsa .print_line = irqsoff_print_line,
726328df475SSteven Rostedt (Red Hat) .flag_changed = irqsoff_flag_changed,
72760a11774SSteven Rostedt #ifdef CONFIG_FTRACE_SELFTEST
72860a11774SSteven Rostedt .selftest = trace_selftest_startup_preemptirqsoff,
72960a11774SSteven Rostedt #endif
73062b915f1SJiri Olsa .open = irqsoff_trace_open,
73162b915f1SJiri Olsa .close = irqsoff_trace_close,
73202f2f764SSteven Rostedt (Red Hat) .allow_instances = true,
733f43c738bSHiraku Toyooka .use_max_tr = true,
7346cd8a4bbSSteven Rostedt };
7356cd8a4bbSSteven Rostedt #endif
73681d68a96SSteven Rostedt
init_irqsoff_tracer(void)73781d68a96SSteven Rostedt __init static int init_irqsoff_tracer(void)
73881d68a96SSteven Rostedt {
739c3bc8fd6SJoel Fernandes (Google) #ifdef CONFIG_IRQSOFF_TRACER
740c3bc8fd6SJoel Fernandes (Google) register_tracer(&irqsoff_tracer);
741c3bc8fd6SJoel Fernandes (Google) #endif
742c3bc8fd6SJoel Fernandes (Google) #ifdef CONFIG_PREEMPT_TRACER
743c3bc8fd6SJoel Fernandes (Google) register_tracer(&preemptoff_tracer);
744c3bc8fd6SJoel Fernandes (Google) #endif
745c3bc8fd6SJoel Fernandes (Google) #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
746c3bc8fd6SJoel Fernandes (Google) register_tracer(&preemptirqsoff_tracer);
747c3bc8fd6SJoel Fernandes (Google) #endif
74881d68a96SSteven Rostedt
74981d68a96SSteven Rostedt return 0;
75081d68a96SSteven Rostedt }
7516f415672SSteven Rostedt core_initcall(init_irqsoff_tracer);
752aaecaa0bSJoel Fernandes #endif /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
753