// SPDX-License-Identifier: GPL-2.0
/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl;

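/*
 * wakeup_lock serializes all updates to the wakeup_* state above. It is
 * a bare arch_spinlock_t (no lockdep, no preemption handling), so every
 * taker must disable interrupts around it, as the probes below do.
 */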
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int start_func_tracer(struct trace_array *tr, int graph);
static void stop_func_tracer(struct trace_array *tr, int graph);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
# define is_graph(tr) false
#endif

#ifdef CONFIG_FUNCTION_TRACER

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 *            is disabled and data->disabled is incremented.
 *         0 if the trace is to be ignored, and preemption
 *            is not disabled and data->disabled is
 *            kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    unsigned int *trace_ctx)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*trace_ctx = tracing_gen_ctx();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

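/*
 * Called when the TRACE_ITER_DISPLAY_GRAPH flag is flipped: restart the
 * function tracer in the newly requested mode (graph or plain function)
 * and reset the recorded max latency.
 */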
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;
	int ret = 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;
	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return 0;

	ret = __trace_graph_entry(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned int trace_ctx;

	ftrace_graph_addr_finish(trace);

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	__trace_graph_return(tr, trace, trace_ctx);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static struct fgraph_ops fgraph_wakeup_ops = {
	.entryfunc = &wakeup_graph_entry,
	.retfunc = &wakeup_graph_return,
};

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_CPU |  \
			    TRACE_GRAPH_PRINT_REL_TIME | \
			    TRACE_GRAPH_PRINT_DURATION | \
			    TRACE_GRAPH_PRINT_OVERHEAD | \
			    TRACE_GRAPH_PRINT_IRQS)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler.
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;

	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, trace_ctx);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&fgraph_wakeup_ops);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph(&fgraph_wakeup_ops);
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));
	return 1;
}
#else /* CONFIG_FUNCTION_TRACER */
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

#ifndef CONFIG_FUNCTION_GRAPH_TRACER
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#endif /* !CONFIG_FUNCTION_GRAPH_TRACER */

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned int trace_ctx)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, trace_ctx);
	else
		trace_function(tr, ip, parent_ip, trace_ctx);
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, u64 delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}

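/*
 * The task being traced moved to another CPU; record it so that the
 * function-tracing probes above only fire on the CPU the task is
 * actually running on (see the wakeup_current_cpu check in
 * func_prolog_preempt_disable()).
 */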
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

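/*
 * Write a context-switch (TRACE_CTX) entry for the traced task into the
 * ring buffer.
 */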
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_context_switch;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= prev->pid;
	entry->prev_prio		= prev->prio;
	entry->prev_state		= task_state_index(prev);
	entry->next_pid			= next->pid;
	entry->next_prio		= next->prio;
	entry->next_state		= task_state_index(next);
	entry->next_cpu	= task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

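/*
 * Write a wakeup (TRACE_WAKE) entry; it reuses the ctx_switch_entry
 * layout, with the currently running task as "prev" and the wakee as
 * "next".
 */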
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct trace_buffer *buffer = tr->array_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid			= curr->pid;
	entry->prev_prio		= curr->prio;
	entry->prev_state		= task_state_index(curr);
	entry->next_pid			= wakee->pid;
	entry->next_prio		= wakee->prio;
	entry->next_state		= task_state_index(wakee);
	entry->next_cpu			= task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, trace_ctx);
}

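/*
 * Attached to the sched_switch tracepoint. When the task we are waiting
 * on is finally scheduled in, compute the wakeup latency as the delta
 * between now and the timestamp taken in probe_wakeup(), and record a
 * new max latency snapshot if report_latency() says it qualifies.
 */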
static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	u64 T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	unsigned int trace_ctx;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see tracer_enabled = 1 paired
	 * with a stale wakeup_task from an older trace, which might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	trace_ctx = tracing_gen_ctx_flags(flags);

	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, trace_ctx);
	tracing_sched_switch_trace(wakeup_trace, prev, next, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

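/* Callers must hold wakeup_lock (with interrupts disabled). */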
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->array_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

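/*
 * Attached to the sched_wakeup and sched_wakeup_new tracepoints: decide
 * whether @p should become the task whose wakeup latency we measure,
 * and if so, timestamp the wakeup.
 */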
static void
probe_wakeup(void *ignore, struct task_struct *p)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	long disabled;
	unsigned int trace_ctx;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * The semantics are as follows:
	 *  - the wakeup tracer handles all tasks in the system, independently
	 *    of their scheduling class;
	 *  - the wakeup_rt tracer handles tasks belonging to the sched_dl and
	 *    sched_rt classes;
	 *  - wakeup_dl handles tasks belonging to the sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	trace_ctx = tracing_gen_ctx();

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = get_task_struct(p);

	data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
	__trace_stack(wakeup_trace, trace_ctx, 0);

	/*
	 * We must be careful when using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is),
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, trace_ctx);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
}

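/*
 * Register all four tracepoint probes and start the function (or
 * function-graph) tracer; already-registered probes are unwound on
 * failure.
 */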
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint probe to kernel_sched_migrate_task\n");
		goto fail_deprobe_sched_switch;
	}

	wakeup_reset(tr);

	/*
	 * Don't let tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph(tr)))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_sched_switch:
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph(tr));
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = tr->trace_flags;

	/* non-overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest    = trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

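/*
 * Example usage from userspace (a sketch; assumes tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo wakeup_rt > /sys/kernel/tracing/current_tracer
 *   echo 1 > /sys/kernel/tracing/tracing_on
 *   ... run an RT workload ...
 *   cat /sys/kernel/tracing/tracing_max_latency
 *   cat /sys/kernel/tracing/trace
 */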
__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);