/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

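/*
 * Only the entry types that the selftests below can generate are
 * considered valid; any other type found in the buffer indicates
 * corruption.
 */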
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds trace_buf_size entries; if we
		 * loop more times than that, something is wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
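/*
 * Note that ring_buffer_consume() removes entries as they are
 * checked, so a buffer can only be validated once per test.
 */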
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

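/*
 * Each probe below simply counts how many times ftrace invoked it.
 * The selftest registers these ops with different filters and then
 * checks the counts to verify that filtering and callback dispatch
 * behave as expected.
 */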
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func		= trace_selftest_test_global_func,
	.flags		= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

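/*
 * Called twice by trace_selftest_startup_dynamic_tracing(): pass #1
 * runs with global tracing active, pass #2 with it turned off.
 */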
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);
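	/*
	 * The last ftrace_set_filter() argument is "reset": probe3's
	 * second filter is added with reset == 0, so func2 is appended
	 * to its existing func1 filter rather than replacing it.
	 */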

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
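	/*
	 * The leading '*' lets the filter match any leading characters,
	 * so the pattern also matches the ".DYN_FTRACE_TEST_NAME" form
	 * produced by PPC64 function descriptors.
	 */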

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all of the ftrace features and nothing else is using
	 * the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

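/*
 * test_rec_probe omits FTRACE_OPS_FL_RECURSION_SAFE, so the ftrace
 * core must cut the recursion off after a single call.
 * test_recsafe_probe sets the flag, promising to handle recursion
 * itself, and may be called a second time through its own recursion.
 */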
static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
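/*
 * Without CONFIG_DYNAMIC_FTRACE these tests cannot run, so the stubs
 * below evaluate to 0 and callers treat them as having passed.
 */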
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

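/*
 * FTRACE_OPS_FL_SAVE_REGS asks ftrace to hand the callback a full
 * pt_regs. On arches without CONFIG_DYNAMIC_FTRACE_WITH_REGS,
 * registering such an ops is expected to fail, which the test below
 * checks for explicitly.
 */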
static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy; we just want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}
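
/*
 * Note: returning 0 from a graph entry handler tells the function
 * graph tracer not to trace the return of that function.
 */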

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a parallel max irqs-off latency
	 * comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a parallel max preempt-off
	 * latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, which
	 * makes flipping impossible if a parallel max irqs/preempt-off
	 * latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* run the same sequence again with a fresh max latency */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; it doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the caller know we now have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

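/*
 * Wakeup-latency selftest: create an RT thread and let it go to sleep,
 * then wake it with the tracer running. The max buffer should end up
 * holding the wakeup latency of that highest-priority task.
 */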
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the RT thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&isrt);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&isrt);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	printk("ret = %d\n", ret);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */