xref: /openbmc/linux/kernel/trace/trace_selftest.c (revision 4f6cce39)
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

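/*
 * Entry types the selftests expect to find in the ring buffer.
 * Anything else indicates a corrupted trace.
 */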
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

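/*
 * Consume all entries from one CPU's ring buffer and verify that each
 * one is a valid trace entry. Returns 0 on success, -1 if the buffer
 * looks corrupted (in which case tracing is disabled).
 */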
static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we loop
		 * more times than that, something is wrong with the
		 * ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

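/*
 * A set of counting ftrace callbacks and the ops that use them.
 * trace_selftest_ops() below registers these with different filters
 * and checks that each callback fires the expected number of times.
 */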
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

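/*
 * Exercise several ftrace_ops registered at once: three filtered
 * static probes, the tracer's own ops (when cnt > 1), and one
 * dynamically allocated ops. Returns 0 if every counter matches what
 * the filters should allow, -1 otherwise.
 */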
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* On the first pass the main function tracer is already running */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

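/*
 * Recursion test: trace_selftest_test_recursion_func() is registered
 * without the RECURSION_SAFE flag and calls the traced function again,
 * relying on ftrace's own recursion protection to stop it. The
 * "recursion safe" variant below provides its own protection.
 */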
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection.
	 * By calling this function again, we should recurse back
	 * into this function and count again. But this only happens
	 * if the arch supports all of the ftrace features and nothing
	 * else is using the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

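/*
 * Register each recursion probe on DYN_FTRACE_TEST_NAME in turn and
 * verify that the callback count ends up exactly where the test
 * expects it, i.e. that no runaway recursion took place.
 */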
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");


	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

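/*
 * State shared between the "save regs" test callback and
 * trace_selftest_function_regs(): records whether the callback
 * received a pt_regs pointer.
 */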
static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

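/*
 * Check that FTRACE_OPS_FL_SAVE_REGS behaves as advertised: on archs
 * with DYNAMIC_FTRACE_WITH_REGS the callback must see a pt_regs
 * pointer, otherwise registration must fail unless the
 * SAVE_REGS_IF_SUPPORTED fallback flag is used.
 */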
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}


	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy; we only need to detect a hang approximately */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
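/*
 * Verify the irqsoff tracer: disable interrupts briefly and make sure
 * the resulting latency shows up in the max buffer.
 */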
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer swap:
	 * tracing_stop() disables both the tr and max buffers, which makes
	 * the swap impossible if a parallel max irqs-off latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
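/*
 * Verify the preemptoff tracer: disable preemption briefly and make
 * sure the resulting latency shows up in the max buffer.
 */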
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer swap:
	 * tracing_stop() disables both the tr and max buffers, which makes
	 * the swap impossible if a parallel max preempt-off latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
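/*
 * Verify the combined preemptirqsoff tracer: disable both preemption
 * and interrupts for a bit and check that entries land in both the
 * live and max buffers, running the sequence twice.
 */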
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning about a failed buffer swap:
	 * tracing_stop() disables both the tr and max buffers, which makes
	 * the swap impossible if a parallel max irqs/preempt-off latency
	 * comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

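/*
 * Helper for the wakeup latency test: a SCHED_DEADLINE kthread that
 * signals when it is ready, sleeps until the test wakes it, then
 * waits to be stopped.
 */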
struct wakeup_test_data {
	struct completion	is_ready;
	int			go;
};

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the test know we now have the new scheduling policy */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
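
/*
 * Verify the wakeup latency tracer: create a -deadline thread, wake it
 * from the test, and check that the wakeup latency was recorded in the
 * max buffer.
 */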
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);


	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
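/*
 * Basic sanity check for the sched_switch tracer: run it for a bit
 * and make sure the buffer contains valid entries.
 */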
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
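/*
 * Basic sanity check for the branch tracer: run it for a bit and make
 * sure the buffer contains valid entries.
 */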
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
