xref: /openbmc/linux/kernel/trace/trace_selftest.c (revision b627b4ed)
/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

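/*
 * Only the entry types that the selftests are expected to generate are
 * considered valid; anything else in the buffer indicates corruption.
 */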
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

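/*
 * Consume every event queued on a single CPU and check that each one is
 * a valid entry type. Tracing is shut down if corruption is detected.
 */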
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we
		 * loop more times than that, something is wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* The test function is passed in as a parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
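/* Dynamic ftrace is not configured; report success without running the test */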
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy; we only need to detect a hang approximately */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer, from which this selftest
 * has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing; the function tracer selftest already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
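/*
 * Disable interrupts for a short delay with the irqsoff tracer running
 * and verify that the latency was recorded in the max trace buffer.
 */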
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent to a buffer
	 * flipping failure: tracing_stop() disables the tr and max
	 * buffers, making flipping impossible in case of parallel
	 * max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
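/*
 * Disable preemption for a short delay with the preemptoff tracer running
 * and verify that the latency was recorded in the max trace buffer.
 */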
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent to a buffer
	 * flipping failure: tracing_stop() disables the tr and max
	 * buffers, making flipping impossible in case of parallel
	 * max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
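/*
 * Exercise the combined preemptirqsoff tracer: disable preemption and
 * interrupts together (re-enabling them in reverse order), in two passes,
 * and verify that each pass recorded entries in the max trace buffer.
 */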
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent to a buffer
	 * flipping failure: tracing_stop() disables the tr and max
	 * buffers, making flipping impossible in case of parallel
	 * max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
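/*
 * Kernel thread used by the wakeup selftest: it raises itself to an RT
 * priority, signals the tester, then sleeps until it gets woken up.
 */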
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we now run at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task; do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

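/*
 * Create an RT thread, wake it with the wakeup tracer running, and verify
 * that the wakeup latency was recorded in the max trace buffer.
 */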
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
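/*
 * Let the scheduler run for 1/10 of a second with the sched_switch tracer
 * enabled and check that context-switch events were recorded.
 */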
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
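/*
 * Run the sysprof tracer for 1/10 of a second and check that it produced
 * some entries.
 */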
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
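/*
 * Run the branch tracer for 1/10 of a second and check that it produced
 * some entries.
 */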
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */