/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>

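/*
 * Only entry types that the selftests themselves can generate are
 * considered valid; anything else found in the buffer indicates
 * corruption.
 */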
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
	case TRACE_HW_BRANCHES:
	case TRACE_KSYM:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

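	/*
	 * ring_buffer_consume() pops one event per call and returns
	 * NULL once this CPU's buffer has been drained.
	 */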
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we
		 * loop more times than that, something is wrong with
		 * the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

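	/* ring_buffer_entries() counts the entries across all per-cpu buffers */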
	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken and constantly filling the
	 * buffer, this will run forever and hard-lock the box. We
	 * disable the ring buffer while we do this test to prevent
	 * a hard lockup.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* func is passed in by parameter to keep gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
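	/*
	 * Note: DYN_FTRACE_TEST_NAME is expected to be defined out of
	 * line (see trace_selftest_dynamic.c) so that it is compiled
	 * with mcount profiling and can actually be hooked by ftrace.
	 */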

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);
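	/* the final argument (1) resets any previously installed filter first */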

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/*
	 * We should have nothing in the buffer: the filter only matches
	 * our test function, which has not run since tracing started.
	 */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
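/*
 * Without dynamic ftrace there is nothing to patch, so the dynamic
 * test degenerates to a statement expression that reports success.
 */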
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
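	/* point the graph entry/return callbacks at our trace_array */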
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer before tracing_stop() to avoid a warning
	 * from a failed buffer swap: tracing_stop() disables the tr
	 * and max buffers, making the swap impossible if a max
	 * irqs-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer before tracing_stop() to avoid a warning
	 * from a failed buffer swap: tracing_stop() disables the tr
	 * and max buffers, making the swap impossible if a max
	 * preempt-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer before tracing_stop() to avoid a warning
	 * from a failed buffer swap: tracing_stop() disables the tr
	 * and max buffers, making the swap impossible if a max
	 * irqs/preempt-off latency is recorded in parallel.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);
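	/*
	 * SCHED_FIFO at prio 5 is enough to preempt the (non-RT) test
	 * runner while staying out of the way of real RT tasks.
	 */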

	/* let the test know we are now running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do a
	 * wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long count;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	memcpy(&tracer, trace, sizeof(tracer));
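	/*
	 * Hand trace->open() a scratch copy of the tracer, presumably
	 * so the selftest cannot perturb the registered instance.
	 */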

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */

#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;

int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;
	/* Register the read-write tracing request */

	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));
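	/* this arms a read|write hardware breakpoint on the variable */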

	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}
	/*
	 * Perform a read and a write on the dummy variable to trigger
	 * the tracer: the compare is the read, the increment the write.
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/*
	 * One read and one write were performed on the dummy variable,
	 * so the trace buffer should hold exactly two entries.
	 */
	if (!ret && count != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

ret_path:
	return ret;
}
#endif /* CONFIG_KSYM_TRACER */