xref: /openbmc/linux/kernel/trace/trace_selftest.c (revision b8bb76713ec50df2f11efee386e16f93d51e1076)
/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_CONT:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
		return 1;
	}
	return 0;
}
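
/*
 * The TRACE_* values above come from enum trace_type in trace.h; any
 * entry type the selftest does not recognize is treated as buffer
 * corruption by trace_test_buffer_cpu() below.
 */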

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer has a size of trace_buf_size; if we
		 * loop more than that many times, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
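
/*
 * How these tests are reached (a sketch; the details live in trace.c and
 * the individual tracer files): when CONFIG_FTRACE_STARTUP_TEST is
 * enabled, register_tracer() runs the tracer's ->selftest hook at
 * registration time and rejects the tracer if it returns non-zero.
 * A tracer wires itself up roughly like:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 *
 * That is why every test below restores global state (filters, enables,
 * max latency) before returning, even on failure.
 */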
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
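
/*
 * The two-level macro forces the argument to be macro-expanded before it
 * is stringified: __STR(DYN_FTRACE_TEST_NAME) would give the literal
 * string "DYN_FTRACE_TEST_NAME", while STR(DYN_FTRACE_TEST_NAME) expands
 * first. Assuming trace.h defines something along the lines of
 *
 *	#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
 *
 * STR(DYN_FTRACE_TEST_NAME) yields "trace_selftest_dynamic_test_func",
 * which is the name the filter below needs to match.
 */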

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;
	/* passed in as a parameter to keep gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
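
/*
 * The ({ 0; }) stub above is a GNU statement expression, so callers can
 * still use the "return value" when CONFIG_DYNAMIC_FTRACE is not set.
 * DYN_FTRACE_TEST_NAME itself is expected to be defined in a separate
 * compilation unit so the compiler cannot inline or eliminate the call;
 * a minimal sketch of such a test function:
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		return 0;
 *	}
 *
 * Calling it while it is the only filtered function is what should
 * produce the single buffer entry the dynamic test checks for.
 */
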
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */
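
/*
 * Note on the latency selftests below: the irqsoff/preemptoff tracers
 * keep their worst-case trace in the separate max_tr array. Roughly,
 * when a traced critical section ends the tracer does something like
 *
 *	if (delta > tracing_max_latency) {
 *		tracing_max_latency = delta;
 *		update_max_tr_single(tr, current, cpu);
 *	}
 *
 * (the exact helper differs per tracer), so each test first resets
 * tracing_max_latency to 0, making even a short udelay(100) register
 * as a new maximum, and then verifies both the live buffer and the
 * max_tr snapshot.
 */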

#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

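/*
 * The wakeup tracer exercised below measures how long the highest
 * priority runnable task waits between being woken and actually getting
 * the CPU, and like the latency tests above it keeps its worst case in
 * max_tr. The RT thread created here just gives the test a predictable
 * high-priority target to wake up.
 */
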
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return 0;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = trace->init(tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
637