/* kernel/trace/trace_selftest.c (revision 06fa75ab566c50e01bfd7b055bde85cf9b1bc98a) */
/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

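/*
 * Only entry types that the selftests below can generate are considered
 * valid; anything else found in the buffer means it has been corrupted.
 */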
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}

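/*
 * Walk one CPU's list of trace pages and verify that every recorded
 * entry has a valid type and that the page links terminate exactly
 * where the recorded entry count says they should.
 */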
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane. Sums the per-CPU entry counts into *count
 * and stops at the first CPU whose buffer fails validation.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

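/*
 * Two-level stringification: __STR() turns its argument into a string
 * literal, and STR() forces macro expansion of the argument first, so
 * STR(DYN_FTRACE_TEST_NAME) yields "trace_selftest_dynamic_test_func".
 */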
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
#define __STR(x) #x
#define STR(x) __STR(x)
static int DYN_FTRACE_TEST_NAME(void)
{
	/* used to call mcount */
	return 0;
}

/*
 * Test dynamic code modification and ftrace filters: first verify that
 * a filter restricted to DYN_FTRACE_TEST_NAME suppresses all other
 * functions, then verify that calling the test function produces
 * exactly one trace entry.
 */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
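/* Without dynamic ftrace there is nothing to test; report success. */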
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
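/*
 * Verify the irqsoff tracer: disable interrupts for ~100us and check
 * that the latency was captured in the max_tr snapshot buffer.
 */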
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
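/*
 * Same idea as the irqsoff test, but with preemption disabled instead
 * of interrupts.
 */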
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
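/*
 * Combined test: preemption and interrupts are disabled together and
 * re-enabled in opposite order, and both trace buffers are checked,
 * in two passes.
 */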
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
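/*
 * Helper thread for the wakeup test below: raise itself to RT
 * priority, signal readiness, then sleep until woken by the test.
 */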
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread, it doesn't need to be too high */

	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Let the test know we have a new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

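/*
 * Verify the wakeup-latency tracer: wake the sleeping RT thread and
 * check that the wakeup was recorded in the max_tr snapshot buffer.
 */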
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that, for some
	 * strange reason, the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
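/*
 * Verify the context-switch tracer: trace for 1/10 second and make
 * sure at least one sched_switch entry was recorded.
 */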
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */