/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

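/*
 * Only the entry types exercised by these startup tests are treated
 * as valid; anything else found in the buffer counts as corruption.
 */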
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}

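/*
 * Walk one CPU's trace buffer and sanity-check it. The buffer is a
 * list of pages (data->trace_pages) holding ENTRIES_PER_PAGE entries
 * each; we follow page->lru.next from page to page, checking that
 * every recorded entry has a valid type and that the page list is
 * neither shorter nor longer than tr->entries implies.
 */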
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The first page of the trace buffer always holds valid
	 * entries, if any entries exist at all.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
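/*
 * Two-level stringification: __STR() stringifies its argument
 * literally, while STR() expands the argument first. Assuming
 * DYN_FTRACE_TEST_NAME is defined elsewhere (in trace.h) as
 * trace_selftest_dynamic_test_func, this gives:
 *
 *	__STR(DYN_FTRACE_TEST_NAME)  ->  "DYN_FTRACE_TEST_NAME"
 *	STR(DYN_FTRACE_TEST_NAME)    ->  "trace_selftest_dynamic_test_func"
 */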

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The function tracer test that called us has PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/*
	 * The function is passed in by parameter to keep gcc from
	 * optimizing the call away.
	 */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function (length without the trailing NUL) */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  strlen(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
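/*
 * Without dynamic ftrace this collapses to a statement expression
 * that evaluates to 0, so callers can still check a "return value".
 */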
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep 1/10 of a second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
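/*
 * The irqsoff tracer records the longest stretch during which
 * interrupts stay disabled. The test below opens such a window on
 * purpose (irqs off for ~100us) and then expects the max-latency
 * snapshot buffer (max_tr) to contain entries for it.
 */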
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
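/*
 * Same idea as the irqsoff test above, but the latency window is
 * created by disabling preemption instead of interrupts.
 */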
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
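/*
 * Combined test: preemption and interrupts are disabled together,
 * and preemption is re-enabled while interrupts are still off, so
 * the critical section only ends at local_irq_enable(). The window
 * is opened twice, with the max latency reset in between, to check
 * that the tracer re-arms after a capture.
 */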
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test a second time, after resetting the max latency */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
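/*
 * The wakeup tracer measures how long the highest-priority runnable
 * task waits before it is scheduled in. The test creates an RT
 * kthread, lets it block, wakes it with tracing active, and expects
 * the max-latency buffer to contain the resulting wakeup trace.
 */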
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread; it doesn't need to be too high */

	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Let the test know we now run at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, in which
	 * case we would wake up a task that is already awake. But
	 * that is extremely unlikely, and the worst thing that can
	 * happen in such a case is that we disable tracing.
	 * Honestly, if this race does happen, something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
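/*
 * The context-switch tracer just has to record something while we
 * sleep; a 100ms msleep all but guarantees at least one switch.
 */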
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
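
/*
 * For reference, these selftests are run through tracer registration.
 * A minimal sketch, assuming the struct tracer layout used above
 * (init/reset/ctrl_update hooks) plus a selftest hook and the
 * CONFIG_FTRACE_STARTUP_TEST option; the tracer below is purely
 * illustrative, not a definition from this file:
 *
 *	static struct tracer example_trace __read_mostly = {
 *		.name		= "example",
 *		.init		= example_trace_init,
 *		.reset		= example_trace_reset,
 *		.ctrl_update	= example_trace_ctrl_update,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_sched_switch,
 *	#endif
 *	};
 *
 * register_tracer(&example_trace) would then run the selftest at
 * registration time.
 */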