xref: /openbmc/linux/kernel/trace/trace_selftest.c (revision f15cbe6f1a4b4d9df59142fc8e4abb973302cf44)
/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

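/*
 * Only the entry types that the tracers exercised below can produce are
 * considered valid; anything else means the buffer is corrupted.
 */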
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_SPECIAL:
		return 1;
	}
	return 0;
}

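/*
 * Walk one CPU's list of trace pages and check that every recorded
 * entry has a known type and that the page chain is consistent with
 * the number of entries the buffer claims to hold.
 */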
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	check_pages(data);
	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	raw_local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);
	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}
	__raw_spin_unlock(&ftrace_max_lock);
	raw_local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
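/*
 * @func is expected to be the function named by DYN_FTRACE_TEST_NAME,
 * which is built as a separate object (trace_selftest_dynamic.c in this
 * tree) so the compiler cannot inline it and it keeps its mcount call
 * site.
 */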
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FTRACE */
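
/*
 * The selftests in this file are run at tracer registration time when
 * CONFIG_FTRACE_STARTUP_TEST is enabled.  A tracer hooks one in through
 * the .selftest member of its struct tracer; roughly (illustrative
 * sketch only -- the authoritative field list lives in trace.h):
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "ftrace",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *		.ctrl_update	= function_trace_ctrl_update,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */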

#ifdef CONFIG_IRQSOFF_TRACER
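/*
 * Enable the irqsoff tracer, open a known irqs-off window with
 * udelay(100), and verify that the max-latency snapshot buffer
 * recorded entries for it.
 */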
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
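/*
 * Same idea as the irqsoff test, but the known latency window is
 * created by disabling preemption around udelay(100).
 */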
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
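/*
 * Combined test: open a window with both preemption and interrupts
 * disabled, run the sequence twice, and after each pass check both the
 * live buffer and the max-latency snapshot.
 */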
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
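/*
 * The wakeup tracer is exercised with a dedicated SCHED_FIFO kthread:
 * the test lets the thread go to sleep, wakes it, and then verifies
 * that the max-latency snapshot captured the wakeup.
 */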
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; the priority doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, in which
	 * case we would wake up a task that is already awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case is that we disable tracing.
	 * Honestly, if this race does happen something is horribly
	 * wrong with the system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
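/*
 * Enable the context-switch tracer, sleep for a bit so some scheduling
 * activity is recorded, then verify the buffer is sane and non-empty.
 */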
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
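/*
 * Enable the sysprof tracer for 1/10 of a second and verify that the
 * resulting buffer is still sane (an empty buffer is not a failure
 * here).
 */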
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */