/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we loop
		 * more times than that, something is wrong with the
		 * ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func	= trace_selftest_test_probe1_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func	= trace_selftest_test_probe2_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func	= trace_selftest_test_probe3_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func	= trace_selftest_test_global_func,
	.flags	= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out_free;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
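	/*
	 * Note: __stringify() (from linux/stringify.h) expands its
	 * argument before stringifying it, so the pattern below is a
	 * compile-time string of the real test-function name with a
	 * leading '*' glob, letting the dot-prefixed PPC64 symbol
	 * match as well.
	 */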
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all of the ftrace features and nothing else is using
	 * the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func	= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func	= trace_selftest_test_recursion_safe_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func	= trace_selftest_test_regs_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
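	/*
	 * Without CONFIG_DYNAMIC_FTRACE_WITH_REGS, a callback asking for
	 * FTRACE_OPS_FL_SAVE_REGS must be refused. The retry below falls
	 * back to FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED, which is expected
	 * to register anyway and then hand the callback a NULL pt_regs,
	 * matching the TRACE_SELFTEST_REGS_NOT_FOUND case checked later.
	 */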
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the caller know we are now running at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the RT thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&isrt);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&isrt);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	printk("ret = %d\n", ret);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
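
/*
 * Note on DYN_FTRACE_TEST_NAME() and DYN_FTRACE_TEST_NAME2(): they are not
 * defined in this file. In the kernel tree they live in a separate
 * compilation unit (kernel/trace/trace_selftest_dynamic.c) so the compiler
 * cannot inline them and their mcount/fentry call sites remain available
 * for ftrace to patch. A rough sketch of that file (illustrative, not a
 * verbatim copy):
 *
 *	int DYN_FTRACE_TEST_NAME(void)
 *	{
 *		return 0;	// exists only to be traced
 *	}
 *
 *	int DYN_FTRACE_TEST_NAME2(void)
 *	{
 *		return 0;	// second target for the filter tests
 *	}
 */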