/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
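	/*
	 * Note: tracing_off() only stops new writes; ring_buffer_consume()
	 * in trace_test_buffer_cpu() can still read out whatever is
	 * already in the buffer.
	 */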
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
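	 * (The last argument to ftrace_set_filter() below is 'reset':
	 *  probe 3 first resets its filter to function 1, then appends
	 *  function 2 with reset == 0.)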
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it out */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names.
	 * We simply put a '*' to accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection.
	 * By calling this function again, we should recurse back
	 * into this function and count again. But this only happens
	 * if the arch supports all ftrace features and nothing else
	 * is using the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
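	/*
	 * The recursion callbacks above call DYN_FTRACE_TEST_NAME()
	 * themselves, and the filter below traces exactly that function,
	 * so each callback invocation re-enters ftrace.
	 */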
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
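	/*
	 * With no arch support, fall back to SAVE_REGS_IF_SUPPORTED below:
	 * the callback can still be registered and called, just without
	 * a regs pointer.
	 */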
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct completion *x = data;

	sched_setattr(current, &attr);

	/* Let the test know we now run with the new scheduling policy */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This will likely be the system top priority
		 * task, do short sleeps to let others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct completion is_ready;
	unsigned long count;
	int ret;

	init_completion(&is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&is_ready);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->trace_buffer, NULL);
	printk("ret = %d\n", ret);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */