1 /* Include in trace.c */ 2 3 #include <linux/stringify.h> 4 #include <linux/kthread.h> 5 #include <linux/delay.h> 6 #include <linux/slab.h> 7 8 static inline int trace_valid_entry(struct trace_entry *entry) 9 { 10 switch (entry->type) { 11 case TRACE_FN: 12 case TRACE_CTX: 13 case TRACE_WAKE: 14 case TRACE_STACK: 15 case TRACE_PRINT: 16 case TRACE_BRANCH: 17 case TRACE_GRAPH_ENT: 18 case TRACE_GRAPH_RET: 19 return 1; 20 } 21 return 0; 22 } 23 24 static int trace_test_buffer_cpu(struct trace_array *tr, int cpu) 25 { 26 struct ring_buffer_event *event; 27 struct trace_entry *entry; 28 unsigned int loops = 0; 29 30 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) { 31 entry = ring_buffer_event_data(event); 32 33 /* 34 * The ring buffer is a size of trace_buf_size, if 35 * we loop more than the size, there's something wrong 36 * with the ring buffer. 37 */ 38 if (loops++ > trace_buf_size) { 39 printk(KERN_CONT ".. bad ring buffer "); 40 goto failed; 41 } 42 if (!trace_valid_entry(entry)) { 43 printk(KERN_CONT ".. invalid entry %d ", 44 entry->type); 45 goto failed; 46 } 47 } 48 return 0; 49 50 failed: 51 /* disable tracing */ 52 tracing_disabled = 1; 53 printk(KERN_CONT ".. corrupted trace buffer .. "); 54 return -1; 55 } 56 57 /* 58 * Test the trace buffer to see if all the elements 59 * are still sane. 60 */ 61 static int trace_test_buffer(struct trace_array *tr, unsigned long *count) 62 { 63 unsigned long flags, cnt = 0; 64 int cpu, ret = 0; 65 66 /* Don't allow flipping of max traces now */ 67 local_irq_save(flags); 68 arch_spin_lock(&ftrace_max_lock); 69 70 cnt = ring_buffer_entries(tr->buffer); 71 72 /* 73 * The trace_test_buffer_cpu runs a while loop to consume all data. 74 * If the calling tracer is broken, and is constantly filling 75 * the buffer, this will run forever, and hard lock the box. 76 * We disable the ring buffer while we do this test to prevent 77 * a hard lock up. 78 */ 79 tracing_off(); 80 for_each_possible_cpu(cpu) { 81 ret = trace_test_buffer_cpu(tr, cpu); 82 if (ret) 83 break; 84 } 85 tracing_on(); 86 arch_spin_unlock(&ftrace_max_lock); 87 local_irq_restore(flags); 88 89 if (count) 90 *count = cnt; 91 92 return ret; 93 } 94 95 static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) 96 { 97 printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n", 98 trace->name, init_ret); 99 } 100 #ifdef CONFIG_FUNCTION_TRACER 101 102 #ifdef CONFIG_DYNAMIC_FTRACE 103 104 static int trace_selftest_test_probe1_cnt; 105 static void trace_selftest_test_probe1_func(unsigned long ip, 106 unsigned long pip, 107 struct ftrace_ops *op, 108 struct pt_regs *pt_regs) 109 { 110 trace_selftest_test_probe1_cnt++; 111 } 112 113 static int trace_selftest_test_probe2_cnt; 114 static void trace_selftest_test_probe2_func(unsigned long ip, 115 unsigned long pip, 116 struct ftrace_ops *op, 117 struct pt_regs *pt_regs) 118 { 119 trace_selftest_test_probe2_cnt++; 120 } 121 122 static int trace_selftest_test_probe3_cnt; 123 static void trace_selftest_test_probe3_func(unsigned long ip, 124 unsigned long pip, 125 struct ftrace_ops *op, 126 struct pt_regs *pt_regs) 127 { 128 trace_selftest_test_probe3_cnt++; 129 } 130 131 static int trace_selftest_test_global_cnt; 132 static void trace_selftest_test_global_func(unsigned long ip, 133 unsigned long pip, 134 struct ftrace_ops *op, 135 struct pt_regs *pt_regs) 136 { 137 trace_selftest_test_global_cnt++; 138 } 139 140 static int trace_selftest_test_dyn_cnt; 141 static void trace_selftest_test_dyn_func(unsigned long ip, 142 unsigned long pip, 143 struct ftrace_ops *op, 144 struct pt_regs *pt_regs) 145 { 146 trace_selftest_test_dyn_cnt++; 147 } 148 149 static struct ftrace_ops test_probe1 = { 150 .func = trace_selftest_test_probe1_func, 151 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 152 }; 153 154 static struct ftrace_ops test_probe2 = { 155 .func = trace_selftest_test_probe2_func, 156 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 157 }; 158 159 static struct ftrace_ops test_probe3 = { 160 .func = trace_selftest_test_probe3_func, 161 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 162 }; 163 164 static struct ftrace_ops test_global = { 165 .func = trace_selftest_test_global_func, 166 .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, 167 }; 168 169 static void print_counts(void) 170 { 171 printk("(%d %d %d %d %d) ", 172 trace_selftest_test_probe1_cnt, 173 trace_selftest_test_probe2_cnt, 174 trace_selftest_test_probe3_cnt, 175 trace_selftest_test_global_cnt, 176 trace_selftest_test_dyn_cnt); 177 } 178 179 static void reset_counts(void) 180 { 181 trace_selftest_test_probe1_cnt = 0; 182 trace_selftest_test_probe2_cnt = 0; 183 trace_selftest_test_probe3_cnt = 0; 184 trace_selftest_test_global_cnt = 0; 185 trace_selftest_test_dyn_cnt = 0; 186 } 187 188 static int trace_selftest_ops(int cnt) 189 { 190 int save_ftrace_enabled = ftrace_enabled; 191 struct ftrace_ops *dyn_ops; 192 char *func1_name; 193 char *func2_name; 194 int len1; 195 int len2; 196 int ret = -1; 197 198 printk(KERN_CONT "PASSED\n"); 199 pr_info("Testing dynamic ftrace ops #%d: ", cnt); 200 201 ftrace_enabled = 1; 202 reset_counts(); 203 204 /* Handle PPC64 '.' name */ 205 func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME); 206 func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2); 207 len1 = strlen(func1_name); 208 len2 = strlen(func2_name); 209 210 /* 211 * Probe 1 will trace function 1. 212 * Probe 2 will trace function 2. 213 * Probe 3 will trace functions 1 and 2. 214 */ 215 ftrace_set_filter(&test_probe1, func1_name, len1, 1); 216 ftrace_set_filter(&test_probe2, func2_name, len2, 1); 217 ftrace_set_filter(&test_probe3, func1_name, len1, 1); 218 ftrace_set_filter(&test_probe3, func2_name, len2, 0); 219 220 register_ftrace_function(&test_probe1); 221 register_ftrace_function(&test_probe2); 222 register_ftrace_function(&test_probe3); 223 register_ftrace_function(&test_global); 224 225 DYN_FTRACE_TEST_NAME(); 226 227 print_counts(); 228 229 if (trace_selftest_test_probe1_cnt != 1) 230 goto out; 231 if (trace_selftest_test_probe2_cnt != 0) 232 goto out; 233 if (trace_selftest_test_probe3_cnt != 1) 234 goto out; 235 if (trace_selftest_test_global_cnt == 0) 236 goto out; 237 238 DYN_FTRACE_TEST_NAME2(); 239 240 print_counts(); 241 242 if (trace_selftest_test_probe1_cnt != 1) 243 goto out; 244 if (trace_selftest_test_probe2_cnt != 1) 245 goto out; 246 if (trace_selftest_test_probe3_cnt != 2) 247 goto out; 248 249 /* Add a dynamic probe */ 250 dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL); 251 if (!dyn_ops) { 252 printk("MEMORY ERROR "); 253 goto out; 254 } 255 256 dyn_ops->func = trace_selftest_test_dyn_func; 257 258 register_ftrace_function(dyn_ops); 259 260 trace_selftest_test_global_cnt = 0; 261 262 DYN_FTRACE_TEST_NAME(); 263 264 print_counts(); 265 266 if (trace_selftest_test_probe1_cnt != 2) 267 goto out_free; 268 if (trace_selftest_test_probe2_cnt != 1) 269 goto out_free; 270 if (trace_selftest_test_probe3_cnt != 3) 271 goto out_free; 272 if (trace_selftest_test_global_cnt == 0) 273 goto out; 274 if (trace_selftest_test_dyn_cnt == 0) 275 goto out_free; 276 277 DYN_FTRACE_TEST_NAME2(); 278 279 print_counts(); 280 281 if (trace_selftest_test_probe1_cnt != 2) 282 goto out_free; 283 if (trace_selftest_test_probe2_cnt != 2) 284 goto out_free; 285 if (trace_selftest_test_probe3_cnt != 4) 286 goto out_free; 287 288 ret = 0; 289 out_free: 290 unregister_ftrace_function(dyn_ops); 291 kfree(dyn_ops); 292 293 out: 294 /* Purposely unregister in the same order */ 295 unregister_ftrace_function(&test_probe1); 296 unregister_ftrace_function(&test_probe2); 297 unregister_ftrace_function(&test_probe3); 298 unregister_ftrace_function(&test_global); 299 300 /* Make sure everything is off */ 301 reset_counts(); 302 DYN_FTRACE_TEST_NAME(); 303 DYN_FTRACE_TEST_NAME(); 304 305 if (trace_selftest_test_probe1_cnt || 306 trace_selftest_test_probe2_cnt || 307 trace_selftest_test_probe3_cnt || 308 trace_selftest_test_global_cnt || 309 trace_selftest_test_dyn_cnt) 310 ret = -1; 311 312 ftrace_enabled = save_ftrace_enabled; 313 314 return ret; 315 } 316 317 /* Test dynamic code modification and ftrace filters */ 318 int trace_selftest_startup_dynamic_tracing(struct tracer *trace, 319 struct trace_array *tr, 320 int (*func)(void)) 321 { 322 int save_ftrace_enabled = ftrace_enabled; 323 unsigned long count; 324 char *func_name; 325 int ret; 326 327 /* The ftrace test PASSED */ 328 printk(KERN_CONT "PASSED\n"); 329 pr_info("Testing dynamic ftrace: "); 330 331 /* enable tracing, and record the filter function */ 332 ftrace_enabled = 1; 333 334 /* passed in by parameter to fool gcc from optimizing */ 335 func(); 336 337 /* 338 * Some archs *cough*PowerPC*cough* add characters to the 339 * start of the function names. We simply put a '*' to 340 * accommodate them. 341 */ 342 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); 343 344 /* filter only on our function */ 345 ftrace_set_global_filter(func_name, strlen(func_name), 1); 346 347 /* enable tracing */ 348 ret = tracer_init(trace, tr); 349 if (ret) { 350 warn_failed_init_tracer(trace, ret); 351 goto out; 352 } 353 354 /* Sleep for a 1/10 of a second */ 355 msleep(100); 356 357 /* we should have nothing in the buffer */ 358 ret = trace_test_buffer(tr, &count); 359 if (ret) 360 goto out; 361 362 if (count) { 363 ret = -1; 364 printk(KERN_CONT ".. filter did not filter .. "); 365 goto out; 366 } 367 368 /* call our function again */ 369 func(); 370 371 /* sleep again */ 372 msleep(100); 373 374 /* stop the tracing. */ 375 tracing_stop(); 376 ftrace_enabled = 0; 377 378 /* check the trace buffer */ 379 ret = trace_test_buffer(tr, &count); 380 tracing_start(); 381 382 /* we should only have one item */ 383 if (!ret && count != 1) { 384 trace->reset(tr); 385 printk(KERN_CONT ".. filter failed count=%ld ..", count); 386 ret = -1; 387 goto out; 388 } 389 390 /* Test the ops with global tracing running */ 391 ret = trace_selftest_ops(1); 392 trace->reset(tr); 393 394 out: 395 ftrace_enabled = save_ftrace_enabled; 396 397 /* Enable tracing on all functions again */ 398 ftrace_set_global_filter(NULL, 0, 1); 399 400 /* Test the ops with global tracing off */ 401 if (!ret) 402 ret = trace_selftest_ops(2); 403 404 return ret; 405 } 406 407 static int trace_selftest_recursion_cnt; 408 static void trace_selftest_test_recursion_func(unsigned long ip, 409 unsigned long pip, 410 struct ftrace_ops *op, 411 struct pt_regs *pt_regs) 412 { 413 /* 414 * This function is registered without the recursion safe flag. 415 * The ftrace infrastructure should provide the recursion 416 * protection. If not, this will crash the kernel! 417 */ 418 trace_selftest_recursion_cnt++; 419 DYN_FTRACE_TEST_NAME(); 420 } 421 422 static void trace_selftest_test_recursion_safe_func(unsigned long ip, 423 unsigned long pip, 424 struct ftrace_ops *op, 425 struct pt_regs *pt_regs) 426 { 427 /* 428 * We said we would provide our own recursion. By calling 429 * this function again, we should recurse back into this function 430 * and count again. But this only happens if the arch supports 431 * all of ftrace features and nothing else is using the function 432 * tracing utility. 433 */ 434 if (trace_selftest_recursion_cnt++) 435 return; 436 DYN_FTRACE_TEST_NAME(); 437 } 438 439 static struct ftrace_ops test_rec_probe = { 440 .func = trace_selftest_test_recursion_func, 441 }; 442 443 static struct ftrace_ops test_recsafe_probe = { 444 .func = trace_selftest_test_recursion_safe_func, 445 .flags = FTRACE_OPS_FL_RECURSION_SAFE, 446 }; 447 448 static int 449 trace_selftest_function_recursion(void) 450 { 451 int save_ftrace_enabled = ftrace_enabled; 452 char *func_name; 453 int len; 454 int ret; 455 int cnt; 456 457 /* The previous test PASSED */ 458 pr_cont("PASSED\n"); 459 pr_info("Testing ftrace recursion: "); 460 461 462 /* enable tracing, and record the filter function */ 463 ftrace_enabled = 1; 464 465 /* Handle PPC64 '.' name */ 466 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); 467 len = strlen(func_name); 468 469 ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1); 470 if (ret) { 471 pr_cont("*Could not set filter* "); 472 goto out; 473 } 474 475 ret = register_ftrace_function(&test_rec_probe); 476 if (ret) { 477 pr_cont("*could not register callback* "); 478 goto out; 479 } 480 481 DYN_FTRACE_TEST_NAME(); 482 483 unregister_ftrace_function(&test_rec_probe); 484 485 ret = -1; 486 if (trace_selftest_recursion_cnt != 1) { 487 pr_cont("*callback not called once (%d)* ", 488 trace_selftest_recursion_cnt); 489 goto out; 490 } 491 492 trace_selftest_recursion_cnt = 1; 493 494 pr_cont("PASSED\n"); 495 pr_info("Testing ftrace recursion safe: "); 496 497 ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1); 498 if (ret) { 499 pr_cont("*Could not set filter* "); 500 goto out; 501 } 502 503 ret = register_ftrace_function(&test_recsafe_probe); 504 if (ret) { 505 pr_cont("*could not register callback* "); 506 goto out; 507 } 508 509 DYN_FTRACE_TEST_NAME(); 510 511 unregister_ftrace_function(&test_recsafe_probe); 512 513 /* 514 * If arch supports all ftrace features, and no other task 515 * was on the list, we should be fine. 516 */ 517 if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC) 518 cnt = 2; /* Should have recursed */ 519 else 520 cnt = 1; 521 522 ret = -1; 523 if (trace_selftest_recursion_cnt != cnt) { 524 pr_cont("*callback not called expected %d times (%d)* ", 525 cnt, trace_selftest_recursion_cnt); 526 goto out; 527 } 528 529 ret = 0; 530 out: 531 ftrace_enabled = save_ftrace_enabled; 532 533 return ret; 534 } 535 #else 536 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) 537 # define trace_selftest_function_recursion() ({ 0; }) 538 #endif /* CONFIG_DYNAMIC_FTRACE */ 539 540 static enum { 541 TRACE_SELFTEST_REGS_START, 542 TRACE_SELFTEST_REGS_FOUND, 543 TRACE_SELFTEST_REGS_NOT_FOUND, 544 } trace_selftest_regs_stat; 545 546 static void trace_selftest_test_regs_func(unsigned long ip, 547 unsigned long pip, 548 struct ftrace_ops *op, 549 struct pt_regs *pt_regs) 550 { 551 if (pt_regs) 552 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND; 553 else 554 trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND; 555 } 556 557 static struct ftrace_ops test_regs_probe = { 558 .func = trace_selftest_test_regs_func, 559 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS, 560 }; 561 562 static int 563 trace_selftest_function_regs(void) 564 { 565 int save_ftrace_enabled = ftrace_enabled; 566 char *func_name; 567 int len; 568 int ret; 569 int supported = 0; 570 571 #ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS 572 supported = 1; 573 #endif 574 575 /* The previous test PASSED */ 576 pr_cont("PASSED\n"); 577 pr_info("Testing ftrace regs%s: ", 578 !supported ? "(no arch support)" : ""); 579 580 /* enable tracing, and record the filter function */ 581 ftrace_enabled = 1; 582 583 /* Handle PPC64 '.' name */ 584 func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); 585 len = strlen(func_name); 586 587 ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1); 588 /* 589 * If DYNAMIC_FTRACE is not set, then we just trace all functions. 590 * This test really doesn't care. 591 */ 592 if (ret && ret != -ENODEV) { 593 pr_cont("*Could not set filter* "); 594 goto out; 595 } 596 597 ret = register_ftrace_function(&test_regs_probe); 598 /* 599 * Now if the arch does not support passing regs, then this should 600 * have failed. 601 */ 602 if (!supported) { 603 if (!ret) { 604 pr_cont("*registered save-regs without arch support* "); 605 goto out; 606 } 607 test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED; 608 ret = register_ftrace_function(&test_regs_probe); 609 } 610 if (ret) { 611 pr_cont("*could not register callback* "); 612 goto out; 613 } 614 615 616 DYN_FTRACE_TEST_NAME(); 617 618 unregister_ftrace_function(&test_regs_probe); 619 620 ret = -1; 621 622 switch (trace_selftest_regs_stat) { 623 case TRACE_SELFTEST_REGS_START: 624 pr_cont("*callback never called* "); 625 goto out; 626 627 case TRACE_SELFTEST_REGS_FOUND: 628 if (supported) 629 break; 630 pr_cont("*callback received regs without arch support* "); 631 goto out; 632 633 case TRACE_SELFTEST_REGS_NOT_FOUND: 634 if (!supported) 635 break; 636 pr_cont("*callback received NULL regs* "); 637 goto out; 638 } 639 640 ret = 0; 641 out: 642 ftrace_enabled = save_ftrace_enabled; 643 644 return ret; 645 } 646 647 /* 648 * Simple verification test of ftrace function tracer. 649 * Enable ftrace, sleep 1/10 second, and then read the trace 650 * buffer to see if all is in order. 651 */ 652 int 653 trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) 654 { 655 int save_ftrace_enabled = ftrace_enabled; 656 unsigned long count; 657 int ret; 658 659 /* make sure msleep has been recorded */ 660 msleep(1); 661 662 /* start the tracing */ 663 ftrace_enabled = 1; 664 665 ret = tracer_init(trace, tr); 666 if (ret) { 667 warn_failed_init_tracer(trace, ret); 668 goto out; 669 } 670 671 /* Sleep for a 1/10 of a second */ 672 msleep(100); 673 /* stop the tracing. */ 674 tracing_stop(); 675 ftrace_enabled = 0; 676 677 /* check the trace buffer */ 678 ret = trace_test_buffer(tr, &count); 679 trace->reset(tr); 680 tracing_start(); 681 682 if (!ret && !count) { 683 printk(KERN_CONT ".. no entries found .."); 684 ret = -1; 685 goto out; 686 } 687 688 ret = trace_selftest_startup_dynamic_tracing(trace, tr, 689 DYN_FTRACE_TEST_NAME); 690 if (ret) 691 goto out; 692 693 ret = trace_selftest_function_recursion(); 694 if (ret) 695 goto out; 696 697 ret = trace_selftest_function_regs(); 698 out: 699 ftrace_enabled = save_ftrace_enabled; 700 701 /* kill ftrace totally if we failed */ 702 if (ret) 703 ftrace_kill(); 704 705 return ret; 706 } 707 #endif /* CONFIG_FUNCTION_TRACER */ 708 709 710 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 711 712 /* Maximum number of functions to trace before diagnosing a hang */ 713 #define GRAPH_MAX_FUNC_TEST 100000000 714 715 static void 716 __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode); 717 static unsigned int graph_hang_thresh; 718 719 /* Wrap the real function entry probe to avoid possible hanging */ 720 static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) 721 { 722 /* This is harmlessly racy, we want to approximately detect a hang */ 723 if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) { 724 ftrace_graph_stop(); 725 printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); 726 if (ftrace_dump_on_oops) 727 __ftrace_dump(false, DUMP_ALL); 728 return 0; 729 } 730 731 return trace_graph_entry(trace); 732 } 733 734 /* 735 * Pretty much the same than for the function tracer from which the selftest 736 * has been borrowed. 737 */ 738 int 739 trace_selftest_startup_function_graph(struct tracer *trace, 740 struct trace_array *tr) 741 { 742 int ret; 743 unsigned long count; 744 745 /* 746 * Simulate the init() callback but we attach a watchdog callback 747 * to detect and recover from possible hangs 748 */ 749 tracing_reset_online_cpus(tr); 750 set_graph_array(tr); 751 ret = register_ftrace_graph(&trace_graph_return, 752 &trace_graph_entry_watchdog); 753 if (ret) { 754 warn_failed_init_tracer(trace, ret); 755 goto out; 756 } 757 tracing_start_cmdline_record(); 758 759 /* Sleep for a 1/10 of a second */ 760 msleep(100); 761 762 /* Have we just recovered from a hang? */ 763 if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) { 764 tracing_selftest_disabled = true; 765 ret = -1; 766 goto out; 767 } 768 769 tracing_stop(); 770 771 /* check the trace buffer */ 772 ret = trace_test_buffer(tr, &count); 773 774 trace->reset(tr); 775 tracing_start(); 776 777 if (!ret && !count) { 778 printk(KERN_CONT ".. no entries found .."); 779 ret = -1; 780 goto out; 781 } 782 783 /* Don't test dynamic tracing, the function tracer already did */ 784 785 out: 786 /* Stop it if we failed */ 787 if (ret) 788 ftrace_graph_stop(); 789 790 return ret; 791 } 792 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 793 794 795 #ifdef CONFIG_IRQSOFF_TRACER 796 int 797 trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) 798 { 799 unsigned long save_max = tracing_max_latency; 800 unsigned long count; 801 int ret; 802 803 /* start the tracing */ 804 ret = tracer_init(trace, tr); 805 if (ret) { 806 warn_failed_init_tracer(trace, ret); 807 return ret; 808 } 809 810 /* reset the max latency */ 811 tracing_max_latency = 0; 812 /* disable interrupts for a bit */ 813 local_irq_disable(); 814 udelay(100); 815 local_irq_enable(); 816 817 /* 818 * Stop the tracer to avoid a warning subsequent 819 * to buffer flipping failure because tracing_stop() 820 * disables the tr and max buffers, making flipping impossible 821 * in case of parallels max irqs off latencies. 822 */ 823 trace->stop(tr); 824 /* stop the tracing. */ 825 tracing_stop(); 826 /* check both trace buffers */ 827 ret = trace_test_buffer(tr, NULL); 828 if (!ret) 829 ret = trace_test_buffer(&max_tr, &count); 830 trace->reset(tr); 831 tracing_start(); 832 833 if (!ret && !count) { 834 printk(KERN_CONT ".. no entries found .."); 835 ret = -1; 836 } 837 838 tracing_max_latency = save_max; 839 840 return ret; 841 } 842 #endif /* CONFIG_IRQSOFF_TRACER */ 843 844 #ifdef CONFIG_PREEMPT_TRACER 845 int 846 trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) 847 { 848 unsigned long save_max = tracing_max_latency; 849 unsigned long count; 850 int ret; 851 852 /* 853 * Now that the big kernel lock is no longer preemptable, 854 * and this is called with the BKL held, it will always 855 * fail. If preemption is already disabled, simply 856 * pass the test. When the BKL is removed, or becomes 857 * preemptible again, we will once again test this, 858 * so keep it in. 859 */ 860 if (preempt_count()) { 861 printk(KERN_CONT "can not test ... force "); 862 return 0; 863 } 864 865 /* start the tracing */ 866 ret = tracer_init(trace, tr); 867 if (ret) { 868 warn_failed_init_tracer(trace, ret); 869 return ret; 870 } 871 872 /* reset the max latency */ 873 tracing_max_latency = 0; 874 /* disable preemption for a bit */ 875 preempt_disable(); 876 udelay(100); 877 preempt_enable(); 878 879 /* 880 * Stop the tracer to avoid a warning subsequent 881 * to buffer flipping failure because tracing_stop() 882 * disables the tr and max buffers, making flipping impossible 883 * in case of parallels max preempt off latencies. 884 */ 885 trace->stop(tr); 886 /* stop the tracing. */ 887 tracing_stop(); 888 /* check both trace buffers */ 889 ret = trace_test_buffer(tr, NULL); 890 if (!ret) 891 ret = trace_test_buffer(&max_tr, &count); 892 trace->reset(tr); 893 tracing_start(); 894 895 if (!ret && !count) { 896 printk(KERN_CONT ".. no entries found .."); 897 ret = -1; 898 } 899 900 tracing_max_latency = save_max; 901 902 return ret; 903 } 904 #endif /* CONFIG_PREEMPT_TRACER */ 905 906 #if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER) 907 int 908 trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr) 909 { 910 unsigned long save_max = tracing_max_latency; 911 unsigned long count; 912 int ret; 913 914 /* 915 * Now that the big kernel lock is no longer preemptable, 916 * and this is called with the BKL held, it will always 917 * fail. If preemption is already disabled, simply 918 * pass the test. When the BKL is removed, or becomes 919 * preemptible again, we will once again test this, 920 * so keep it in. 921 */ 922 if (preempt_count()) { 923 printk(KERN_CONT "can not test ... force "); 924 return 0; 925 } 926 927 /* start the tracing */ 928 ret = tracer_init(trace, tr); 929 if (ret) { 930 warn_failed_init_tracer(trace, ret); 931 goto out_no_start; 932 } 933 934 /* reset the max latency */ 935 tracing_max_latency = 0; 936 937 /* disable preemption and interrupts for a bit */ 938 preempt_disable(); 939 local_irq_disable(); 940 udelay(100); 941 preempt_enable(); 942 /* reverse the order of preempt vs irqs */ 943 local_irq_enable(); 944 945 /* 946 * Stop the tracer to avoid a warning subsequent 947 * to buffer flipping failure because tracing_stop() 948 * disables the tr and max buffers, making flipping impossible 949 * in case of parallels max irqs/preempt off latencies. 950 */ 951 trace->stop(tr); 952 /* stop the tracing. */ 953 tracing_stop(); 954 /* check both trace buffers */ 955 ret = trace_test_buffer(tr, NULL); 956 if (ret) 957 goto out; 958 959 ret = trace_test_buffer(&max_tr, &count); 960 if (ret) 961 goto out; 962 963 if (!ret && !count) { 964 printk(KERN_CONT ".. no entries found .."); 965 ret = -1; 966 goto out; 967 } 968 969 /* do the test by disabling interrupts first this time */ 970 tracing_max_latency = 0; 971 tracing_start(); 972 trace->start(tr); 973 974 preempt_disable(); 975 local_irq_disable(); 976 udelay(100); 977 preempt_enable(); 978 /* reverse the order of preempt vs irqs */ 979 local_irq_enable(); 980 981 trace->stop(tr); 982 /* stop the tracing. */ 983 tracing_stop(); 984 /* check both trace buffers */ 985 ret = trace_test_buffer(tr, NULL); 986 if (ret) 987 goto out; 988 989 ret = trace_test_buffer(&max_tr, &count); 990 991 if (!ret && !count) { 992 printk(KERN_CONT ".. no entries found .."); 993 ret = -1; 994 goto out; 995 } 996 997 out: 998 tracing_start(); 999 out_no_start: 1000 trace->reset(tr); 1001 tracing_max_latency = save_max; 1002 1003 return ret; 1004 } 1005 #endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */ 1006 1007 #ifdef CONFIG_NOP_TRACER 1008 int 1009 trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr) 1010 { 1011 /* What could possibly go wrong? */ 1012 return 0; 1013 } 1014 #endif 1015 1016 #ifdef CONFIG_SCHED_TRACER 1017 static int trace_wakeup_test_thread(void *data) 1018 { 1019 /* Make this a RT thread, doesn't need to be too high */ 1020 static const struct sched_param param = { .sched_priority = 5 }; 1021 struct completion *x = data; 1022 1023 sched_setscheduler(current, SCHED_FIFO, ¶m); 1024 1025 /* Make it know we have a new prio */ 1026 complete(x); 1027 1028 /* now go to sleep and let the test wake us up */ 1029 set_current_state(TASK_INTERRUPTIBLE); 1030 schedule(); 1031 1032 complete(x); 1033 1034 /* we are awake, now wait to disappear */ 1035 while (!kthread_should_stop()) { 1036 /* 1037 * This is an RT task, do short sleeps to let 1038 * others run. 1039 */ 1040 msleep(100); 1041 } 1042 1043 return 0; 1044 } 1045 1046 int 1047 trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) 1048 { 1049 unsigned long save_max = tracing_max_latency; 1050 struct task_struct *p; 1051 struct completion isrt; 1052 unsigned long count; 1053 int ret; 1054 1055 init_completion(&isrt); 1056 1057 /* create a high prio thread */ 1058 p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test"); 1059 if (IS_ERR(p)) { 1060 printk(KERN_CONT "Failed to create ftrace wakeup test thread "); 1061 return -1; 1062 } 1063 1064 /* make sure the thread is running at an RT prio */ 1065 wait_for_completion(&isrt); 1066 1067 /* start the tracing */ 1068 ret = tracer_init(trace, tr); 1069 if (ret) { 1070 warn_failed_init_tracer(trace, ret); 1071 return ret; 1072 } 1073 1074 /* reset the max latency */ 1075 tracing_max_latency = 0; 1076 1077 while (p->on_rq) { 1078 /* 1079 * Sleep to make sure the RT thread is asleep too. 1080 * On virtual machines we can't rely on timings, 1081 * but we want to make sure this test still works. 1082 */ 1083 msleep(100); 1084 } 1085 1086 init_completion(&isrt); 1087 1088 wake_up_process(p); 1089 1090 /* Wait for the task to wake up */ 1091 wait_for_completion(&isrt); 1092 1093 /* stop the tracing. */ 1094 tracing_stop(); 1095 /* check both trace buffers */ 1096 ret = trace_test_buffer(tr, NULL); 1097 printk("ret = %d\n", ret); 1098 if (!ret) 1099 ret = trace_test_buffer(&max_tr, &count); 1100 1101 1102 trace->reset(tr); 1103 tracing_start(); 1104 1105 tracing_max_latency = save_max; 1106 1107 /* kill the thread */ 1108 kthread_stop(p); 1109 1110 if (!ret && !count) { 1111 printk(KERN_CONT ".. no entries found .."); 1112 ret = -1; 1113 } 1114 1115 return ret; 1116 } 1117 #endif /* CONFIG_SCHED_TRACER */ 1118 1119 #ifdef CONFIG_CONTEXT_SWITCH_TRACER 1120 int 1121 trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr) 1122 { 1123 unsigned long count; 1124 int ret; 1125 1126 /* start the tracing */ 1127 ret = tracer_init(trace, tr); 1128 if (ret) { 1129 warn_failed_init_tracer(trace, ret); 1130 return ret; 1131 } 1132 1133 /* Sleep for a 1/10 of a second */ 1134 msleep(100); 1135 /* stop the tracing. */ 1136 tracing_stop(); 1137 /* check the trace buffer */ 1138 ret = trace_test_buffer(tr, &count); 1139 trace->reset(tr); 1140 tracing_start(); 1141 1142 if (!ret && !count) { 1143 printk(KERN_CONT ".. no entries found .."); 1144 ret = -1; 1145 } 1146 1147 return ret; 1148 } 1149 #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ 1150 1151 #ifdef CONFIG_BRANCH_TRACER 1152 int 1153 trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) 1154 { 1155 unsigned long count; 1156 int ret; 1157 1158 /* start the tracing */ 1159 ret = tracer_init(trace, tr); 1160 if (ret) { 1161 warn_failed_init_tracer(trace, ret); 1162 return ret; 1163 } 1164 1165 /* Sleep for a 1/10 of a second */ 1166 msleep(100); 1167 /* stop the tracing. */ 1168 tracing_stop(); 1169 /* check the trace buffer */ 1170 ret = trace_test_buffer(tr, &count); 1171 trace->reset(tr); 1172 tracing_start(); 1173 1174 if (!ret && !count) { 1175 printk(KERN_CONT ".. no entries found .."); 1176 ret = -1; 1177 } 1178 1179 return ret; 1180 } 1181 #endif /* CONFIG_BRANCH_TRACER */ 1182 1183