// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

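/*
 * Consume every event queued on one CPU and make sure each entry is
 * of a type the selftests can produce. Returns 0 on success; on a
 * corrupted buffer it disables tracing and returns -1.
 */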
static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we loop
		 * more than that, there's something wrong with the
		 * ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

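/*
 * Exercise several ftrace_ops with different filters at once.
 * Probes 1-3 are filtered on the two test functions as set up below,
 * the kzalloc'd dyn_ops has no filter (it traces everything), and for
 * cnt > 1 tr->ops runs the "global" callback as well. The expected
 * counter values after each call are:
 *
 *	call				probe1	probe2	probe3	dyn
 *	DYN_FTRACE_TEST_NAME()		1	0	1	-
 *	DYN_FTRACE_TEST_NAME2()		1	1	2	-
 *	(register dyn_ops)
 *	DYN_FTRACE_TEST_NAME()		2	1	3	>0
 *	DYN_FTRACE_TEST_NAME2()		2	2	4	>0
 */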
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it out */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection.
	 * By calling this function again, we should recurse back
	 * into this function and count again. But this only happens
	 * if the arch supports all of the ftrace features and nothing
	 * else is using the function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

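/*
 * test_rec_probe does not set FTRACE_OPS_FL_RECURSION_SAFE, so the
 * ftrace infrastructure itself must cut off the recursive call and
 * the callback should run once (or twice when the recursion
 * protection transitions between contexts). test_recsafe_probe
 * claims to be recursion safe and recurses once on purpose, so its
 * callback is expected to run exactly twice.
 */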
static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};

static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between context,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
 out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};

/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

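/*
 * The irqsoff, preemptoff and preemptirqsoff selftests below share
 * the same pattern: init the tracer, clear tr->max_latency, create a
 * short latency (~100us with irqs and/or preemption disabled), then
 * check that both the per-CPU buffer and the max (snapshot) buffer
 * hold sane entries before restoring the saved max latency.
 */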
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	tracing_start();
 out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

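/*
 * The wakeup test creates a SCHED_DEADLINE kthread and uses the
 * is_ready completion as a handshake: the thread completes it once
 * its scheduling policy is set, sleeps until ->go is set, and then
 * completes it again right after waking up so the test knows the
 * wakeup it wants to measure has actually happened.
 */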
struct wakeup_test_data {
	struct completion is_ready;
	int go;
};

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Make it known that we have a new prio */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */