// SPDX-License-Identifier: GPL-2.0
/* Include in trace.c */

#include <uapi/linux/sched/types.h>
#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct array_buffer *buf, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(buf->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries; if
		 * we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int __maybe_unused trace_test_buffer(struct array_buffer *buf, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&buf->tr->max_lock);

	cnt = ring_buffer_entries(buf->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(buf, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&buf->tr->max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
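
/*
 * How these selftests are invoked (a sketch, not code in this file):
 * with CONFIG_FTRACE_STARTUP_TEST enabled, each tracer points the
 * ->selftest member of its struct tracer at one of the
 * trace_selftest_startup_*() functions below, roughly:
 *
 *	static struct tracer function_trace __tracer_data = {
 *		.name		= "function",
 *		...
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 *
 * register_tracer() then runs the selftest once at registration time
 * and refuses to register the tracer if it fails.
 */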
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct ftrace_regs *fregs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct ftrace_regs *fregs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func = trace_selftest_test_probe1_func,
};

static struct ftrace_ops test_probe2 = {
	.func = trace_selftest_test_probe2_func,
};

static struct ftrace_ops test_probe3 = {
	.func = trace_selftest_test_probe3_func,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}
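
/*
 * Exercise several ftrace_ops at once and verify the per-probe hit
 * counts after each call. With the filters set up below, the expected
 * counters evolve like this (probe1 probe2 probe3 global dyn):
 *
 *	DYN_FTRACE_TEST_NAME()		(1 0 1 * 0)
 *	DYN_FTRACE_TEST_NAME2()		(1 1 2 * 0)
 *	dynamic probe registered,
 *	DYN_FTRACE_TEST_NAME()		(2 1 3 * *)
 *	DYN_FTRACE_TEST_NAME2()		(2 2 4 * *)
 *	probe3 filter for NAME removed,
 *	DYN_FTRACE_TEST_NAME()		(3 2 4 * *)
 *	DYN_FTRACE_TEST_NAME2()		(3 3 5 * *)
 *
 * where '*' is only required to be non-zero (and the global counter
 * only when cnt > 1, i.e. when the global ops is registered).
 */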
static int trace_selftest_ops(struct trace_array *tr, int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	/* First time we are running with main function */
	if (cnt > 1) {
		ftrace_init_array_ops(tr, trace_selftest_test_global_func);
		register_ftrace_function(tr->ops);
	}

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out;
	}

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
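	/*
	 * A leading '!' in a filter string removes the matching
	 * functions from the ops' filter hash instead of adding them,
	 * the same notation accepted when writing to set_ftrace_filter
	 * in tracefs.
	 */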
	/* Remove trace function from probe 3 */
	func1_name = "!" __stringify(DYN_FTRACE_TEST_NAME);
	len1 = strlen(func1_name);

	ftrace_set_filter(&test_probe3, func1_name, len1, 0);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;
	if (cnt > 1) {
		if (trace_selftest_test_global_cnt == 0)
			goto out_free;
	}
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 3)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 5)
		goto out_free;

	ret = 0;
out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	if (cnt > 1)
		unregister_ftrace_function(tr->ops);
	ftrace_reset_array_ops(tr);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
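
/*
 * DYN_FTRACE_TEST_NAME() and DYN_FTRACE_TEST_NAME2() are defined out
 * of line (in trace_selftest_dynamic.c) so the compiler cannot inline
 * or elide the calls; every invocation is thus guaranteed to pass
 * through its ftrace entry hook and hit the registered probes.
 */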
/* Test dynamic code modification and ftrace filters */
static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
						  struct trace_array *tr,
						  int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(tr, 1);
	trace->reset(tr);

out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(tr, 2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct ftrace_regs *fregs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct ftrace_regs *fregs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
	.flags = FTRACE_OPS_FL_RECURSION,
};

static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
};
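
/*
 * Run the two recursion tests back to back. test_rec_probe sets
 * FTRACE_OPS_FL_RECURSION, so the ftrace core must block the
 * re-entrant DYN_FTRACE_TEST_NAME() call itself; test_recsafe_probe
 * leaves the flag clear, guards itself, and therefore expects exactly
 * one extra, self-inflicted recursion.
 */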
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	/*
	 * Recursion allows for transitions between contexts,
	 * and may call the callback twice.
	 */
	if (trace_selftest_recursion_cnt != 1 &&
	    trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called once (or twice) (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct ftrace_regs *fregs)
{
	struct pt_regs *regs = ftrace_get_regs(fregs);

	if (regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};
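
/*
 * FTRACE_OPS_FL_SAVE_REGS demands a full pt_regs from the arch and
 * makes registration fail where that is unsupported, while
 * FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED registers anyway and simply
 * hands the callback NULL regs. The test below exercises both paths.
 */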
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
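
/*
 * When a function filter was set on the kernel command line
 * (ftrace_filter=), the selftests below can no longer assume which
 * functions will show up in the buffer, so they force a PASS rather
 * than report a bogus failure.
 */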
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
__init int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops) {
			ftrace_dump(DUMP_ALL);
			/* ftrace_dump() disables tracing */
			tracing_on();
		}
		return 0;
	}

	return trace_graph_entry(trace);
}

static struct fgraph_ops fgraph_ops __initdata = {
	.entryfunc = &trace_graph_entry_watchdog,
	.retfunc = &trace_graph_return,
};

#if defined(CONFIG_DYNAMIC_FTRACE) && \
    defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
#define TEST_DIRECT_TRAMP
noinline __noclone static void trace_direct_tramp(void) { }
#endif
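
/*
 * register_ftrace_direct() attaches a custom trampoline directly to a
 * function's ftrace call site. The graph selftest below registers one
 * on DYN_FTRACE_TEST_NAME() and then checks that graph tracing of the
 * same function still produces entries, i.e. that the two mechanisms
 * coexist.
 */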
/*
 * Pretty much the same as for the function tracer, from which this
 * selftest has been borrowed.
 */
__init int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;
	char *func_name __maybe_unused;

#ifdef CONFIG_DYNAMIC_FTRACE
	if (ftrace_filter_param) {
		printk(KERN_CONT " ... kernel command line filter set: force PASS ... ");
		return 0;
	}
#endif

	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);
	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		disable_tracing_selftest("recovering from a hang");
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	/* Need to also simulate the tr->reset to remove this fgraph_ops */
	tracing_stop_cmdline_record();
	unregister_ftrace_graph(&fgraph_ops);

	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

#ifdef TEST_DIRECT_TRAMP
	tracing_reset_online_cpus(&tr->array_buffer);
	set_graph_array(tr);

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/*
	 * Register direct function together with graph tracer
	 * and make sure we get graph trace.
	 */
	ret = register_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
				     (unsigned long) trace_direct_tramp);
	if (ret)
		goto out;

	ret = register_ftrace_graph(&fgraph_ops);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	count = 0;

	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);

	unregister_ftrace_graph(&fgraph_ops);

	ret = unregister_ftrace_direct((unsigned long) DYN_FTRACE_TEST_NAME,
				       (unsigned long) trace_direct_tramp);
	if (ret)
		goto out;

	tracing_start();

	if (!ret && !count) {
		ret = -1;
		goto out;
	}
#endif

	/* Don't test dynamic tracing, the function tracer already did */
out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
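
/*
 * The latency tracers below keep two buffers: the live array_buffer
 * and a max_buffer that is swapped in whenever a new maximum latency
 * is recorded. Each selftest therefore provokes a latency window
 * (irqs and/or preemption off around udelay()) and then verifies both
 * buffers.
 */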
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */
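
/*
 * The combined test below enables preemption again while interrupts
 * are still disabled ("reverse the order of preempt vs irqs"), so the
 * preempt-off and irqs-off regions overlap rather than nest cleanly,
 * which is exactly the case the preemptirqsoff tracer has to handle.
 */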
#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallel max irqs/preempt-off latencies.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tr->max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&tr->max_buffer, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tr->max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
	struct completion is_ready;
	int go;
};

static int trace_wakeup_test_thread(void *data)
{
	/* Make this a -deadline thread */
	static const struct sched_attr attr = {
		.sched_policy = SCHED_DEADLINE,
		.sched_runtime = 100000ULL,
		.sched_deadline = 10000000ULL,
		.sched_period = 10000000ULL
	};
	struct wakeup_test_data *x = data;

	sched_setattr(current, &attr);

	/* Let the caller know we now have the new scheduling policy */
	complete(&x->is_ready);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	complete(&x->is_ready);

	set_current_state(TASK_INTERRUPTIBLE);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
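
/*
 * The wakeup tracer measures the latency of waking the highest
 * priority task in the system. SCHED_DEADLINE is used for the test
 * thread because it outranks every RT and normal task, so its wakeup
 * is guaranteed to be the one the tracer records.
 */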
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tr->max_latency;
	struct task_struct *p;
	struct wakeup_test_data data;
	unsigned long count;
	int ret;

	memset(&data, 0, sizeof(data));

	init_completion(&data.is_ready);

	/* create a -deadline thread */
	p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at -deadline policy */
	wait_for_completion(&data.is_ready);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tr->max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the -deadline thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&data.is_ready);

	data.go = 1;
	/* memory barrier is in the wake_up_process() */

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&data.is_ready);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(&tr->array_buffer, NULL);
	if (!ret)
		ret = trace_test_buffer(&tr->max_buffer, &count);

	trace->reset(tr);
	tracing_start();

	tr->max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->array_buffer, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */