/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"

#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* ftrace_enabled is the runtime knob that turns ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled:
 * once set, ftrace stays off for good.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	}
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This resets the ftrace function to the stub and in essence stops
 * tracing.  There may be a lag before every caller picks up the
 * change, since no synchronization is done here.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
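/*
 * Illustrative sketch (not part of the original file): what the list
 * manipulation above means for callers, assuming ftrace_enabled is set.
 * Registering a single ops makes ftrace_trace_function point straight
 * at its ->func; registering a second one demotes the fast path to
 * ftrace_list_func, which walks the whole chain.  The my_* names below
 * are hypothetical.
 */
#if 0
static void notrace my_first_func(unsigned long ip, unsigned long parent_ip)
{
	/* ip = address of the traced function, parent_ip = its caller */
}

static void notrace my_second_func(unsigned long ip, unsigned long parent_ip)
{
}

static struct ftrace_ops my_first_ops = { .func = my_first_func };
static struct ftrace_ops my_second_ops = { .func = my_second_func };

static void example_register(void)
{
	__register_ftrace_function(&my_first_ops);
	/* ftrace_trace_function == my_first_func (direct call) */

	__register_ftrace_function(&my_second_ops);
	/* ftrace_trace_function == ftrace_list_func (walks both ops) */
}
#endif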
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * the list head back at ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to confuse the patching code by reading a reference to it out
 * of the objcopy output we are parsing.  Use a variable for it
 * instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};

static int ftrace_filtered;
static int tracing_on;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;


#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
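/*
 * Illustrative sketch (not part of the original file): freed records
 * form an intrusive singly linked list that reuses the ->ip field as
 * the "next" pointer, so no extra storage is needed.  FTRACE_FL_FREE
 * marks a record as being on this list.  A hypothetical walker:
 */
#if 0
static void walk_free_records(void)
{
	struct dyn_ftrace *rec = ftrace_free_records;

	while (rec) {
		WARN_ON(!(rec->flags & FTRACE_FL_FREE));
		/* ->ip holds the next free record, not an instruction address */
		rec = (struct dyn_ftrace *)rec->ip;
	}
}
#endif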
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (!ftrace_enabled || ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}

#define FTRACE_ADDR ((long)(ftrace_caller))
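/*
 * Illustrative sketch (not part of the original file): the life cycle
 * of a record.  An mcount call site found at boot or module load is
 * queued by ftrace_record_ip(), later converted to a NOP by
 * ftrace_update_code() (defined further below), and from then on
 * toggled between a NOP and a call to ftrace_caller (FTRACE_ADDR) by
 * __ftrace_replace_code().
 */
#if 0
static void example_record_flow(unsigned long ip)
{
	/* 1. queue the call site on ftrace_new_addrs */
	ftrace_record_ip(ip);

	/* 2. patch the mcount call to a NOP; sets FTRACE_FL_CONVERTED */
	ftrace_update_code();

	/* 3. enabling tracing later rewrites the NOP into a call to
	 *    ftrace_caller via ftrace_replace_code(1) */
}
#endif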
static int
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is set not to trace and
		 * it is enabled then disable it.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE |
				   FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == (FTRACE_FL_FILTER | FTRACE_FL_NOTRACE)) ||
		    !fl || (fl == FTRACE_FL_NOTRACE))
			return 0;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl & FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return 0;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	return ftrace_modify_code(ip, old, new);
}

static void ftrace_replace_code(int enable)
{
	int i, failed;
	unsigned char *new = NULL, *old = NULL;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	if (enable)
		old = ftrace_nop_replace();
	else
		new = ftrace_nop_replace();

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, old, new, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				}
			}
		}
	}
}

static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static int
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int ret;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, mcount_addr);

	ret = ftrace_modify_code(ip, call, nop);
	if (ret) {
		switch (ret) {
		case -EFAULT:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on modifying ");
			print_ip_sym(ip);
			break;
		case -EINVAL:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace failed to modify ");
			print_ip_sym(ip);
			print_ip_ins(" expected: ", call);
			print_ip_ins(" actual: ", (unsigned char *)ip);
			print_ip_ins(" replace: ", nop);
			printk(KERN_CONT "\n");
			break;
		case -EPERM:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted on writing ");
			print_ip_sym(ip);
			break;
		default:
			FTRACE_WARN_ON_ONCE(1);
			pr_info("ftrace faulted with unknown error ");
			print_ip_sym(ip);
		}

		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS) {
		ftrace_replace_code(1);
		tracing_on = 1;
	} else if (*command & FTRACE_DISABLE_CALLS) {
		ftrace_replace_code(0);
		tracing_on = 0;
	}

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
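/*
 * Illustrative sketch (not part of the original file): callers compose
 * a command bitmask and hand it to ftrace_run_update_code(), which runs
 * __ftrace_modify_code() under stop_machine() so that no other CPU is
 * executing kernel code while it is being patched.
 */
#if 0
static void example_update(void)
{
	int command = 0;

	command |= FTRACE_ENABLE_CALLS;		/* patch NOPs into calls */
	command |= FTRACE_UPDATE_TRACE_FUNC;	/* repoint the arch trampoline */

	ftrace_run_update_code(command);
}
#endif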
static ftrace_func_t saved_ftrace_func;
static int ftrace_start;
static DEFINE_MUTEX(ftrace_start_lock);

static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start++;
	if (ftrace_start == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start--;
	if (!ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start is true if we want ftrace running */
	if (ftrace_start)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start is true if ftrace is running */
	if (ftrace_start)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}

static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(void)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e., patch the mcount call with a NOP) */
		if (ftrace_code_disable(p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
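/*
 * Illustrative sketch (not part of the original file): ftrace_startup()
 * and ftrace_shutdown() are refcounted via ftrace_start, so nested
 * users can pair them freely; only the first startup patches the calls
 * in and only the last shutdown patches them out.
 */
#if 0
static void example_nesting(void)
{
	ftrace_startup();	/* ftrace_start 0 -> 1: FTRACE_ENABLE_CALLS */
	ftrace_startup();	/* ftrace_start 1 -> 2: no code change */
	ftrace_shutdown();	/* ftrace_start 2 -> 1: no code change */
	ftrace_shutdown();	/* ftrace_start 1 -> 0: FTRACE_DISABLE_CALLS */
}
#endif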
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
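/*
 * Illustrative user-space sketch (not part of the original file): the
 * seq_file iterator above backs the debugfs read path, so listing the
 * traceable functions is an ordinary file read.  The path assumes
 * debugfs is mounted at /sys/kernel/debug.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/tracing/available_filter_functions",
			"r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one symbol per line, from t_show() */
	fclose(f);
	return 0;
}
#endif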
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}


static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}

static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}

static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}

enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}

static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;


	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and @reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all notrace filters before applying this one.
 *
 * Notrace filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and @reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}

static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (iter->filtered && ftrace_start && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}

static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
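/*
 * Illustrative sketch (not part of the original file): the wildcard
 * forms accepted by ftrace_match() as seen through ftrace_set_filter().
 * A leading '*' matches the end of a name, a trailing '*' the front,
 * and both together match a substring.  Buffers must be writable,
 * since ftrace_match() truncates the pattern at the '*'.
 */
#if 0
static void example_filters(void)
{
	char full[]   = "sys_open";	/* MATCH_FULL: exact name */
	char front[]  = "sched_*";	/* MATCH_FRONT_ONLY: prefix match */
	char end[]    = "*_lock";	/* MATCH_END_ONLY: suffix match */
	char middle[] = "*page*";	/* MATCH_MIDDLE_ONLY: substring match */

	ftrace_set_filter((unsigned char *)full, sizeof(full) - 1, 1);
	ftrace_set_filter((unsigned char *)front, sizeof(front) - 1, 0);
	ftrace_set_filter((unsigned char *)end, sizeof(end) - 1, 0);
	ftrace_set_filter((unsigned char *)middle, sizeof(middle) - 1, 0);
}
#endif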
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);

static int ftrace_convert_nops(unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code();
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}

void ftrace_init_module(unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(start, end);
}

extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(__start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is disabled for good and
 * nothing is cleaned up, which is what makes it safe to call
 * from any context.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
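/*
 * Illustrative sketch (not part of the original file): ftrace_kill() is
 * meant for paths like panic handling, where patched code can no longer
 * be trusted.  A hypothetical panic notifier (needs linux/notifier.h)
 * might look like this; all my_* names are made up.
 */
#if 0
static int my_panic_event(struct notifier_block *nb, unsigned long ev, void *p)
{
	ftrace_kill();		/* hard-disable ftrace, no cleanup */
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_block = {
	.notifier_call = my_panic_event,
};

/* registered via:
 *	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_block);
 */
#endif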
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called on entry of every function
 * in the kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __register_ftrace_function(ops);
	ftrace_startup();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
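/*
 * Illustrative sketch (not part of the original file): minimal use of
 * the public API from a module.  Note the callback must be marked
 * notrace (and so must everything it calls), exactly as the comment on
 * register_ftrace_function() warns.  All my_* names are hypothetical.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* runs on entry of every traced kernel function */
}

static struct ftrace_ops my_ops = {
	.func = my_trace_func,
};

static int __init my_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
#endif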