1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Infrastructure for profiling code inserted by 'gcc -pg'. 4 * 5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> 6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> 7 * 8 * Originally ported from the -rt patch by: 9 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> 10 * 11 * Based on code in the latency_tracer, that is: 12 * 13 * Copyright (C) 2004-2006 Ingo Molnar 14 * Copyright (C) 2004 Nadia Yvette Chambers 15 */ 16 17 #include <linux/stop_machine.h> 18 #include <linux/clocksource.h> 19 #include <linux/sched/task.h> 20 #include <linux/kallsyms.h> 21 #include <linux/security.h> 22 #include <linux/seq_file.h> 23 #include <linux/tracefs.h> 24 #include <linux/hardirq.h> 25 #include <linux/kthread.h> 26 #include <linux/uaccess.h> 27 #include <linux/bsearch.h> 28 #include <linux/module.h> 29 #include <linux/ftrace.h> 30 #include <linux/sysctl.h> 31 #include <linux/slab.h> 32 #include <linux/ctype.h> 33 #include <linux/sort.h> 34 #include <linux/list.h> 35 #include <linux/hash.h> 36 #include <linux/rcupdate.h> 37 #include <linux/kprobes.h> 38 39 #include <trace/events/sched.h> 40 41 #include <asm/sections.h> 42 #include <asm/setup.h> 43 44 #include "ftrace_internal.h" 45 #include "trace_output.h" 46 #include "trace_stat.h" 47 48 #define FTRACE_WARN_ON(cond) \ 49 ({ \ 50 int ___r = cond; \ 51 if (WARN_ON(___r)) \ 52 ftrace_kill(); \ 53 ___r; \ 54 }) 55 56 #define FTRACE_WARN_ON_ONCE(cond) \ 57 ({ \ 58 int ___r = cond; \ 59 if (WARN_ON_ONCE(___r)) \ 60 ftrace_kill(); \ 61 ___r; \ 62 }) 63 64 /* hash bits for specific function selection */ 65 #define FTRACE_HASH_DEFAULT_BITS 10 66 #define FTRACE_HASH_MAX_BITS 12 67 68 #ifdef CONFIG_DYNAMIC_FTRACE 69 #define INIT_OPS_HASH(opsname) \ 70 .func_hash = &opsname.local_hash, \ 71 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), 72 #else 73 #define INIT_OPS_HASH(opsname) 74 #endif 75 76 enum { 77 FTRACE_MODIFY_ENABLE_FL = (1 << 0), 78 FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1), 79 }; 80 81 struct ftrace_ops ftrace_list_end __read_mostly = { 82 .func = ftrace_stub, 83 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, 84 INIT_OPS_HASH(ftrace_list_end) 85 }; 86 87 /* ftrace_enabled is a method to turn ftrace on or off */ 88 int ftrace_enabled __read_mostly; 89 static int last_ftrace_enabled; 90 91 /* Current function tracing op */ 92 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; 93 /* What to set function_trace_op to */ 94 static struct ftrace_ops *set_function_trace_op; 95 96 static bool ftrace_pids_enabled(struct ftrace_ops *ops) 97 { 98 struct trace_array *tr; 99 100 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private) 101 return false; 102 103 tr = ops->private; 104 105 return tr->function_pids != NULL; 106 } 107 108 static void ftrace_update_trampoline(struct ftrace_ops *ops); 109 110 /* 111 * ftrace_disabled is set when an anomaly is discovered. 112 * ftrace_disabled is much stronger than ftrace_enabled. 
113 */ 114 static int ftrace_disabled __read_mostly; 115 116 DEFINE_MUTEX(ftrace_lock); 117 118 struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; 119 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 120 struct ftrace_ops global_ops; 121 122 #if ARCH_SUPPORTS_FTRACE_OPS 123 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 124 struct ftrace_ops *op, struct pt_regs *regs); 125 #else 126 /* See comment below, where ftrace_ops_list_func is defined */ 127 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); 128 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) 129 #endif 130 131 static inline void ftrace_ops_init(struct ftrace_ops *ops) 132 { 133 #ifdef CONFIG_DYNAMIC_FTRACE 134 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { 135 mutex_init(&ops->local_hash.regex_lock); 136 ops->func_hash = &ops->local_hash; 137 ops->flags |= FTRACE_OPS_FL_INITIALIZED; 138 } 139 #endif 140 } 141 142 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, 143 struct ftrace_ops *op, struct pt_regs *regs) 144 { 145 struct trace_array *tr = op->private; 146 147 if (tr && this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid)) 148 return; 149 150 op->saved_func(ip, parent_ip, op, regs); 151 } 152 153 static void ftrace_sync(struct work_struct *work) 154 { 155 /* 156 * This function is just a stub to implement a hard force 157 * of synchronize_rcu(). This requires synchronizing 158 * tasks even in userspace and idle. 159 * 160 * Yes, function tracing is rude. 161 */ 162 } 163 164 static void ftrace_sync_ipi(void *data) 165 { 166 /* Probably not needed, but do it anyway */ 167 smp_rmb(); 168 } 169 170 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) 171 { 172 /* 173 * If this is a dynamic, RCU, or per CPU ops, or we force list func, 174 * then it needs to call the list anyway. 175 */ 176 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || 177 FTRACE_FORCE_LIST_FUNC) 178 return ftrace_ops_list_func; 179 180 return ftrace_ops_get_func(ops); 181 } 182 183 static void update_ftrace_function(void) 184 { 185 ftrace_func_t func; 186 187 /* 188 * Prepare the ftrace_ops that the arch callback will use. 189 * If there's only one ftrace_ops registered, the ftrace_ops_list 190 * will point to the ops we want. 191 */ 192 set_function_trace_op = rcu_dereference_protected(ftrace_ops_list, 193 lockdep_is_held(&ftrace_lock)); 194 195 /* If there's no ftrace_ops registered, just call the stub function */ 196 if (set_function_trace_op == &ftrace_list_end) { 197 func = ftrace_stub; 198 199 /* 200 * If we are at the end of the list and this ops is 201 * recursion safe and not dynamic and the arch supports passing ops, 202 * then have the mcount trampoline call the function directly. 203 */ 204 } else if (rcu_dereference_protected(ftrace_ops_list->next, 205 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { 206 func = ftrace_ops_get_list_func(ftrace_ops_list); 207 208 } else { 209 /* Just use the default ftrace_ops */ 210 set_function_trace_op = &ftrace_list_end; 211 func = ftrace_ops_list_func; 212 } 213 214 update_function_graph_func(); 215 216 /* If there's no change, then do nothing more here */ 217 if (ftrace_trace_function == func) 218 return; 219 220 /* 221 * If we are using the list function, it doesn't care 222 * about the function_trace_ops. 
 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
319 */ 320 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && 321 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) 322 return -EINVAL; 323 324 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) 325 ops->flags |= FTRACE_OPS_FL_SAVE_REGS; 326 #endif 327 if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT)) 328 return -EBUSY; 329 330 if (!core_kernel_data((unsigned long)ops)) 331 ops->flags |= FTRACE_OPS_FL_DYNAMIC; 332 333 add_ftrace_ops(&ftrace_ops_list, ops); 334 335 /* Always save the function, and reset at unregistering */ 336 ops->saved_func = ops->func; 337 338 if (ftrace_pids_enabled(ops)) 339 ops->func = ftrace_pid_func; 340 341 ftrace_update_trampoline(ops); 342 343 if (ftrace_enabled) 344 update_ftrace_function(); 345 346 return 0; 347 } 348 349 int __unregister_ftrace_function(struct ftrace_ops *ops) 350 { 351 int ret; 352 353 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) 354 return -EBUSY; 355 356 ret = remove_ftrace_ops(&ftrace_ops_list, ops); 357 358 if (ret < 0) 359 return ret; 360 361 if (ftrace_enabled) 362 update_ftrace_function(); 363 364 ops->func = ops->saved_func; 365 366 return 0; 367 } 368 369 static void ftrace_update_pid_func(void) 370 { 371 struct ftrace_ops *op; 372 373 /* Only do something if we are tracing something */ 374 if (ftrace_trace_function == ftrace_stub) 375 return; 376 377 do_for_each_ftrace_op(op, ftrace_ops_list) { 378 if (op->flags & FTRACE_OPS_FL_PID) { 379 op->func = ftrace_pids_enabled(op) ? 380 ftrace_pid_func : op->saved_func; 381 ftrace_update_trampoline(op); 382 } 383 } while_for_each_ftrace_op(op); 384 385 update_ftrace_function(); 386 } 387 388 #ifdef CONFIG_FUNCTION_PROFILER 389 struct ftrace_profile { 390 struct hlist_node node; 391 unsigned long ip; 392 unsigned long counter; 393 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 394 unsigned long long time; 395 unsigned long long time_squared; 396 #endif 397 }; 398 399 struct ftrace_profile_page { 400 struct ftrace_profile_page *next; 401 unsigned long index; 402 struct ftrace_profile records[]; 403 }; 404 405 struct ftrace_profile_stat { 406 atomic_t disabled; 407 struct hlist_head *hash; 408 struct ftrace_profile_page *pages; 409 struct ftrace_profile_page *start; 410 struct tracer_stat stat; 411 }; 412 413 #define PROFILE_RECORDS_SIZE \ 414 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) 415 416 #define PROFILES_PER_PAGE \ 417 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) 418 419 static int ftrace_profile_enabled __read_mostly; 420 421 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ 422 static DEFINE_MUTEX(ftrace_profile_lock); 423 424 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); 425 426 #define FTRACE_PROFILE_HASH_BITS 10 427 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS) 428 429 static void * 430 function_stat_next(void *v, int idx) 431 { 432 struct ftrace_profile *rec = v; 433 struct ftrace_profile_page *pg; 434 435 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); 436 437 again: 438 if (idx != 0) 439 rec++; 440 441 if ((void *)rec >= (void *)&pg->records[pg->index]) { 442 pg = pg->next; 443 if (!pg) 444 return NULL; 445 rec = &pg->records[0]; 446 if (!rec->counter) 447 goto again; 448 } 449 450 return rec; 451 } 452 453 static void *function_stat_start(struct tracer_stat *trace) 454 { 455 struct ftrace_profile_stat *stat = 456 container_of(trace, struct ftrace_profile_stat, stat); 457 458 if (!stat || !stat->start) 459 return NULL; 460 461 return 
function_stat_next(&stat->start->records[0], 0); 462 } 463 464 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 465 /* function graph compares on total time */ 466 static int function_stat_cmp(const void *p1, const void *p2) 467 { 468 const struct ftrace_profile *a = p1; 469 const struct ftrace_profile *b = p2; 470 471 if (a->time < b->time) 472 return -1; 473 if (a->time > b->time) 474 return 1; 475 else 476 return 0; 477 } 478 #else 479 /* not function graph compares against hits */ 480 static int function_stat_cmp(const void *p1, const void *p2) 481 { 482 const struct ftrace_profile *a = p1; 483 const struct ftrace_profile *b = p2; 484 485 if (a->counter < b->counter) 486 return -1; 487 if (a->counter > b->counter) 488 return 1; 489 else 490 return 0; 491 } 492 #endif 493 494 static int function_stat_headers(struct seq_file *m) 495 { 496 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 497 seq_puts(m, " Function " 498 "Hit Time Avg s^2\n" 499 " -------- " 500 "--- ---- --- ---\n"); 501 #else 502 seq_puts(m, " Function Hit\n" 503 " -------- ---\n"); 504 #endif 505 return 0; 506 } 507 508 static int function_stat_show(struct seq_file *m, void *v) 509 { 510 struct ftrace_profile *rec = v; 511 char str[KSYM_SYMBOL_LEN]; 512 int ret = 0; 513 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 514 static struct trace_seq s; 515 unsigned long long avg; 516 unsigned long long stddev; 517 #endif 518 mutex_lock(&ftrace_profile_lock); 519 520 /* we raced with function_profile_reset() */ 521 if (unlikely(rec->counter == 0)) { 522 ret = -EBUSY; 523 goto out; 524 } 525 526 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 527 avg = div64_ul(rec->time, rec->counter); 528 if (tracing_thresh && (avg < tracing_thresh)) 529 goto out; 530 #endif 531 532 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 533 seq_printf(m, " %-30.30s %10lu", str, rec->counter); 534 535 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 536 seq_puts(m, " "); 537 538 /* Sample standard deviation (s^2) */ 539 if (rec->counter <= 1) 540 stddev = 0; 541 else { 542 /* 543 * Apply Welford's method: 544 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) 545 */ 546 stddev = rec->counter * rec->time_squared - 547 rec->time * rec->time; 548 549 /* 550 * Divide only 1000 for ns^2 -> us^2 conversion. 551 * trace_print_graph_duration will divide 1000 again. 
552 */ 553 stddev = div64_ul(stddev, 554 rec->counter * (rec->counter - 1) * 1000); 555 } 556 557 trace_seq_init(&s); 558 trace_print_graph_duration(rec->time, &s); 559 trace_seq_puts(&s, " "); 560 trace_print_graph_duration(avg, &s); 561 trace_seq_puts(&s, " "); 562 trace_print_graph_duration(stddev, &s); 563 trace_print_seq(m, &s); 564 #endif 565 seq_putc(m, '\n'); 566 out: 567 mutex_unlock(&ftrace_profile_lock); 568 569 return ret; 570 } 571 572 static void ftrace_profile_reset(struct ftrace_profile_stat *stat) 573 { 574 struct ftrace_profile_page *pg; 575 576 pg = stat->pages = stat->start; 577 578 while (pg) { 579 memset(pg->records, 0, PROFILE_RECORDS_SIZE); 580 pg->index = 0; 581 pg = pg->next; 582 } 583 584 memset(stat->hash, 0, 585 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); 586 } 587 588 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) 589 { 590 struct ftrace_profile_page *pg; 591 int functions; 592 int pages; 593 int i; 594 595 /* If we already allocated, do nothing */ 596 if (stat->pages) 597 return 0; 598 599 stat->pages = (void *)get_zeroed_page(GFP_KERNEL); 600 if (!stat->pages) 601 return -ENOMEM; 602 603 #ifdef CONFIG_DYNAMIC_FTRACE 604 functions = ftrace_update_tot_cnt; 605 #else 606 /* 607 * We do not know the number of functions that exist because 608 * dynamic tracing is what counts them. With past experience 609 * we have around 20K functions. That should be more than enough. 610 * It is highly unlikely we will execute every function in 611 * the kernel. 612 */ 613 functions = 20000; 614 #endif 615 616 pg = stat->start = stat->pages; 617 618 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); 619 620 for (i = 1; i < pages; i++) { 621 pg->next = (void *)get_zeroed_page(GFP_KERNEL); 622 if (!pg->next) 623 goto out_free; 624 pg = pg->next; 625 } 626 627 return 0; 628 629 out_free: 630 pg = stat->start; 631 while (pg) { 632 unsigned long tmp = (unsigned long)pg; 633 634 pg = pg->next; 635 free_page(tmp); 636 } 637 638 stat->pages = NULL; 639 stat->start = NULL; 640 641 return -ENOMEM; 642 } 643 644 static int ftrace_profile_init_cpu(int cpu) 645 { 646 struct ftrace_profile_stat *stat; 647 int size; 648 649 stat = &per_cpu(ftrace_profile_stats, cpu); 650 651 if (stat->hash) { 652 /* If the profile is already created, simply reset it */ 653 ftrace_profile_reset(stat); 654 return 0; 655 } 656 657 /* 658 * We are profiling all functions, but usually only a few thousand 659 * functions are hit. We'll make a hash of 1024 items. 
660 */ 661 size = FTRACE_PROFILE_HASH_SIZE; 662 663 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL); 664 665 if (!stat->hash) 666 return -ENOMEM; 667 668 /* Preallocate the function profiling pages */ 669 if (ftrace_profile_pages_init(stat) < 0) { 670 kfree(stat->hash); 671 stat->hash = NULL; 672 return -ENOMEM; 673 } 674 675 return 0; 676 } 677 678 static int ftrace_profile_init(void) 679 { 680 int cpu; 681 int ret = 0; 682 683 for_each_possible_cpu(cpu) { 684 ret = ftrace_profile_init_cpu(cpu); 685 if (ret) 686 break; 687 } 688 689 return ret; 690 } 691 692 /* interrupts must be disabled */ 693 static struct ftrace_profile * 694 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) 695 { 696 struct ftrace_profile *rec; 697 struct hlist_head *hhd; 698 unsigned long key; 699 700 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS); 701 hhd = &stat->hash[key]; 702 703 if (hlist_empty(hhd)) 704 return NULL; 705 706 hlist_for_each_entry_rcu_notrace(rec, hhd, node) { 707 if (rec->ip == ip) 708 return rec; 709 } 710 711 return NULL; 712 } 713 714 static void ftrace_add_profile(struct ftrace_profile_stat *stat, 715 struct ftrace_profile *rec) 716 { 717 unsigned long key; 718 719 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); 720 hlist_add_head_rcu(&rec->node, &stat->hash[key]); 721 } 722 723 /* 724 * The memory is already allocated, this simply finds a new record to use. 725 */ 726 static struct ftrace_profile * 727 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) 728 { 729 struct ftrace_profile *rec = NULL; 730 731 /* prevent recursion (from NMIs) */ 732 if (atomic_inc_return(&stat->disabled) != 1) 733 goto out; 734 735 /* 736 * Try to find the function again since an NMI 737 * could have added it 738 */ 739 rec = ftrace_find_profiled_func(stat, ip); 740 if (rec) 741 goto out; 742 743 if (stat->pages->index == PROFILES_PER_PAGE) { 744 if (!stat->pages->next) 745 goto out; 746 stat->pages = stat->pages->next; 747 } 748 749 rec = &stat->pages->records[stat->pages->index++]; 750 rec->ip = ip; 751 ftrace_add_profile(stat, rec); 752 753 out: 754 atomic_dec(&stat->disabled); 755 756 return rec; 757 } 758 759 static void 760 function_profile_call(unsigned long ip, unsigned long parent_ip, 761 struct ftrace_ops *ops, struct pt_regs *regs) 762 { 763 struct ftrace_profile_stat *stat; 764 struct ftrace_profile *rec; 765 unsigned long flags; 766 767 if (!ftrace_profile_enabled) 768 return; 769 770 local_irq_save(flags); 771 772 stat = this_cpu_ptr(&ftrace_profile_stats); 773 if (!stat->hash || !ftrace_profile_enabled) 774 goto out; 775 776 rec = ftrace_find_profiled_func(stat, ip); 777 if (!rec) { 778 rec = ftrace_profile_alloc(stat, ip); 779 if (!rec) 780 goto out; 781 } 782 783 rec->counter++; 784 out: 785 local_irq_restore(flags); 786 } 787 788 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 789 static bool fgraph_graph_time = true; 790 791 void ftrace_graph_graph_time_control(bool enable) 792 { 793 fgraph_graph_time = enable; 794 } 795 796 static int profile_graph_entry(struct ftrace_graph_ent *trace) 797 { 798 struct ftrace_ret_stack *ret_stack; 799 800 function_profile_call(trace->func, 0, NULL, NULL); 801 802 /* If function graph is shutting down, ret_stack can be NULL */ 803 if (!current->ret_stack) 804 return 0; 805 806 ret_stack = ftrace_graph_get_ret_stack(current, 0); 807 if (ret_stack) 808 ret_stack->subtime = 0; 809 810 return 1; 811 } 812 813 static void profile_graph_return(struct ftrace_graph_ret *trace) 814 { 815 struct ftrace_ret_stack 
					*ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
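/*
 * Editor's note (illustrative, not from the original source): the profiler
 * defined above is driven entirely from tracefs. Assuming tracefs is
 * mounted at /sys/kernel/tracing, a typical session looks like:
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0	(one "function<N>" file per CPU)
 *	echo 0 > /sys/kernel/tracing/function_profile_enabled
 *
 * The write handler below parses that 0/1 value and calls
 * register_ftrace_profiler()/unregister_ftrace_profiler() accordingly.
 */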
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent; even if
			 * something fails later, we do not free this memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
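/*
 * Editor's note (descriptive, not from the original source): the code from
 * here on is built only with CONFIG_DYNAMIC_FTRACE, i.e. when the
 * mcount/fentry call sites recorded at build time are patched at run time.
 * It provides the dyn_ftrace record pages, the filter/notrace hashes and
 * the trampoline bookkeeping used by the rest of this file.
 */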
#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
1143 */ 1144 struct ftrace_func_entry * 1145 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) 1146 { 1147 if (ftrace_hash_empty(hash)) 1148 return NULL; 1149 1150 return __ftrace_lookup_ip(hash, ip); 1151 } 1152 1153 static void __add_hash_entry(struct ftrace_hash *hash, 1154 struct ftrace_func_entry *entry) 1155 { 1156 struct hlist_head *hhd; 1157 unsigned long key; 1158 1159 key = ftrace_hash_key(hash, entry->ip); 1160 hhd = &hash->buckets[key]; 1161 hlist_add_head(&entry->hlist, hhd); 1162 hash->count++; 1163 } 1164 1165 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) 1166 { 1167 struct ftrace_func_entry *entry; 1168 1169 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1170 if (!entry) 1171 return -ENOMEM; 1172 1173 entry->ip = ip; 1174 __add_hash_entry(hash, entry); 1175 1176 return 0; 1177 } 1178 1179 static void 1180 free_hash_entry(struct ftrace_hash *hash, 1181 struct ftrace_func_entry *entry) 1182 { 1183 hlist_del(&entry->hlist); 1184 kfree(entry); 1185 hash->count--; 1186 } 1187 1188 static void 1189 remove_hash_entry(struct ftrace_hash *hash, 1190 struct ftrace_func_entry *entry) 1191 { 1192 hlist_del_rcu(&entry->hlist); 1193 hash->count--; 1194 } 1195 1196 static void ftrace_hash_clear(struct ftrace_hash *hash) 1197 { 1198 struct hlist_head *hhd; 1199 struct hlist_node *tn; 1200 struct ftrace_func_entry *entry; 1201 int size = 1 << hash->size_bits; 1202 int i; 1203 1204 if (!hash->count) 1205 return; 1206 1207 for (i = 0; i < size; i++) { 1208 hhd = &hash->buckets[i]; 1209 hlist_for_each_entry_safe(entry, tn, hhd, hlist) 1210 free_hash_entry(hash, entry); 1211 } 1212 FTRACE_WARN_ON(hash->count); 1213 } 1214 1215 static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod) 1216 { 1217 list_del(&ftrace_mod->list); 1218 kfree(ftrace_mod->module); 1219 kfree(ftrace_mod->func); 1220 kfree(ftrace_mod); 1221 } 1222 1223 static void clear_ftrace_mod_list(struct list_head *head) 1224 { 1225 struct ftrace_mod_load *p, *n; 1226 1227 /* stack tracer isn't supported yet */ 1228 if (!head) 1229 return; 1230 1231 mutex_lock(&ftrace_lock); 1232 list_for_each_entry_safe(p, n, head, list) 1233 free_ftrace_mod(p); 1234 mutex_unlock(&ftrace_lock); 1235 } 1236 1237 static void free_ftrace_hash(struct ftrace_hash *hash) 1238 { 1239 if (!hash || hash == EMPTY_HASH) 1240 return; 1241 ftrace_hash_clear(hash); 1242 kfree(hash->buckets); 1243 kfree(hash); 1244 } 1245 1246 static void __free_ftrace_hash_rcu(struct rcu_head *rcu) 1247 { 1248 struct ftrace_hash *hash; 1249 1250 hash = container_of(rcu, struct ftrace_hash, rcu); 1251 free_ftrace_hash(hash); 1252 } 1253 1254 static void free_ftrace_hash_rcu(struct ftrace_hash *hash) 1255 { 1256 if (!hash || hash == EMPTY_HASH) 1257 return; 1258 call_rcu(&hash->rcu, __free_ftrace_hash_rcu); 1259 } 1260 1261 void ftrace_free_filter(struct ftrace_ops *ops) 1262 { 1263 ftrace_ops_init(ops); 1264 free_ftrace_hash(ops->func_hash->filter_hash); 1265 free_ftrace_hash(ops->func_hash->notrace_hash); 1266 } 1267 1268 static struct ftrace_hash *alloc_ftrace_hash(int size_bits) 1269 { 1270 struct ftrace_hash *hash; 1271 int size; 1272 1273 hash = kzalloc(sizeof(*hash), GFP_KERNEL); 1274 if (!hash) 1275 return NULL; 1276 1277 size = 1 << size_bits; 1278 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); 1279 1280 if (!hash->buckets) { 1281 kfree(hash); 1282 return NULL; 1283 } 1284 1285 hash->size_bits = size_bits; 1286 1287 return hash; 1288 } 1289 1290 1291 static int ftrace_add_mod(struct trace_array *tr, 1292 const char 
*func, const char *module, 1293 int enable) 1294 { 1295 struct ftrace_mod_load *ftrace_mod; 1296 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; 1297 1298 ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL); 1299 if (!ftrace_mod) 1300 return -ENOMEM; 1301 1302 ftrace_mod->func = kstrdup(func, GFP_KERNEL); 1303 ftrace_mod->module = kstrdup(module, GFP_KERNEL); 1304 ftrace_mod->enable = enable; 1305 1306 if (!ftrace_mod->func || !ftrace_mod->module) 1307 goto out_free; 1308 1309 list_add(&ftrace_mod->list, mod_head); 1310 1311 return 0; 1312 1313 out_free: 1314 free_ftrace_mod(ftrace_mod); 1315 1316 return -ENOMEM; 1317 } 1318 1319 static struct ftrace_hash * 1320 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) 1321 { 1322 struct ftrace_func_entry *entry; 1323 struct ftrace_hash *new_hash; 1324 int size; 1325 int ret; 1326 int i; 1327 1328 new_hash = alloc_ftrace_hash(size_bits); 1329 if (!new_hash) 1330 return NULL; 1331 1332 if (hash) 1333 new_hash->flags = hash->flags; 1334 1335 /* Empty hash? */ 1336 if (ftrace_hash_empty(hash)) 1337 return new_hash; 1338 1339 size = 1 << hash->size_bits; 1340 for (i = 0; i < size; i++) { 1341 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 1342 ret = add_hash_entry(new_hash, entry->ip); 1343 if (ret < 0) 1344 goto free_hash; 1345 } 1346 } 1347 1348 FTRACE_WARN_ON(new_hash->count != hash->count); 1349 1350 return new_hash; 1351 1352 free_hash: 1353 free_ftrace_hash(new_hash); 1354 return NULL; 1355 } 1356 1357 static void 1358 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash); 1359 static void 1360 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash); 1361 1362 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, 1363 struct ftrace_hash *new_hash); 1364 1365 static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size) 1366 { 1367 struct ftrace_func_entry *entry; 1368 struct ftrace_hash *new_hash; 1369 struct hlist_head *hhd; 1370 struct hlist_node *tn; 1371 int bits = 0; 1372 int i; 1373 1374 /* 1375 * Make the hash size about 1/2 the # found 1376 */ 1377 for (size /= 2; size; size >>= 1) 1378 bits++; 1379 1380 /* Don't allocate too much */ 1381 if (bits > FTRACE_HASH_MAX_BITS) 1382 bits = FTRACE_HASH_MAX_BITS; 1383 1384 new_hash = alloc_ftrace_hash(bits); 1385 if (!new_hash) 1386 return NULL; 1387 1388 new_hash->flags = src->flags; 1389 1390 size = 1 << src->size_bits; 1391 for (i = 0; i < size; i++) { 1392 hhd = &src->buckets[i]; 1393 hlist_for_each_entry_safe(entry, tn, hhd, hlist) { 1394 remove_hash_entry(src, entry); 1395 __add_hash_entry(new_hash, entry); 1396 } 1397 } 1398 return new_hash; 1399 } 1400 1401 static struct ftrace_hash * 1402 __ftrace_hash_move(struct ftrace_hash *src) 1403 { 1404 int size = src->count; 1405 1406 /* 1407 * If the new source is empty, just return the empty_hash. 
 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}
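/*
 * Editor's illustrative sketch (not part of the original file): how a client
 * of this API ends up exercising the filter/notrace hashes checked by
 * hash_contains_ip() and ftrace_ops_test() above. The guard macro
 * FTRACE_FILTER_EXAMPLE is hypothetical and never defined, so this is
 * documentation only.
 */
#ifdef FTRACE_FILTER_EXAMPLE
static void example_callback(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Runs only for functions that pass the filter and notrace hashes */
}

static struct ftrace_ops example_ops = {
	.func = example_callback,
};

static int __init example_filter_init(void)
{
	unsigned char filter[] = "schedule";

	/* Populate example_ops' filter_hash with a single function */
	ftrace_set_filter(&example_ops, filter, sizeof(filter) - 1, 0);
	/* Registration makes the hashes live for every traced call site */
	return register_ftrace_function(&example_ops);
}
#endif /* FTRACE_FILTER_EXAMPLE */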
/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}
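/*
 * Editor's illustrative sketch (not part of the original file): callers such
 * as kprobes typically only need to know whether an address sits on a
 * patchable call site at all. The helper below is hypothetical and only
 * shows the intended use of ftrace_location().
 */
#ifdef FTRACE_LOCATION_EXAMPLE
static bool example_addr_is_patchable(unsigned long addr)
{
	/* Non-zero means @addr falls within an mcount/fentry call site */
	return ftrace_location(addr) != 0;
}
#endif /* FTRACE_LOCATION_EXAMPLE */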
/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
1708 */ 1709 if (filter_hash && in_hash && !in_other_hash) 1710 match = 1; 1711 else if (!filter_hash && in_hash && 1712 (in_other_hash || ftrace_hash_empty(other_hash))) 1713 match = 1; 1714 } 1715 if (!match) 1716 continue; 1717 1718 if (inc) { 1719 rec->flags++; 1720 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX)) 1721 return false; 1722 1723 if (ops->flags & FTRACE_OPS_FL_DIRECT) 1724 rec->flags |= FTRACE_FL_DIRECT; 1725 1726 /* 1727 * If there's only a single callback registered to a 1728 * function, and the ops has a trampoline registered 1729 * for it, then we can call it directly. 1730 */ 1731 if (ftrace_rec_count(rec) == 1 && ops->trampoline) 1732 rec->flags |= FTRACE_FL_TRAMP; 1733 else 1734 /* 1735 * If we are adding another function callback 1736 * to this function, and the previous had a 1737 * custom trampoline in use, then we need to go 1738 * back to the default trampoline. 1739 */ 1740 rec->flags &= ~FTRACE_FL_TRAMP; 1741 1742 /* 1743 * If any ops wants regs saved for this function 1744 * then all ops will get saved regs. 1745 */ 1746 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) 1747 rec->flags |= FTRACE_FL_REGS; 1748 } else { 1749 if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0)) 1750 return false; 1751 rec->flags--; 1752 1753 /* 1754 * Only the internal direct_ops should have the 1755 * DIRECT flag set. Thus, if it is removing a 1756 * function, then that function should no longer 1757 * be direct. 1758 */ 1759 if (ops->flags & FTRACE_OPS_FL_DIRECT) 1760 rec->flags &= ~FTRACE_FL_DIRECT; 1761 1762 /* 1763 * If the rec had REGS enabled and the ops that is 1764 * being removed had REGS set, then see if there is 1765 * still any ops for this record that wants regs. 1766 * If not, we can stop recording them. 1767 */ 1768 if (ftrace_rec_count(rec) > 0 && 1769 rec->flags & FTRACE_FL_REGS && 1770 ops->flags & FTRACE_OPS_FL_SAVE_REGS) { 1771 if (!test_rec_ops_needs_regs(rec)) 1772 rec->flags &= ~FTRACE_FL_REGS; 1773 } 1774 1775 /* 1776 * The TRAMP needs to be set only if rec count 1777 * is decremented to one, and the ops that is 1778 * left has a trampoline. As TRAMP can only be 1779 * enabled if there is only a single ops attached 1780 * to it. 1781 */ 1782 if (ftrace_rec_count(rec) == 1 && 1783 ftrace_find_tramp_ops_any(rec)) 1784 rec->flags |= FTRACE_FL_TRAMP; 1785 else 1786 rec->flags &= ~FTRACE_FL_TRAMP; 1787 1788 /* 1789 * flags will be cleared in ftrace_check_record() 1790 * if rec count is zero. 1791 */ 1792 } 1793 count++; 1794 1795 /* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */ 1796 update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE; 1797 1798 /* Shortcut, if we handled all records, we are done. 
 */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
1880 */ 1881 if (!new_hash || !old_hash) 1882 return -EINVAL; 1883 1884 /* Update rec->flags */ 1885 do_for_each_ftrace_rec(pg, rec) { 1886 1887 if (rec->flags & FTRACE_FL_DISABLED) 1888 continue; 1889 1890 /* We need to update only differences of filter_hash */ 1891 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); 1892 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); 1893 if (in_old == in_new) 1894 continue; 1895 1896 if (in_new) { 1897 /* New entries must ensure no others are using it */ 1898 if (rec->flags & FTRACE_FL_IPMODIFY) 1899 goto rollback; 1900 rec->flags |= FTRACE_FL_IPMODIFY; 1901 } else /* Removed entry */ 1902 rec->flags &= ~FTRACE_FL_IPMODIFY; 1903 } while_for_each_ftrace_rec(); 1904 1905 return 0; 1906 1907 rollback: 1908 end = rec; 1909 1910 /* Roll back what we did above */ 1911 do_for_each_ftrace_rec(pg, rec) { 1912 1913 if (rec->flags & FTRACE_FL_DISABLED) 1914 continue; 1915 1916 if (rec == end) 1917 goto err_out; 1918 1919 in_old = !!ftrace_lookup_ip(old_hash, rec->ip); 1920 in_new = !!ftrace_lookup_ip(new_hash, rec->ip); 1921 if (in_old == in_new) 1922 continue; 1923 1924 if (in_new) 1925 rec->flags &= ~FTRACE_FL_IPMODIFY; 1926 else 1927 rec->flags |= FTRACE_FL_IPMODIFY; 1928 } while_for_each_ftrace_rec(); 1929 1930 err_out: 1931 return -EBUSY; 1932 } 1933 1934 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops) 1935 { 1936 struct ftrace_hash *hash = ops->func_hash->filter_hash; 1937 1938 if (ftrace_hash_empty(hash)) 1939 hash = NULL; 1940 1941 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash); 1942 } 1943 1944 /* Disabling always succeeds */ 1945 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops) 1946 { 1947 struct ftrace_hash *hash = ops->func_hash->filter_hash; 1948 1949 if (ftrace_hash_empty(hash)) 1950 hash = NULL; 1951 1952 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH); 1953 } 1954 1955 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops, 1956 struct ftrace_hash *new_hash) 1957 { 1958 struct ftrace_hash *old_hash = ops->func_hash->filter_hash; 1959 1960 if (ftrace_hash_empty(old_hash)) 1961 old_hash = NULL; 1962 1963 if (ftrace_hash_empty(new_hash)) 1964 new_hash = NULL; 1965 1966 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash); 1967 } 1968 1969 static void print_ip_ins(const char *fmt, const unsigned char *p) 1970 { 1971 int i; 1972 1973 printk(KERN_CONT "%s", fmt); 1974 1975 for (i = 0; i < MCOUNT_INSN_SIZE; i++) 1976 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); 1977 } 1978 1979 enum ftrace_bug_type ftrace_bug_type; 1980 const void *ftrace_expected; 1981 1982 static void print_bug_type(void) 1983 { 1984 switch (ftrace_bug_type) { 1985 case FTRACE_BUG_UNKNOWN: 1986 break; 1987 case FTRACE_BUG_INIT: 1988 pr_info("Initializing ftrace call sites\n"); 1989 break; 1990 case FTRACE_BUG_NOP: 1991 pr_info("Setting ftrace call site to NOP\n"); 1992 break; 1993 case FTRACE_BUG_CALL: 1994 pr_info("Setting ftrace call site to call ftrace function\n"); 1995 break; 1996 case FTRACE_BUG_UPDATE: 1997 pr_info("Updating ftrace call site to call a different ftrace function\n"); 1998 break; 1999 } 2000 } 2001 2002 /** 2003 * ftrace_bug - report and shutdown function tracer 2004 * @failed: The failed type (EFAULT, EINVAL, EPERM) 2005 * @rec: The record that failed 2006 * 2007 * The arch code that enables or disables the function tracing 2008 * can call ftrace_bug() when it has detected a problem in 2009 * modifying the code. 
@failed should be one of either: 2010 * EFAULT - if the problem happens on reading the @ip address 2011 * EINVAL - if what is read at @ip is not what was expected 2012 * EPERM - if the problem happens on writing to the @ip address 2013 */ 2014 void ftrace_bug(int failed, struct dyn_ftrace *rec) 2015 { 2016 unsigned long ip = rec ? rec->ip : 0; 2017 2018 switch (failed) { 2019 case -EFAULT: 2020 FTRACE_WARN_ON_ONCE(1); 2021 pr_info("ftrace faulted on modifying "); 2022 print_ip_sym(ip); 2023 break; 2024 case -EINVAL: 2025 FTRACE_WARN_ON_ONCE(1); 2026 pr_info("ftrace failed to modify "); 2027 print_ip_sym(ip); 2028 print_ip_ins(" actual: ", (unsigned char *)ip); 2029 pr_cont("\n"); 2030 if (ftrace_expected) { 2031 print_ip_ins(" expected: ", ftrace_expected); 2032 pr_cont("\n"); 2033 } 2034 break; 2035 case -EPERM: 2036 FTRACE_WARN_ON_ONCE(1); 2037 pr_info("ftrace faulted on writing "); 2038 print_ip_sym(ip); 2039 break; 2040 default: 2041 FTRACE_WARN_ON_ONCE(1); 2042 pr_info("ftrace faulted on unknown error "); 2043 print_ip_sym(ip); 2044 } 2045 print_bug_type(); 2046 if (rec) { 2047 struct ftrace_ops *ops = NULL; 2048 2049 pr_info("ftrace record flags: %lx\n", rec->flags); 2050 pr_cont(" (%ld)%s", ftrace_rec_count(rec), 2051 rec->flags & FTRACE_FL_REGS ? " R" : " "); 2052 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2053 ops = ftrace_find_tramp_ops_any(rec); 2054 if (ops) { 2055 do { 2056 pr_cont("\ttramp: %pS (%pS)", 2057 (void *)ops->trampoline, 2058 (void *)ops->func); 2059 ops = ftrace_find_tramp_ops_next(rec, ops); 2060 } while (ops); 2061 } else 2062 pr_cont("\ttramp: ERROR!"); 2063 2064 } 2065 ip = ftrace_get_addr_curr(rec); 2066 pr_cont("\n expected tramp: %lx\n", ip); 2067 } 2068 } 2069 2070 static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update) 2071 { 2072 unsigned long flag = 0UL; 2073 2074 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2075 2076 if (rec->flags & FTRACE_FL_DISABLED) 2077 return FTRACE_UPDATE_IGNORE; 2078 2079 /* 2080 * If we are updating calls: 2081 * 2082 * If the record has a ref count, then we need to enable it 2083 * because someone is using it. 2084 * 2085 * Otherwise we make sure its disabled. 2086 * 2087 * If we are disabling calls, then disable all records that 2088 * are enabled. 2089 */ 2090 if (enable && ftrace_rec_count(rec)) 2091 flag = FTRACE_FL_ENABLED; 2092 2093 /* 2094 * If enabling and the REGS flag does not match the REGS_EN, or 2095 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore 2096 * this record. Set flags to fail the compare against ENABLED. 2097 * Same for direct calls. 2098 */ 2099 if (flag) { 2100 if (!(rec->flags & FTRACE_FL_REGS) != 2101 !(rec->flags & FTRACE_FL_REGS_EN)) 2102 flag |= FTRACE_FL_REGS; 2103 2104 if (!(rec->flags & FTRACE_FL_TRAMP) != 2105 !(rec->flags & FTRACE_FL_TRAMP_EN)) 2106 flag |= FTRACE_FL_TRAMP; 2107 2108 /* 2109 * Direct calls are special, as count matters. 2110 * We must test the record for direct, if the 2111 * DIRECT and DIRECT_EN do not match, but only 2112 * if the count is 1. That's because, if the 2113 * count is something other than one, we do not 2114 * want the direct enabled (it will be done via the 2115 * direct helper). But if DIRECT_EN is set, and 2116 * the count is not one, we need to clear it. 
2117 */ 2118 if (ftrace_rec_count(rec) == 1) { 2119 if (!(rec->flags & FTRACE_FL_DIRECT) != 2120 !(rec->flags & FTRACE_FL_DIRECT_EN)) 2121 flag |= FTRACE_FL_DIRECT; 2122 } else if (rec->flags & FTRACE_FL_DIRECT_EN) { 2123 flag |= FTRACE_FL_DIRECT; 2124 } 2125 } 2126 2127 /* If the state of this record hasn't changed, then do nothing */ 2128 if ((rec->flags & FTRACE_FL_ENABLED) == flag) 2129 return FTRACE_UPDATE_IGNORE; 2130 2131 if (flag) { 2132 /* Save off if rec is being enabled (for return value) */ 2133 flag ^= rec->flags & FTRACE_FL_ENABLED; 2134 2135 if (update) { 2136 rec->flags |= FTRACE_FL_ENABLED; 2137 if (flag & FTRACE_FL_REGS) { 2138 if (rec->flags & FTRACE_FL_REGS) 2139 rec->flags |= FTRACE_FL_REGS_EN; 2140 else 2141 rec->flags &= ~FTRACE_FL_REGS_EN; 2142 } 2143 if (flag & FTRACE_FL_TRAMP) { 2144 if (rec->flags & FTRACE_FL_TRAMP) 2145 rec->flags |= FTRACE_FL_TRAMP_EN; 2146 else 2147 rec->flags &= ~FTRACE_FL_TRAMP_EN; 2148 } 2149 if (flag & FTRACE_FL_DIRECT) { 2150 /* 2151 * If there's only one user (direct_ops helper) 2152 * then we can call the direct function 2153 * directly (no ftrace trampoline). 2154 */ 2155 if (ftrace_rec_count(rec) == 1) { 2156 if (rec->flags & FTRACE_FL_DIRECT) 2157 rec->flags |= FTRACE_FL_DIRECT_EN; 2158 else 2159 rec->flags &= ~FTRACE_FL_DIRECT_EN; 2160 } else { 2161 /* 2162 * Can only call directly if there's 2163 * only one callback to the function. 2164 */ 2165 rec->flags &= ~FTRACE_FL_DIRECT_EN; 2166 } 2167 } 2168 } 2169 2170 /* 2171 * If this record is being updated from a nop, then 2172 * return UPDATE_MAKE_CALL. 2173 * Otherwise, 2174 * return UPDATE_MODIFY_CALL to tell the caller to convert 2175 * from the save regs, to a non-save regs function or 2176 * vice versa, or from a trampoline call. 2177 */ 2178 if (flag & FTRACE_FL_ENABLED) { 2179 ftrace_bug_type = FTRACE_BUG_CALL; 2180 return FTRACE_UPDATE_MAKE_CALL; 2181 } 2182 2183 ftrace_bug_type = FTRACE_BUG_UPDATE; 2184 return FTRACE_UPDATE_MODIFY_CALL; 2185 } 2186 2187 if (update) { 2188 /* If there's no more users, clear all flags */ 2189 if (!ftrace_rec_count(rec)) 2190 rec->flags = 0; 2191 else 2192 /* 2193 * Just disable the record, but keep the ops TRAMP 2194 * and REGS states. The _EN flags must be disabled though. 2195 */ 2196 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | 2197 FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN); 2198 } 2199 2200 ftrace_bug_type = FTRACE_BUG_NOP; 2201 return FTRACE_UPDATE_MAKE_NOP; 2202 } 2203 2204 /** 2205 * ftrace_update_record, set a record that now is tracing or not 2206 * @rec: the record to update 2207 * @enable: set to true if the record is tracing, false to force disable 2208 * 2209 * The records that represent all functions that can be traced need 2210 * to be updated when tracing has been enabled. 2211 */ 2212 int ftrace_update_record(struct dyn_ftrace *rec, bool enable) 2213 { 2214 return ftrace_check_record(rec, enable, true); 2215 } 2216 2217 /** 2218 * ftrace_test_record, check if the record has been enabled or not 2219 * @rec: the record to test 2220 * @enable: set to true to check if enabled, false if it is disabled 2221 * 2222 * The arch code may need to test if a record is already set to 2223 * tracing to determine how to modify the function code that it 2224 * represents. 
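 *
 * As a rough sketch of how an arch might use it (not a required
 * pattern; add_call()/add_nop() are placeholder names for whatever
 * the architecture does to stage the new instruction):
 *
 *	switch (ftrace_test_record(rec, true)) {
 *	case FTRACE_UPDATE_IGNORE:
 *		return 0;
 *	case FTRACE_UPDATE_MAKE_CALL:
 *		return add_call(rec, ftrace_get_addr_new(rec));
 *	case FTRACE_UPDATE_MAKE_NOP:
 *		return add_nop(rec);
 *	case FTRACE_UPDATE_MODIFY_CALL:
 *		return add_call(rec, ftrace_get_addr_new(rec));
 *	}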
2225 */ 2226 int ftrace_test_record(struct dyn_ftrace *rec, bool enable) 2227 { 2228 return ftrace_check_record(rec, enable, false); 2229 } 2230 2231 static struct ftrace_ops * 2232 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) 2233 { 2234 struct ftrace_ops *op; 2235 unsigned long ip = rec->ip; 2236 2237 do_for_each_ftrace_op(op, ftrace_ops_list) { 2238 2239 if (!op->trampoline) 2240 continue; 2241 2242 if (hash_contains_ip(ip, op->func_hash)) 2243 return op; 2244 } while_for_each_ftrace_op(op); 2245 2246 return NULL; 2247 } 2248 2249 static struct ftrace_ops * 2250 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, 2251 struct ftrace_ops *op) 2252 { 2253 unsigned long ip = rec->ip; 2254 2255 while_for_each_ftrace_op(op) { 2256 2257 if (!op->trampoline) 2258 continue; 2259 2260 if (hash_contains_ip(ip, op->func_hash)) 2261 return op; 2262 } 2263 2264 return NULL; 2265 } 2266 2267 static struct ftrace_ops * 2268 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) 2269 { 2270 struct ftrace_ops *op; 2271 unsigned long ip = rec->ip; 2272 2273 /* 2274 * Need to check removed ops first. 2275 * If they are being removed, and this rec has a tramp, 2276 * and this rec is in the ops list, then it would be the 2277 * one with the tramp. 2278 */ 2279 if (removed_ops) { 2280 if (hash_contains_ip(ip, &removed_ops->old_hash)) 2281 return removed_ops; 2282 } 2283 2284 /* 2285 * Need to find the current trampoline for a rec. 2286 * Now, a trampoline is only attached to a rec if there 2287 * was a single 'ops' attached to it. But this can be called 2288 * when we are adding another op to the rec or removing the 2289 * current one. Thus, if the op is being added, we can 2290 * ignore it because it hasn't attached itself to the rec 2291 * yet. 2292 * 2293 * If an ops is being modified (hooking to different functions) 2294 * then we don't care about the new functions that are being 2295 * added, just the old ones (that are probably being removed). 2296 * 2297 * If we are adding an ops to a function that already is using 2298 * a trampoline, it needs to be removed (trampolines are only 2299 * for single ops connected), then an ops that is not being 2300 * modified also needs to be checked. 2301 */ 2302 do_for_each_ftrace_op(op, ftrace_ops_list) { 2303 2304 if (!op->trampoline) 2305 continue; 2306 2307 /* 2308 * If the ops is being added, it hasn't gotten to 2309 * the point to be removed from this tree yet. 2310 */ 2311 if (op->flags & FTRACE_OPS_FL_ADDING) 2312 continue; 2313 2314 2315 /* 2316 * If the ops is being modified and is in the old 2317 * hash, then it is probably being removed from this 2318 * function. 2319 */ 2320 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && 2321 hash_contains_ip(ip, &op->old_hash)) 2322 return op; 2323 /* 2324 * If the ops is not being added or modified, and it's 2325 * in its normal filter hash, then this must be the one 2326 * we want! 
2327 */ 2328 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && 2329 hash_contains_ip(ip, op->func_hash)) 2330 return op; 2331 2332 } while_for_each_ftrace_op(op); 2333 2334 return NULL; 2335 } 2336 2337 static struct ftrace_ops * 2338 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) 2339 { 2340 struct ftrace_ops *op; 2341 unsigned long ip = rec->ip; 2342 2343 do_for_each_ftrace_op(op, ftrace_ops_list) { 2344 /* pass rec in as regs to have non-NULL val */ 2345 if (hash_contains_ip(ip, op->func_hash)) 2346 return op; 2347 } while_for_each_ftrace_op(op); 2348 2349 return NULL; 2350 } 2351 2352 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 2353 /* Protected by rcu_tasks for reading, and direct_mutex for writing */ 2354 static struct ftrace_hash *direct_functions = EMPTY_HASH; 2355 static DEFINE_MUTEX(direct_mutex); 2356 int ftrace_direct_func_count; 2357 2358 /* 2359 * Search the direct_functions hash to see if the given instruction pointer 2360 * has a direct caller attached to it. 2361 */ 2362 unsigned long ftrace_find_rec_direct(unsigned long ip) 2363 { 2364 struct ftrace_func_entry *entry; 2365 2366 entry = __ftrace_lookup_ip(direct_functions, ip); 2367 if (!entry) 2368 return 0; 2369 2370 return entry->direct; 2371 } 2372 2373 static void call_direct_funcs(unsigned long ip, unsigned long pip, 2374 struct ftrace_ops *ops, struct pt_regs *regs) 2375 { 2376 unsigned long addr; 2377 2378 addr = ftrace_find_rec_direct(ip); 2379 if (!addr) 2380 return; 2381 2382 arch_ftrace_set_direct_caller(regs, addr); 2383 } 2384 2385 struct ftrace_ops direct_ops = { 2386 .func = call_direct_funcs, 2387 .flags = FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_RECURSION_SAFE 2388 | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS 2389 | FTRACE_OPS_FL_PERMANENT, 2390 }; 2391 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 2392 2393 /** 2394 * ftrace_get_addr_new - Get the call address to set to 2395 * @rec: The ftrace record descriptor 2396 * 2397 * If the record has the FTRACE_FL_REGS set, that means that it 2398 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS 2399 * is not not set, then it wants to convert to the normal callback. 2400 * 2401 * Returns the address of the trampoline to set to 2402 */ 2403 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) 2404 { 2405 struct ftrace_ops *ops; 2406 unsigned long addr; 2407 2408 if ((rec->flags & FTRACE_FL_DIRECT) && 2409 (ftrace_rec_count(rec) == 1)) { 2410 addr = ftrace_find_rec_direct(rec->ip); 2411 if (addr) 2412 return addr; 2413 WARN_ON_ONCE(1); 2414 } 2415 2416 /* Trampolines take precedence over regs */ 2417 if (rec->flags & FTRACE_FL_TRAMP) { 2418 ops = ftrace_find_tramp_ops_new(rec); 2419 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { 2420 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", 2421 (void *)rec->ip, (void *)rec->ip, rec->flags); 2422 /* Ftrace is shutting down, return anything */ 2423 return (unsigned long)FTRACE_ADDR; 2424 } 2425 return ops->trampoline; 2426 } 2427 2428 if (rec->flags & FTRACE_FL_REGS) 2429 return (unsigned long)FTRACE_REGS_ADDR; 2430 else 2431 return (unsigned long)FTRACE_ADDR; 2432 } 2433 2434 /** 2435 * ftrace_get_addr_curr - Get the call address that is already there 2436 * @rec: The ftrace record descriptor 2437 * 2438 * The FTRACE_FL_REGS_EN is set when the record already points to 2439 * a function that saves all the regs. Basically the '_EN' version 2440 * represents the current state of the function. 
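 *
 * For instance, a record that has FTRACE_FL_REGS set but FTRACE_FL_REGS_EN
 * still clear (and no trampoline or direct caller) gives:
 *
 *	ftrace_get_addr_curr(rec) == FTRACE_ADDR	(what is patched in now)
 *	ftrace_get_addr_new(rec)  == FTRACE_REGS_ADDR	(what it must become)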
2441 * 2442 * Returns the address of the trampoline that is currently being called 2443 */ 2444 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) 2445 { 2446 struct ftrace_ops *ops; 2447 unsigned long addr; 2448 2449 /* Direct calls take precedence over trampolines */ 2450 if (rec->flags & FTRACE_FL_DIRECT_EN) { 2451 addr = ftrace_find_rec_direct(rec->ip); 2452 if (addr) 2453 return addr; 2454 WARN_ON_ONCE(1); 2455 } 2456 2457 /* Trampolines take precedence over regs */ 2458 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2459 ops = ftrace_find_tramp_ops_curr(rec); 2460 if (FTRACE_WARN_ON(!ops)) { 2461 pr_warn("Bad trampoline accounting at: %p (%pS)\n", 2462 (void *)rec->ip, (void *)rec->ip); 2463 /* Ftrace is shutting down, return anything */ 2464 return (unsigned long)FTRACE_ADDR; 2465 } 2466 return ops->trampoline; 2467 } 2468 2469 if (rec->flags & FTRACE_FL_REGS_EN) 2470 return (unsigned long)FTRACE_REGS_ADDR; 2471 else 2472 return (unsigned long)FTRACE_ADDR; 2473 } 2474 2475 static int 2476 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable) 2477 { 2478 unsigned long ftrace_old_addr; 2479 unsigned long ftrace_addr; 2480 int ret; 2481 2482 ftrace_addr = ftrace_get_addr_new(rec); 2483 2484 /* This needs to be done before we call ftrace_update_record */ 2485 ftrace_old_addr = ftrace_get_addr_curr(rec); 2486 2487 ret = ftrace_update_record(rec, enable); 2488 2489 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2490 2491 switch (ret) { 2492 case FTRACE_UPDATE_IGNORE: 2493 return 0; 2494 2495 case FTRACE_UPDATE_MAKE_CALL: 2496 ftrace_bug_type = FTRACE_BUG_CALL; 2497 return ftrace_make_call(rec, ftrace_addr); 2498 2499 case FTRACE_UPDATE_MAKE_NOP: 2500 ftrace_bug_type = FTRACE_BUG_NOP; 2501 return ftrace_make_nop(NULL, rec, ftrace_old_addr); 2502 2503 case FTRACE_UPDATE_MODIFY_CALL: 2504 ftrace_bug_type = FTRACE_BUG_UPDATE; 2505 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 2506 } 2507 2508 return -1; /* unknown ftrace bug */ 2509 } 2510 2511 void __weak ftrace_replace_code(int mod_flags) 2512 { 2513 struct dyn_ftrace *rec; 2514 struct ftrace_page *pg; 2515 bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; 2516 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; 2517 int failed; 2518 2519 if (unlikely(ftrace_disabled)) 2520 return; 2521 2522 do_for_each_ftrace_rec(pg, rec) { 2523 2524 if (rec->flags & FTRACE_FL_DISABLED) 2525 continue; 2526 2527 failed = __ftrace_replace_code(rec, enable); 2528 if (failed) { 2529 ftrace_bug(failed, rec); 2530 /* Stop processing */ 2531 return; 2532 } 2533 if (schedulable) 2534 cond_resched(); 2535 } while_for_each_ftrace_rec(); 2536 } 2537 2538 struct ftrace_rec_iter { 2539 struct ftrace_page *pg; 2540 int index; 2541 }; 2542 2543 /** 2544 * ftrace_rec_iter_start, start up iterating over traced functions 2545 * 2546 * Returns an iterator handle that is used to iterate over all 2547 * the records that represent address locations where functions 2548 * are traced. 2549 * 2550 * May return NULL if no records are available. 2551 */ 2552 struct ftrace_rec_iter *ftrace_rec_iter_start(void) 2553 { 2554 /* 2555 * We only use a single iterator. 2556 * Protected by the ftrace_lock mutex. 
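 *
 * Intended usage is a simple walk over all records, e.g. from an arch
 * implementation of the weak ftrace_replace_code() above (a sketch,
 * the patching step is arch specific):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// patch the instruction at rec->ip
 *	}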
2557 */ 2558 static struct ftrace_rec_iter ftrace_rec_iter; 2559 struct ftrace_rec_iter *iter = &ftrace_rec_iter; 2560 2561 iter->pg = ftrace_pages_start; 2562 iter->index = 0; 2563 2564 /* Could have empty pages */ 2565 while (iter->pg && !iter->pg->index) 2566 iter->pg = iter->pg->next; 2567 2568 if (!iter->pg) 2569 return NULL; 2570 2571 return iter; 2572 } 2573 2574 /** 2575 * ftrace_rec_iter_next, get the next record to process. 2576 * @iter: The handle to the iterator. 2577 * 2578 * Returns the next iterator after the given iterator @iter. 2579 */ 2580 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) 2581 { 2582 iter->index++; 2583 2584 if (iter->index >= iter->pg->index) { 2585 iter->pg = iter->pg->next; 2586 iter->index = 0; 2587 2588 /* Could have empty pages */ 2589 while (iter->pg && !iter->pg->index) 2590 iter->pg = iter->pg->next; 2591 } 2592 2593 if (!iter->pg) 2594 return NULL; 2595 2596 return iter; 2597 } 2598 2599 /** 2600 * ftrace_rec_iter_record, get the record at the iterator location 2601 * @iter: The current iterator location 2602 * 2603 * Returns the record that the current @iter is at. 2604 */ 2605 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) 2606 { 2607 return &iter->pg->records[iter->index]; 2608 } 2609 2610 static int 2611 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec) 2612 { 2613 int ret; 2614 2615 if (unlikely(ftrace_disabled)) 2616 return 0; 2617 2618 ret = ftrace_init_nop(mod, rec); 2619 if (ret) { 2620 ftrace_bug_type = FTRACE_BUG_INIT; 2621 ftrace_bug(ret, rec); 2622 return 0; 2623 } 2624 return 1; 2625 } 2626 2627 /* 2628 * archs can override this function if they must do something 2629 * before the modifying code is performed. 2630 */ 2631 int __weak ftrace_arch_code_modify_prepare(void) 2632 { 2633 return 0; 2634 } 2635 2636 /* 2637 * archs can override this function if they must do something 2638 * after the modifying code is performed. 2639 */ 2640 int __weak ftrace_arch_code_modify_post_process(void) 2641 { 2642 return 0; 2643 } 2644 2645 void ftrace_modify_all_code(int command) 2646 { 2647 int update = command & FTRACE_UPDATE_TRACE_FUNC; 2648 int mod_flags = 0; 2649 int err = 0; 2650 2651 if (command & FTRACE_MAY_SLEEP) 2652 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; 2653 2654 /* 2655 * If the ftrace_caller calls a ftrace_ops func directly, 2656 * we need to make sure that it only traces functions it 2657 * expects to trace. When doing the switch of functions, 2658 * we need to update to the ftrace_ops_list_func first 2659 * before the transition between old and new calls are set, 2660 * as the ftrace_ops_list_func will check the ops hashes 2661 * to make sure the ops are having the right functions 2662 * traced. 
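 *
 * Roughly, the sequence below is:
 *
 *	1) ftrace_update_ftrace_func(ftrace_ops_list_func)	(safe catch-all)
 *	2) ftrace_replace_code(...)				(patch call sites)
 *	3) ftrace_update_ftrace_func(ftrace_trace_function)	(final callback)
 *
 * so at no point can a call site reach a callback whose hash does not
 * cover it.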
2663 */ 2664 if (update) { 2665 err = ftrace_update_ftrace_func(ftrace_ops_list_func); 2666 if (FTRACE_WARN_ON(err)) 2667 return; 2668 } 2669 2670 if (command & FTRACE_UPDATE_CALLS) 2671 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL); 2672 else if (command & FTRACE_DISABLE_CALLS) 2673 ftrace_replace_code(mod_flags); 2674 2675 if (update && ftrace_trace_function != ftrace_ops_list_func) { 2676 function_trace_op = set_function_trace_op; 2677 smp_wmb(); 2678 /* If irqs are disabled, we are in stop machine */ 2679 if (!irqs_disabled()) 2680 smp_call_function(ftrace_sync_ipi, NULL, 1); 2681 err = ftrace_update_ftrace_func(ftrace_trace_function); 2682 if (FTRACE_WARN_ON(err)) 2683 return; 2684 } 2685 2686 if (command & FTRACE_START_FUNC_RET) 2687 err = ftrace_enable_ftrace_graph_caller(); 2688 else if (command & FTRACE_STOP_FUNC_RET) 2689 err = ftrace_disable_ftrace_graph_caller(); 2690 FTRACE_WARN_ON(err); 2691 } 2692 2693 static int __ftrace_modify_code(void *data) 2694 { 2695 int *command = data; 2696 2697 ftrace_modify_all_code(*command); 2698 2699 return 0; 2700 } 2701 2702 /** 2703 * ftrace_run_stop_machine, go back to the stop machine method 2704 * @command: The command to tell ftrace what to do 2705 * 2706 * If an arch needs to fall back to the stop machine method, then 2707 * it can call this function. 2708 */ 2709 void ftrace_run_stop_machine(int command) 2710 { 2711 stop_machine(__ftrace_modify_code, &command, NULL); 2712 } 2713 2714 /** 2715 * arch_ftrace_update_code, modify the code to trace or not trace 2716 * @command: The command that needs to be done 2717 * 2718 * Archs can override this function if they do not need to 2719 * run stop_machine() to modify code. 2720 */ 2721 void __weak arch_ftrace_update_code(int command) 2722 { 2723 ftrace_run_stop_machine(command); 2724 } 2725 2726 static void ftrace_run_update_code(int command) 2727 { 2728 int ret; 2729 2730 ret = ftrace_arch_code_modify_prepare(); 2731 FTRACE_WARN_ON(ret); 2732 if (ret) 2733 return; 2734 2735 /* 2736 * By default we use stop_machine() to modify the code. 2737 * But archs can do whatever they want as long as it 2738 * is safe. The stop_machine() is the safest, but also 2739 * produces the most overhead.
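 *
 * An architecture that can patch its text safely while the system is
 * running may override the weak hook above with something as simple as
 * (a sketch, the details are entirely arch specific):
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 *
 * and avoid stop_machine() altogether.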
2740 */ 2741 arch_ftrace_update_code(command); 2742 2743 ret = ftrace_arch_code_modify_post_process(); 2744 FTRACE_WARN_ON(ret); 2745 } 2746 2747 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, 2748 struct ftrace_ops_hash *old_hash) 2749 { 2750 ops->flags |= FTRACE_OPS_FL_MODIFYING; 2751 ops->old_hash.filter_hash = old_hash->filter_hash; 2752 ops->old_hash.notrace_hash = old_hash->notrace_hash; 2753 ftrace_run_update_code(command); 2754 ops->old_hash.filter_hash = NULL; 2755 ops->old_hash.notrace_hash = NULL; 2756 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; 2757 } 2758 2759 static ftrace_func_t saved_ftrace_func; 2760 static int ftrace_start_up; 2761 2762 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) 2763 { 2764 } 2765 2766 static void ftrace_startup_enable(int command) 2767 { 2768 if (saved_ftrace_func != ftrace_trace_function) { 2769 saved_ftrace_func = ftrace_trace_function; 2770 command |= FTRACE_UPDATE_TRACE_FUNC; 2771 } 2772 2773 if (!command || !ftrace_enabled) 2774 return; 2775 2776 ftrace_run_update_code(command); 2777 } 2778 2779 static void ftrace_startup_all(int command) 2780 { 2781 update_all_ops = true; 2782 ftrace_startup_enable(command); 2783 update_all_ops = false; 2784 } 2785 2786 int ftrace_startup(struct ftrace_ops *ops, int command) 2787 { 2788 int ret; 2789 2790 if (unlikely(ftrace_disabled)) 2791 return -ENODEV; 2792 2793 ret = __register_ftrace_function(ops); 2794 if (ret) 2795 return ret; 2796 2797 ftrace_start_up++; 2798 2799 /* 2800 * Note that ftrace probes uses this to start up 2801 * and modify functions it will probe. But we still 2802 * set the ADDING flag for modification, as probes 2803 * do not have trampolines. If they add them in the 2804 * future, then the probes will need to distinguish 2805 * between adding and updating probes. 2806 */ 2807 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; 2808 2809 ret = ftrace_hash_ipmodify_enable(ops); 2810 if (ret < 0) { 2811 /* Rollback registration process */ 2812 __unregister_ftrace_function(ops); 2813 ftrace_start_up--; 2814 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2815 return ret; 2816 } 2817 2818 if (ftrace_hash_rec_enable(ops, 1)) 2819 command |= FTRACE_UPDATE_CALLS; 2820 2821 ftrace_startup_enable(command); 2822 2823 ops->flags &= ~FTRACE_OPS_FL_ADDING; 2824 2825 return 0; 2826 } 2827 2828 int ftrace_shutdown(struct ftrace_ops *ops, int command) 2829 { 2830 int ret; 2831 2832 if (unlikely(ftrace_disabled)) 2833 return -ENODEV; 2834 2835 ret = __unregister_ftrace_function(ops); 2836 if (ret) 2837 return ret; 2838 2839 ftrace_start_up--; 2840 /* 2841 * Just warn in case of unbalance, no need to kill ftrace, it's not 2842 * critical but the ftrace_call callers may be never nopped again after 2843 * further ftrace uses. 2844 */ 2845 WARN_ON_ONCE(ftrace_start_up < 0); 2846 2847 /* Disabling ipmodify never fails */ 2848 ftrace_hash_ipmodify_disable(ops); 2849 2850 if (ftrace_hash_rec_disable(ops, 1)) 2851 command |= FTRACE_UPDATE_CALLS; 2852 2853 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2854 2855 if (saved_ftrace_func != ftrace_trace_function) { 2856 saved_ftrace_func = ftrace_trace_function; 2857 command |= FTRACE_UPDATE_TRACE_FUNC; 2858 } 2859 2860 if (!command || !ftrace_enabled) { 2861 /* 2862 * If these are dynamic or per_cpu ops, they still 2863 * need their data freed. Since, function tracing is 2864 * not currently active, we can just free them 2865 * without synchronizing all CPUs. 
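 *
 * For example, a module unregistering its callback ends up here; a
 * typical lifecycle looks like (a sketch, my_callback/my_ops are
 * placeholder names):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// count, filter, or otherwise record the hit
 *	}
 *
 *	static struct ftrace_ops my_ops = { .func = my_callback };
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * and nothing may still be executing my_callback once its memory can
 * go away.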
2866 */ 2867 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) 2868 goto free_ops; 2869 2870 return 0; 2871 } 2872 2873 /* 2874 * If the ops uses a trampoline, then it needs to be 2875 * tested first on update. 2876 */ 2877 ops->flags |= FTRACE_OPS_FL_REMOVING; 2878 removed_ops = ops; 2879 2880 /* The trampoline logic checks the old hashes */ 2881 ops->old_hash.filter_hash = ops->func_hash->filter_hash; 2882 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; 2883 2884 ftrace_run_update_code(command); 2885 2886 /* 2887 * If there are no more ops registered with ftrace, run a 2888 * sanity check to make sure all rec flags are cleared. 2889 */ 2890 if (rcu_dereference_protected(ftrace_ops_list, 2891 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { 2892 struct ftrace_page *pg; 2893 struct dyn_ftrace *rec; 2894 2895 do_for_each_ftrace_rec(pg, rec) { 2896 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) 2897 pr_warn(" %pS flags:%lx\n", 2898 (void *)rec->ip, rec->flags); 2899 } while_for_each_ftrace_rec(); 2900 } 2901 2902 ops->old_hash.filter_hash = NULL; 2903 ops->old_hash.notrace_hash = NULL; 2904 2905 removed_ops = NULL; 2906 ops->flags &= ~FTRACE_OPS_FL_REMOVING; 2907 2908 /* 2909 * Dynamic ops may be freed; we must make sure that all 2910 * callers are done before leaving this function. 2911 * The same goes for freeing the per_cpu data of the per_cpu 2912 * ops. 2913 */ 2914 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { 2915 /* 2916 * We need to do a hard force of sched synchronization. 2917 * This is because we use preempt_disable() to do RCU, but 2918 * the function tracers can be called where RCU is not watching 2919 * (like before user_exit()). We cannot rely on the RCU 2920 * infrastructure to do the synchronization, thus we must do it 2921 * ourselves. 2922 */ 2923 schedule_on_each_cpu(ftrace_sync); 2924 2925 /* 2926 * When the kernel is preemptive, tasks can be preempted 2927 * while on a ftrace trampoline. Just scheduling a task on 2928 * a CPU is not good enough to flush them. Calling 2929 * synchronize_rcu_tasks() will wait for those tasks to 2930 * execute and either schedule voluntarily or enter user space. 2931 */ 2932 if (IS_ENABLED(CONFIG_PREEMPTION)) 2933 synchronize_rcu_tasks(); 2934 2935 free_ops: 2936 arch_ftrace_trampoline_free(ops); 2937 } 2938 2939 return 0; 2940 } 2941 2942 static void ftrace_startup_sysctl(void) 2943 { 2944 int command; 2945 2946 if (unlikely(ftrace_disabled)) 2947 return; 2948 2949 /* Force update next time */ 2950 saved_ftrace_func = NULL; 2951 /* ftrace_start_up is true if we want ftrace running */ 2952 if (ftrace_start_up) { 2953 command = FTRACE_UPDATE_CALLS; 2954 if (ftrace_graph_active) 2955 command |= FTRACE_START_FUNC_RET; 2956 ftrace_startup_enable(command); 2957 } 2958 } 2959 2960 static void ftrace_shutdown_sysctl(void) 2961 { 2962 int command; 2963 2964 if (unlikely(ftrace_disabled)) 2965 return; 2966 2967 /* ftrace_start_up is true if ftrace is running */ 2968 if (ftrace_start_up) { 2969 command = FTRACE_DISABLE_CALLS; 2970 if (ftrace_graph_active) 2971 command |= FTRACE_STOP_FUNC_RET; 2972 ftrace_run_update_code(command); 2973 } 2974 } 2975 2976 static u64 ftrace_update_time; 2977 unsigned long ftrace_update_tot_cnt; 2978 unsigned long ftrace_number_of_pages; 2979 unsigned long ftrace_number_of_groups; 2980 2981 static inline int ops_traces_mod(struct ftrace_ops *ops) 2982 { 2983 /* 2984 * Filter_hash being empty will default to tracing the module. 2985 * But notrace hash requires a test of individual module functions.
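 *
 * E.g. after something like (a sketch):
 *
 *	ftrace_set_filter(ops, "schedule", 8, 1);
 *
 * the filter_hash is no longer empty, so a newly loaded module is not
 * automatically covered by this ops and each of its records has to be
 * checked individually (see ops_references_rec() below).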
2986 */ 2987 return ftrace_hash_empty(ops->func_hash->filter_hash) && 2988 ftrace_hash_empty(ops->func_hash->notrace_hash); 2989 } 2990 2991 /* 2992 * Check if the current ops references the record. 2993 * 2994 * If the ops traces all functions, then it was already accounted for. 2995 * If the ops does not trace the current record function, skip it. 2996 * If the ops ignores the function via notrace filter, skip it. 2997 */ 2998 static inline bool 2999 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) 3000 { 3001 /* If ops isn't enabled, ignore it */ 3002 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 3003 return false; 3004 3005 /* If ops traces all then it includes this function */ 3006 if (ops_traces_mod(ops)) 3007 return true; 3008 3009 /* The function must be in the filter */ 3010 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && 3011 !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) 3012 return false; 3013 3014 /* If in notrace hash, we ignore it too */ 3015 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) 3016 return false; 3017 3018 return true; 3019 } 3020 3021 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) 3022 { 3023 struct ftrace_page *pg; 3024 struct dyn_ftrace *p; 3025 u64 start, stop; 3026 unsigned long update_cnt = 0; 3027 unsigned long rec_flags = 0; 3028 int i; 3029 3030 start = ftrace_now(raw_smp_processor_id()); 3031 3032 /* 3033 * When a module is loaded, this function is called to convert 3034 * the calls to mcount in its text to nops, and also to create 3035 * an entry in the ftrace data. Now, if ftrace is activated 3036 * after this call, but before the module sets its text to 3037 * read-only, the modification of enabling ftrace can fail if 3038 * the read-only is done while ftrace is converting the calls. 3039 * To prevent this, the module's records are set as disabled 3040 * and will be enabled after the call to set the module's text 3041 * to read-only. 3042 */ 3043 if (mod) 3044 rec_flags |= FTRACE_FL_DISABLED; 3045 3046 for (pg = new_pgs; pg; pg = pg->next) { 3047 3048 for (i = 0; i < pg->index; i++) { 3049 3050 /* If something went wrong, bail without enabling anything */ 3051 if (unlikely(ftrace_disabled)) 3052 return -1; 3053 3054 p = &pg->records[i]; 3055 p->flags = rec_flags; 3056 3057 /* 3058 * Do the initial record conversion from mcount jump 3059 * to the NOP instructions. 3060 */ 3061 if (!__is_defined(CC_USING_NOP_MCOUNT) && 3062 !ftrace_nop_initialize(mod, p)) 3063 break; 3064 3065 update_cnt++; 3066 } 3067 } 3068 3069 stop = ftrace_now(raw_smp_processor_id()); 3070 ftrace_update_time = stop - start; 3071 ftrace_update_tot_cnt += update_cnt; 3072 3073 return 0; 3074 } 3075 3076 static int ftrace_allocate_records(struct ftrace_page *pg, int count) 3077 { 3078 int order; 3079 int cnt; 3080 3081 if (WARN_ON(!count)) 3082 return -EINVAL; 3083 3084 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); 3085 3086 /* 3087 * We want to fill as much as possible. No more than a page 3088 * may be empty. 
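 *
 * A worked example, assuming a PAGE_SIZE of 4096 and an ENTRY_SIZE of
 * 16 bytes (so ENTRIES_PER_PAGE == 256; the real sizes depend on the
 * architecture): for count == 1000,
 *
 *	order = get_count_order(DIV_ROUND_UP(1000, 256)) = get_count_order(4) = 2
 *	(4096 << 2) / 16 = 1024 entries, and 1024 < 1000 + 256,
 *
 * so the loop below leaves order at 2: four pages holding 1024 slots,
 * of which 1000 are reported back to the caller.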
3089 */ 3090 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) 3091 order--; 3092 3093 again: 3094 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 3095 3096 if (!pg->records) { 3097 /* if we can't allocate this size, try something smaller */ 3098 if (!order) 3099 return -ENOMEM; 3100 order >>= 1; 3101 goto again; 3102 } 3103 3104 ftrace_number_of_pages += 1 << order; 3105 ftrace_number_of_groups++; 3106 3107 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; 3108 pg->size = cnt; 3109 3110 if (cnt > count) 3111 cnt = count; 3112 3113 return cnt; 3114 } 3115 3116 static struct ftrace_page * 3117 ftrace_allocate_pages(unsigned long num_to_init) 3118 { 3119 struct ftrace_page *start_pg; 3120 struct ftrace_page *pg; 3121 int order; 3122 int cnt; 3123 3124 if (!num_to_init) 3125 return NULL; 3126 3127 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); 3128 if (!pg) 3129 return NULL; 3130 3131 /* 3132 * Try to allocate as much as possible in one continues 3133 * location that fills in all of the space. We want to 3134 * waste as little space as possible. 3135 */ 3136 for (;;) { 3137 cnt = ftrace_allocate_records(pg, num_to_init); 3138 if (cnt < 0) 3139 goto free_pages; 3140 3141 num_to_init -= cnt; 3142 if (!num_to_init) 3143 break; 3144 3145 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); 3146 if (!pg->next) 3147 goto free_pages; 3148 3149 pg = pg->next; 3150 } 3151 3152 return start_pg; 3153 3154 free_pages: 3155 pg = start_pg; 3156 while (pg) { 3157 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 3158 free_pages((unsigned long)pg->records, order); 3159 start_pg = pg->next; 3160 kfree(pg); 3161 pg = start_pg; 3162 ftrace_number_of_pages -= 1 << order; 3163 ftrace_number_of_groups--; 3164 } 3165 pr_info("ftrace: FAILED to allocate memory for functions\n"); 3166 return NULL; 3167 } 3168 3169 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 3170 3171 struct ftrace_iterator { 3172 loff_t pos; 3173 loff_t func_pos; 3174 loff_t mod_pos; 3175 struct ftrace_page *pg; 3176 struct dyn_ftrace *func; 3177 struct ftrace_func_probe *probe; 3178 struct ftrace_func_entry *probe_entry; 3179 struct trace_parser parser; 3180 struct ftrace_hash *hash; 3181 struct ftrace_ops *ops; 3182 struct trace_array *tr; 3183 struct list_head *mod_list; 3184 int pidx; 3185 int idx; 3186 unsigned flags; 3187 }; 3188 3189 static void * 3190 t_probe_next(struct seq_file *m, loff_t *pos) 3191 { 3192 struct ftrace_iterator *iter = m->private; 3193 struct trace_array *tr = iter->ops->private; 3194 struct list_head *func_probes; 3195 struct ftrace_hash *hash; 3196 struct list_head *next; 3197 struct hlist_node *hnd = NULL; 3198 struct hlist_head *hhd; 3199 int size; 3200 3201 (*pos)++; 3202 iter->pos = *pos; 3203 3204 if (!tr) 3205 return NULL; 3206 3207 func_probes = &tr->func_probes; 3208 if (list_empty(func_probes)) 3209 return NULL; 3210 3211 if (!iter->probe) { 3212 next = func_probes->next; 3213 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3214 } 3215 3216 if (iter->probe_entry) 3217 hnd = &iter->probe_entry->hlist; 3218 3219 hash = iter->probe->ops.func_hash->filter_hash; 3220 3221 /* 3222 * A probe being registered may temporarily have an empty hash 3223 * and it's at the end of the func_probes list. 
3224 */ 3225 if (!hash || hash == EMPTY_HASH) 3226 return NULL; 3227 3228 size = 1 << hash->size_bits; 3229 3230 retry: 3231 if (iter->pidx >= size) { 3232 if (iter->probe->list.next == func_probes) 3233 return NULL; 3234 next = iter->probe->list.next; 3235 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3236 hash = iter->probe->ops.func_hash->filter_hash; 3237 size = 1 << hash->size_bits; 3238 iter->pidx = 0; 3239 } 3240 3241 hhd = &hash->buckets[iter->pidx]; 3242 3243 if (hlist_empty(hhd)) { 3244 iter->pidx++; 3245 hnd = NULL; 3246 goto retry; 3247 } 3248 3249 if (!hnd) 3250 hnd = hhd->first; 3251 else { 3252 hnd = hnd->next; 3253 if (!hnd) { 3254 iter->pidx++; 3255 goto retry; 3256 } 3257 } 3258 3259 if (WARN_ON_ONCE(!hnd)) 3260 return NULL; 3261 3262 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); 3263 3264 return iter; 3265 } 3266 3267 static void *t_probe_start(struct seq_file *m, loff_t *pos) 3268 { 3269 struct ftrace_iterator *iter = m->private; 3270 void *p = NULL; 3271 loff_t l; 3272 3273 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) 3274 return NULL; 3275 3276 if (iter->mod_pos > *pos) 3277 return NULL; 3278 3279 iter->probe = NULL; 3280 iter->probe_entry = NULL; 3281 iter->pidx = 0; 3282 for (l = 0; l <= (*pos - iter->mod_pos); ) { 3283 p = t_probe_next(m, &l); 3284 if (!p) 3285 break; 3286 } 3287 if (!p) 3288 return NULL; 3289 3290 /* Only set this if we have an item */ 3291 iter->flags |= FTRACE_ITER_PROBE; 3292 3293 return iter; 3294 } 3295 3296 static int 3297 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) 3298 { 3299 struct ftrace_func_entry *probe_entry; 3300 struct ftrace_probe_ops *probe_ops; 3301 struct ftrace_func_probe *probe; 3302 3303 probe = iter->probe; 3304 probe_entry = iter->probe_entry; 3305 3306 if (WARN_ON_ONCE(!probe || !probe_entry)) 3307 return -EIO; 3308 3309 probe_ops = probe->probe_ops; 3310 3311 if (probe_ops->print) 3312 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); 3313 3314 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, 3315 (void *)probe_ops->func); 3316 3317 return 0; 3318 } 3319 3320 static void * 3321 t_mod_next(struct seq_file *m, loff_t *pos) 3322 { 3323 struct ftrace_iterator *iter = m->private; 3324 struct trace_array *tr = iter->tr; 3325 3326 (*pos)++; 3327 iter->pos = *pos; 3328 3329 iter->mod_list = iter->mod_list->next; 3330 3331 if (iter->mod_list == &tr->mod_trace || 3332 iter->mod_list == &tr->mod_notrace) { 3333 iter->flags &= ~FTRACE_ITER_MOD; 3334 return NULL; 3335 } 3336 3337 iter->mod_pos = *pos; 3338 3339 return iter; 3340 } 3341 3342 static void *t_mod_start(struct seq_file *m, loff_t *pos) 3343 { 3344 struct ftrace_iterator *iter = m->private; 3345 void *p = NULL; 3346 loff_t l; 3347 3348 if (iter->func_pos > *pos) 3349 return NULL; 3350 3351 iter->mod_pos = iter->func_pos; 3352 3353 /* probes are only available if tr is set */ 3354 if (!iter->tr) 3355 return NULL; 3356 3357 for (l = 0; l <= (*pos - iter->func_pos); ) { 3358 p = t_mod_next(m, &l); 3359 if (!p) 3360 break; 3361 } 3362 if (!p) { 3363 iter->flags &= ~FTRACE_ITER_MOD; 3364 return t_probe_start(m, pos); 3365 } 3366 3367 /* Only set this if we have an item */ 3368 iter->flags |= FTRACE_ITER_MOD; 3369 3370 return iter; 3371 } 3372 3373 static int 3374 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) 3375 { 3376 struct ftrace_mod_load *ftrace_mod; 3377 struct trace_array *tr = iter->tr; 3378 3379 if (WARN_ON_ONCE(!iter->mod_list) || 3380 iter->mod_list == &tr->mod_trace || 
3381 iter->mod_list == &tr->mod_notrace) 3382 return -EIO; 3383 3384 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); 3385 3386 if (ftrace_mod->func) 3387 seq_printf(m, "%s", ftrace_mod->func); 3388 else 3389 seq_putc(m, '*'); 3390 3391 seq_printf(m, ":mod:%s\n", ftrace_mod->module); 3392 3393 return 0; 3394 } 3395 3396 static void * 3397 t_func_next(struct seq_file *m, loff_t *pos) 3398 { 3399 struct ftrace_iterator *iter = m->private; 3400 struct dyn_ftrace *rec = NULL; 3401 3402 (*pos)++; 3403 3404 retry: 3405 if (iter->idx >= iter->pg->index) { 3406 if (iter->pg->next) { 3407 iter->pg = iter->pg->next; 3408 iter->idx = 0; 3409 goto retry; 3410 } 3411 } else { 3412 rec = &iter->pg->records[iter->idx++]; 3413 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 3414 !ftrace_lookup_ip(iter->hash, rec->ip)) || 3415 3416 ((iter->flags & FTRACE_ITER_ENABLED) && 3417 !(rec->flags & FTRACE_FL_ENABLED))) { 3418 3419 rec = NULL; 3420 goto retry; 3421 } 3422 } 3423 3424 if (!rec) 3425 return NULL; 3426 3427 iter->pos = iter->func_pos = *pos; 3428 iter->func = rec; 3429 3430 return iter; 3431 } 3432 3433 static void * 3434 t_next(struct seq_file *m, void *v, loff_t *pos) 3435 { 3436 struct ftrace_iterator *iter = m->private; 3437 loff_t l = *pos; /* t_probe_start() must use original pos */ 3438 void *ret; 3439 3440 if (unlikely(ftrace_disabled)) 3441 return NULL; 3442 3443 if (iter->flags & FTRACE_ITER_PROBE) 3444 return t_probe_next(m, pos); 3445 3446 if (iter->flags & FTRACE_ITER_MOD) 3447 return t_mod_next(m, pos); 3448 3449 if (iter->flags & FTRACE_ITER_PRINTALL) { 3450 /* next must increment pos, and t_probe_start does not */ 3451 (*pos)++; 3452 return t_mod_start(m, &l); 3453 } 3454 3455 ret = t_func_next(m, pos); 3456 3457 if (!ret) 3458 return t_mod_start(m, &l); 3459 3460 return ret; 3461 } 3462 3463 static void reset_iter_read(struct ftrace_iterator *iter) 3464 { 3465 iter->pos = 0; 3466 iter->func_pos = 0; 3467 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); 3468 } 3469 3470 static void *t_start(struct seq_file *m, loff_t *pos) 3471 { 3472 struct ftrace_iterator *iter = m->private; 3473 void *p = NULL; 3474 loff_t l; 3475 3476 mutex_lock(&ftrace_lock); 3477 3478 if (unlikely(ftrace_disabled)) 3479 return NULL; 3480 3481 /* 3482 * If an lseek was done, then reset and start from beginning. 3483 */ 3484 if (*pos < iter->pos) 3485 reset_iter_read(iter); 3486 3487 /* 3488 * For set_ftrace_filter reading, if we have the filter 3489 * off, we can short cut and just print out that all 3490 * functions are enabled. 3491 */ 3492 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 3493 ftrace_hash_empty(iter->hash)) { 3494 iter->func_pos = 1; /* Account for the message */ 3495 if (*pos > 0) 3496 return t_mod_start(m, pos); 3497 iter->flags |= FTRACE_ITER_PRINTALL; 3498 /* reset in case of seek/pread */ 3499 iter->flags &= ~FTRACE_ITER_PROBE; 3500 return iter; 3501 } 3502 3503 if (iter->flags & FTRACE_ITER_MOD) 3504 return t_mod_start(m, pos); 3505 3506 /* 3507 * Unfortunately, we need to restart at ftrace_pages_start 3508 * every time we let go of the ftrace_mutex. This is because 3509 * those pointers can change without the lock. 
3510 */ 3511 iter->pg = ftrace_pages_start; 3512 iter->idx = 0; 3513 for (l = 0; l <= *pos; ) { 3514 p = t_func_next(m, &l); 3515 if (!p) 3516 break; 3517 } 3518 3519 if (!p) 3520 return t_mod_start(m, pos); 3521 3522 return iter; 3523 } 3524 3525 static void t_stop(struct seq_file *m, void *p) 3526 { 3527 mutex_unlock(&ftrace_lock); 3528 } 3529 3530 void * __weak 3531 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 3532 { 3533 return NULL; 3534 } 3535 3536 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, 3537 struct dyn_ftrace *rec) 3538 { 3539 void *ptr; 3540 3541 ptr = arch_ftrace_trampoline_func(ops, rec); 3542 if (ptr) 3543 seq_printf(m, " ->%pS", ptr); 3544 } 3545 3546 static int t_show(struct seq_file *m, void *v) 3547 { 3548 struct ftrace_iterator *iter = m->private; 3549 struct dyn_ftrace *rec; 3550 3551 if (iter->flags & FTRACE_ITER_PROBE) 3552 return t_probe_show(m, iter); 3553 3554 if (iter->flags & FTRACE_ITER_MOD) 3555 return t_mod_show(m, iter); 3556 3557 if (iter->flags & FTRACE_ITER_PRINTALL) { 3558 if (iter->flags & FTRACE_ITER_NOTRACE) 3559 seq_puts(m, "#### no functions disabled ####\n"); 3560 else 3561 seq_puts(m, "#### all functions enabled ####\n"); 3562 return 0; 3563 } 3564 3565 rec = iter->func; 3566 3567 if (!rec) 3568 return 0; 3569 3570 seq_printf(m, "%ps", (void *)rec->ip); 3571 if (iter->flags & FTRACE_ITER_ENABLED) { 3572 struct ftrace_ops *ops; 3573 3574 seq_printf(m, " (%ld)%s%s%s", 3575 ftrace_rec_count(rec), 3576 rec->flags & FTRACE_FL_REGS ? " R" : " ", 3577 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ", 3578 rec->flags & FTRACE_FL_DIRECT ? " D" : " "); 3579 if (rec->flags & FTRACE_FL_TRAMP_EN) { 3580 ops = ftrace_find_tramp_ops_any(rec); 3581 if (ops) { 3582 do { 3583 seq_printf(m, "\ttramp: %pS (%pS)", 3584 (void *)ops->trampoline, 3585 (void *)ops->func); 3586 add_trampoline_func(m, ops, rec); 3587 ops = ftrace_find_tramp_ops_next(rec, ops); 3588 } while (ops); 3589 } else 3590 seq_puts(m, "\ttramp: ERROR!"); 3591 } else { 3592 add_trampoline_func(m, NULL, rec); 3593 } 3594 if (rec->flags & FTRACE_FL_DIRECT) { 3595 unsigned long direct; 3596 3597 direct = ftrace_find_rec_direct(rec->ip); 3598 if (direct) 3599 seq_printf(m, "\n\tdirect-->%pS", (void *)direct); 3600 } 3601 } 3602 3603 seq_putc(m, '\n'); 3604 3605 return 0; 3606 } 3607 3608 static const struct seq_operations show_ftrace_seq_ops = { 3609 .start = t_start, 3610 .next = t_next, 3611 .stop = t_stop, 3612 .show = t_show, 3613 }; 3614 3615 static int 3616 ftrace_avail_open(struct inode *inode, struct file *file) 3617 { 3618 struct ftrace_iterator *iter; 3619 int ret; 3620 3621 ret = security_locked_down(LOCKDOWN_TRACEFS); 3622 if (ret) 3623 return ret; 3624 3625 if (unlikely(ftrace_disabled)) 3626 return -ENODEV; 3627 3628 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3629 if (!iter) 3630 return -ENOMEM; 3631 3632 iter->pg = ftrace_pages_start; 3633 iter->ops = &global_ops; 3634 3635 return 0; 3636 } 3637 3638 static int 3639 ftrace_enabled_open(struct inode *inode, struct file *file) 3640 { 3641 struct ftrace_iterator *iter; 3642 3643 /* 3644 * This shows us what functions are currently being 3645 * traced and by what. Not sure if we want lockdown 3646 * to hide such critical information for an admin. 3647 * Although, perhaps it can show information we don't 3648 * want people to see, but if something is tracing 3649 * something, we probably want to know about it. 
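 *
 * For context, each record printed by this file has roughly the form
 * (an illustrative rendering, not literal output):
 *
 *	<function> (<nr of callbacks>) [R] [I] [D]  tramp: <addr> (<ops func>)
 *
 * i.e. it exposes which functions are patched and what register-saving,
 * ipmodify, direct and trampoline users are attached to them.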
3650 */ 3651 3652 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3653 if (!iter) 3654 return -ENOMEM; 3655 3656 iter->pg = ftrace_pages_start; 3657 iter->flags = FTRACE_ITER_ENABLED; 3658 iter->ops = &global_ops; 3659 3660 return 0; 3661 } 3662 3663 /** 3664 * ftrace_regex_open - initialize function tracer filter files 3665 * @ops: The ftrace_ops that hold the hash filters 3666 * @flag: The type of filter to process 3667 * @inode: The inode, usually passed in to your open routine 3668 * @file: The file, usually passed in to your open routine 3669 * 3670 * ftrace_regex_open() initializes the filter files for the 3671 * @ops. Depending on @flag it may process the filter hash or 3672 * the notrace hash of @ops. With this called from the open 3673 * routine, you can use ftrace_filter_write() for the write 3674 * routine if @flag has FTRACE_ITER_FILTER set, or 3675 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 3676 * tracing_lseek() should be used as the lseek routine, and 3677 * release must call ftrace_regex_release(). 3678 */ 3679 int 3680 ftrace_regex_open(struct ftrace_ops *ops, int flag, 3681 struct inode *inode, struct file *file) 3682 { 3683 struct ftrace_iterator *iter; 3684 struct ftrace_hash *hash; 3685 struct list_head *mod_head; 3686 struct trace_array *tr = ops->private; 3687 int ret = -ENOMEM; 3688 3689 ftrace_ops_init(ops); 3690 3691 if (unlikely(ftrace_disabled)) 3692 return -ENODEV; 3693 3694 if (tracing_check_open_get_tr(tr)) 3695 return -ENODEV; 3696 3697 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3698 if (!iter) 3699 goto out; 3700 3701 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) 3702 goto out; 3703 3704 iter->ops = ops; 3705 iter->flags = flag; 3706 iter->tr = tr; 3707 3708 mutex_lock(&ops->func_hash->regex_lock); 3709 3710 if (flag & FTRACE_ITER_NOTRACE) { 3711 hash = ops->func_hash->notrace_hash; 3712 mod_head = tr ? &tr->mod_notrace : NULL; 3713 } else { 3714 hash = ops->func_hash->filter_hash; 3715 mod_head = tr ? 
&tr->mod_trace : NULL; 3716 } 3717 3718 iter->mod_list = mod_head; 3719 3720 if (file->f_mode & FMODE_WRITE) { 3721 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 3722 3723 if (file->f_flags & O_TRUNC) { 3724 iter->hash = alloc_ftrace_hash(size_bits); 3725 clear_ftrace_mod_list(mod_head); 3726 } else { 3727 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); 3728 } 3729 3730 if (!iter->hash) { 3731 trace_parser_put(&iter->parser); 3732 goto out_unlock; 3733 } 3734 } else 3735 iter->hash = hash; 3736 3737 ret = 0; 3738 3739 if (file->f_mode & FMODE_READ) { 3740 iter->pg = ftrace_pages_start; 3741 3742 ret = seq_open(file, &show_ftrace_seq_ops); 3743 if (!ret) { 3744 struct seq_file *m = file->private_data; 3745 m->private = iter; 3746 } else { 3747 /* Failed */ 3748 free_ftrace_hash(iter->hash); 3749 trace_parser_put(&iter->parser); 3750 } 3751 } else 3752 file->private_data = iter; 3753 3754 out_unlock: 3755 mutex_unlock(&ops->func_hash->regex_lock); 3756 3757 out: 3758 if (ret) { 3759 kfree(iter); 3760 if (tr) 3761 trace_array_put(tr); 3762 } 3763 3764 return ret; 3765 } 3766 3767 static int 3768 ftrace_filter_open(struct inode *inode, struct file *file) 3769 { 3770 struct ftrace_ops *ops = inode->i_private; 3771 3772 /* Checks for tracefs lockdown */ 3773 return ftrace_regex_open(ops, 3774 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, 3775 inode, file); 3776 } 3777 3778 static int 3779 ftrace_notrace_open(struct inode *inode, struct file *file) 3780 { 3781 struct ftrace_ops *ops = inode->i_private; 3782 3783 /* Checks for tracefs lockdown */ 3784 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 3785 inode, file); 3786 } 3787 3788 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ 3789 struct ftrace_glob { 3790 char *search; 3791 unsigned len; 3792 int type; 3793 }; 3794 3795 /* 3796 * If symbols in an architecture don't correspond exactly to the user-visible 3797 * name of what they represent, it is possible to define this function to 3798 * perform the necessary adjustments. 
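 *
 * For example, an architecture whose function entry symbols carry a
 * leading '.' could provide something like (a sketch):
 *
 *	char *arch_ftrace_match_adjust(char *str, const char *search)
 *	{
 *		if (str[0] == '.' && search[0] != '.')
 *			str++;
 *		return str;
 *	}
 *
 * so that a user writing "schedule" still matches the ".schedule" symbol.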
3799 */ 3800 char * __weak arch_ftrace_match_adjust(char *str, const char *search) 3801 { 3802 return str; 3803 } 3804 3805 static int ftrace_match(char *str, struct ftrace_glob *g) 3806 { 3807 int matched = 0; 3808 int slen; 3809 3810 str = arch_ftrace_match_adjust(str, g->search); 3811 3812 switch (g->type) { 3813 case MATCH_FULL: 3814 if (strcmp(str, g->search) == 0) 3815 matched = 1; 3816 break; 3817 case MATCH_FRONT_ONLY: 3818 if (strncmp(str, g->search, g->len) == 0) 3819 matched = 1; 3820 break; 3821 case MATCH_MIDDLE_ONLY: 3822 if (strstr(str, g->search)) 3823 matched = 1; 3824 break; 3825 case MATCH_END_ONLY: 3826 slen = strlen(str); 3827 if (slen >= g->len && 3828 memcmp(str + slen - g->len, g->search, g->len) == 0) 3829 matched = 1; 3830 break; 3831 case MATCH_GLOB: 3832 if (glob_match(g->search, str)) 3833 matched = 1; 3834 break; 3835 } 3836 3837 return matched; 3838 } 3839 3840 static int 3841 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) 3842 { 3843 struct ftrace_func_entry *entry; 3844 int ret = 0; 3845 3846 entry = ftrace_lookup_ip(hash, rec->ip); 3847 if (clear_filter) { 3848 /* Do nothing if it doesn't exist */ 3849 if (!entry) 3850 return 0; 3851 3852 free_hash_entry(hash, entry); 3853 } else { 3854 /* Do nothing if it exists */ 3855 if (entry) 3856 return 0; 3857 3858 ret = add_hash_entry(hash, rec->ip); 3859 } 3860 return ret; 3861 } 3862 3863 static int 3864 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, 3865 int clear_filter) 3866 { 3867 long index = simple_strtoul(func_g->search, NULL, 0); 3868 struct ftrace_page *pg; 3869 struct dyn_ftrace *rec; 3870 3871 /* The index starts at 1 */ 3872 if (--index < 0) 3873 return 0; 3874 3875 do_for_each_ftrace_rec(pg, rec) { 3876 if (pg->index <= index) { 3877 index -= pg->index; 3878 /* this is a double loop, break goes to the next page */ 3879 break; 3880 } 3881 rec = &pg->records[index]; 3882 enter_record(hash, rec, clear_filter); 3883 return 1; 3884 } while_for_each_ftrace_rec(); 3885 return 0; 3886 } 3887 3888 static int 3889 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, 3890 struct ftrace_glob *mod_g, int exclude_mod) 3891 { 3892 char str[KSYM_SYMBOL_LEN]; 3893 char *modname; 3894 3895 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); 3896 3897 if (mod_g) { 3898 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; 3899 3900 /* blank module name to match all modules */ 3901 if (!mod_g->len) { 3902 /* blank module globbing: modname xor exclude_mod */ 3903 if (!exclude_mod != !modname) 3904 goto func_match; 3905 return 0; 3906 } 3907 3908 /* 3909 * exclude_mod is set to trace everything but the given 3910 * module. If it is set and the module matches, then 3911 * return 0. If it is not set, and the module doesn't match 3912 * also return 0. Otherwise, check the function to see if 3913 * that matches. 3914 */ 3915 if (!mod_matches == !exclude_mod) 3916 return 0; 3917 func_match: 3918 /* blank search means to match all funcs in the mod */ 3919 if (!func_g->len) 3920 return 1; 3921 } 3922 3923 return ftrace_match(str, func_g); 3924 } 3925 3926 static int 3927 match_records(struct ftrace_hash *hash, char *func, int len, char *mod) 3928 { 3929 struct ftrace_page *pg; 3930 struct dyn_ftrace *rec; 3931 struct ftrace_glob func_g = { .type = MATCH_FULL }; 3932 struct ftrace_glob mod_g = { .type = MATCH_FULL }; 3933 struct ftrace_glob *mod_match = (mod) ? 
&mod_g : NULL; 3934 int exclude_mod = 0; 3935 int found = 0; 3936 int ret; 3937 int clear_filter = 0; 3938 3939 if (func) { 3940 func_g.type = filter_parse_regex(func, len, &func_g.search, 3941 &clear_filter); 3942 func_g.len = strlen(func_g.search); 3943 } 3944 3945 if (mod) { 3946 mod_g.type = filter_parse_regex(mod, strlen(mod), 3947 &mod_g.search, &exclude_mod); 3948 mod_g.len = strlen(mod_g.search); 3949 } 3950 3951 mutex_lock(&ftrace_lock); 3952 3953 if (unlikely(ftrace_disabled)) 3954 goto out_unlock; 3955 3956 if (func_g.type == MATCH_INDEX) { 3957 found = add_rec_by_index(hash, &func_g, clear_filter); 3958 goto out_unlock; 3959 } 3960 3961 do_for_each_ftrace_rec(pg, rec) { 3962 3963 if (rec->flags & FTRACE_FL_DISABLED) 3964 continue; 3965 3966 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 3967 ret = enter_record(hash, rec, clear_filter); 3968 if (ret < 0) { 3969 found = ret; 3970 goto out_unlock; 3971 } 3972 found = 1; 3973 } 3974 } while_for_each_ftrace_rec(); 3975 out_unlock: 3976 mutex_unlock(&ftrace_lock); 3977 3978 return found; 3979 } 3980 3981 static int 3982 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) 3983 { 3984 return match_records(hash, buff, len, NULL); 3985 } 3986 3987 static void ftrace_ops_update_code(struct ftrace_ops *ops, 3988 struct ftrace_ops_hash *old_hash) 3989 { 3990 struct ftrace_ops *op; 3991 3992 if (!ftrace_enabled) 3993 return; 3994 3995 if (ops->flags & FTRACE_OPS_FL_ENABLED) { 3996 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); 3997 return; 3998 } 3999 4000 /* 4001 * If this is the shared global_ops filter, then we need to 4002 * check if there is another ops that shares it, is enabled. 4003 * If so, we still need to run the modify code. 4004 */ 4005 if (ops->func_hash != &global_ops.local_hash) 4006 return; 4007 4008 do_for_each_ftrace_op(op, ftrace_ops_list) { 4009 if (op->func_hash == &global_ops.local_hash && 4010 op->flags & FTRACE_OPS_FL_ENABLED) { 4011 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); 4012 /* Only need to do this once */ 4013 return; 4014 } 4015 } while_for_each_ftrace_op(op); 4016 } 4017 4018 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, 4019 struct ftrace_hash **orig_hash, 4020 struct ftrace_hash *hash, 4021 int enable) 4022 { 4023 struct ftrace_ops_hash old_hash_ops; 4024 struct ftrace_hash *old_hash; 4025 int ret; 4026 4027 old_hash = *orig_hash; 4028 old_hash_ops.filter_hash = ops->func_hash->filter_hash; 4029 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 4030 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 4031 if (!ret) { 4032 ftrace_ops_update_code(ops, &old_hash_ops); 4033 free_ftrace_hash_rcu(old_hash); 4034 } 4035 return ret; 4036 } 4037 4038 static bool module_exists(const char *module) 4039 { 4040 /* All modules have the symbol __this_module */ 4041 static const char this_mod[] = "__this_module"; 4042 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 4043 unsigned long val; 4044 int n; 4045 4046 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); 4047 4048 if (n > sizeof(modname) - 1) 4049 return false; 4050 4051 val = module_kallsyms_lookup_name(modname); 4052 return val != 0; 4053 } 4054 4055 static int cache_mod(struct trace_array *tr, 4056 const char *func, char *module, int enable) 4057 { 4058 struct ftrace_mod_load *ftrace_mod, *n; 4059 struct list_head *head = enable ? 
&tr->mod_trace : &tr->mod_notrace; 4060 int ret; 4061 4062 mutex_lock(&ftrace_lock); 4063 4064 /* We do not cache inverse filters */ 4065 if (func[0] == '!') { 4066 func++; 4067 ret = -EINVAL; 4068 4069 /* Look to remove this hash */ 4070 list_for_each_entry_safe(ftrace_mod, n, head, list) { 4071 if (strcmp(ftrace_mod->module, module) != 0) 4072 continue; 4073 4074 /* no func matches all */ 4075 if (strcmp(func, "*") == 0 || 4076 (ftrace_mod->func && 4077 strcmp(ftrace_mod->func, func) == 0)) { 4078 ret = 0; 4079 free_ftrace_mod(ftrace_mod); 4080 continue; 4081 } 4082 } 4083 goto out; 4084 } 4085 4086 ret = -EINVAL; 4087 /* We only care about modules that have not been loaded yet */ 4088 if (module_exists(module)) 4089 goto out; 4090 4091 /* Save this string off, and execute it when the module is loaded */ 4092 ret = ftrace_add_mod(tr, func, module, enable); 4093 out: 4094 mutex_unlock(&ftrace_lock); 4095 4096 return ret; 4097 } 4098 4099 static int 4100 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 4101 int reset, int enable); 4102 4103 #ifdef CONFIG_MODULES 4104 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, 4105 char *mod, bool enable) 4106 { 4107 struct ftrace_mod_load *ftrace_mod, *n; 4108 struct ftrace_hash **orig_hash, *new_hash; 4109 LIST_HEAD(process_mods); 4110 char *func; 4111 int ret; 4112 4113 mutex_lock(&ops->func_hash->regex_lock); 4114 4115 if (enable) 4116 orig_hash = &ops->func_hash->filter_hash; 4117 else 4118 orig_hash = &ops->func_hash->notrace_hash; 4119 4120 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, 4121 *orig_hash); 4122 if (!new_hash) 4123 goto out; /* warn? */ 4124 4125 mutex_lock(&ftrace_lock); 4126 4127 list_for_each_entry_safe(ftrace_mod, n, head, list) { 4128 4129 if (strcmp(ftrace_mod->module, mod) != 0) 4130 continue; 4131 4132 if (ftrace_mod->func) 4133 func = kstrdup(ftrace_mod->func, GFP_KERNEL); 4134 else 4135 func = kstrdup("*", GFP_KERNEL); 4136 4137 if (!func) /* warn? 
*/ 4138 continue; 4139 4140 list_del(&ftrace_mod->list); 4141 list_add(&ftrace_mod->list, &process_mods); 4142 4143 /* Use the newly allocated func, as it may be "*" */ 4144 kfree(ftrace_mod->func); 4145 ftrace_mod->func = func; 4146 } 4147 4148 mutex_unlock(&ftrace_lock); 4149 4150 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { 4151 4152 func = ftrace_mod->func; 4153 4154 /* Grabs ftrace_lock, which is why we have this extra step */ 4155 match_records(new_hash, func, strlen(func), mod); 4156 free_ftrace_mod(ftrace_mod); 4157 } 4158 4159 if (enable && list_empty(head)) 4160 new_hash->flags &= ~FTRACE_HASH_FL_MOD; 4161 4162 mutex_lock(&ftrace_lock); 4163 4164 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, 4165 new_hash, enable); 4166 mutex_unlock(&ftrace_lock); 4167 4168 out: 4169 mutex_unlock(&ops->func_hash->regex_lock); 4170 4171 free_ftrace_hash(new_hash); 4172 } 4173 4174 static void process_cached_mods(const char *mod_name) 4175 { 4176 struct trace_array *tr; 4177 char *mod; 4178 4179 mod = kstrdup(mod_name, GFP_KERNEL); 4180 if (!mod) 4181 return; 4182 4183 mutex_lock(&trace_types_lock); 4184 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 4185 if (!list_empty(&tr->mod_trace)) 4186 process_mod_list(&tr->mod_trace, tr->ops, mod, true); 4187 if (!list_empty(&tr->mod_notrace)) 4188 process_mod_list(&tr->mod_notrace, tr->ops, mod, false); 4189 } 4190 mutex_unlock(&trace_types_lock); 4191 4192 kfree(mod); 4193 } 4194 #endif 4195 4196 /* 4197 * We register the module command as a template to show others how 4198 * to register the a command as well. 4199 */ 4200 4201 static int 4202 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, 4203 char *func_orig, char *cmd, char *module, int enable) 4204 { 4205 char *func; 4206 int ret; 4207 4208 /* match_records() modifies func, and we need the original */ 4209 func = kstrdup(func_orig, GFP_KERNEL); 4210 if (!func) 4211 return -ENOMEM; 4212 4213 /* 4214 * cmd == 'mod' because we only registered this func 4215 * for the 'mod' ftrace_func_command. 4216 * But if you register one func with multiple commands, 4217 * you can tell which command was used by the cmd 4218 * parameter. 4219 */ 4220 ret = match_records(hash, func, strlen(func), module); 4221 kfree(func); 4222 4223 if (!ret) 4224 return cache_mod(tr, func_orig, module, enable); 4225 if (ret < 0) 4226 return ret; 4227 return 0; 4228 } 4229 4230 static struct ftrace_func_command ftrace_mod_cmd = { 4231 .name = "mod", 4232 .func = ftrace_mod_callback, 4233 }; 4234 4235 static int __init ftrace_mod_cmd_init(void) 4236 { 4237 return register_ftrace_command(&ftrace_mod_cmd); 4238 } 4239 core_initcall(ftrace_mod_cmd_init); 4240 4241 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, 4242 struct ftrace_ops *op, struct pt_regs *pt_regs) 4243 { 4244 struct ftrace_probe_ops *probe_ops; 4245 struct ftrace_func_probe *probe; 4246 4247 probe = container_of(op, struct ftrace_func_probe, ops); 4248 probe_ops = probe->probe_ops; 4249 4250 /* 4251 * Disable preemption for these calls to prevent a RCU grace 4252 * period. This syncs the hash iteration and freeing of items 4253 * on the hash. rcu_read_lock is too dangerous here. 
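 *
 * A probe_ops->func therefore runs in atomic context and must not
 * sleep. A minimal sketch of such a callback (my_probe_func is a
 * placeholder name):
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *		// counters, flag updates, trace writes only
 *	}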
4254 */ 4255 preempt_disable_notrace(); 4256 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); 4257 preempt_enable_notrace(); 4258 } 4259 4260 struct ftrace_func_map { 4261 struct ftrace_func_entry entry; 4262 void *data; 4263 }; 4264 4265 struct ftrace_func_mapper { 4266 struct ftrace_hash hash; 4267 }; 4268 4269 /** 4270 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper 4271 * 4272 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data. 4273 */ 4274 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void) 4275 { 4276 struct ftrace_hash *hash; 4277 4278 /* 4279 * The mapper is simply a ftrace_hash, but since the entries 4280 * in the hash are not ftrace_func_entry type, we define it 4281 * as a separate structure. 4282 */ 4283 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4284 return (struct ftrace_func_mapper *)hash; 4285 } 4286 4287 /** 4288 * ftrace_func_mapper_find_ip - Find some data mapped to an ip 4289 * @mapper: The mapper that has the ip maps 4290 * @ip: the instruction pointer to find the data for 4291 * 4292 * Returns the data mapped to @ip if found otherwise NULL. The return 4293 * is actually the address of the mapper data pointer. The address is 4294 * returned for use cases where the data is no bigger than a long, and 4295 * the user can use the data pointer as its data instead of having to 4296 * allocate more memory for the reference. 4297 */ 4298 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, 4299 unsigned long ip) 4300 { 4301 struct ftrace_func_entry *entry; 4302 struct ftrace_func_map *map; 4303 4304 entry = ftrace_lookup_ip(&mapper->hash, ip); 4305 if (!entry) 4306 return NULL; 4307 4308 map = (struct ftrace_func_map *)entry; 4309 return &map->data; 4310 } 4311 4312 /** 4313 * ftrace_func_mapper_add_ip - Map some data to an ip 4314 * @mapper: The mapper that has the ip maps 4315 * @ip: The instruction pointer address to map @data to 4316 * @data: The data to map to @ip 4317 * 4318 * Returns 0 on succes otherwise an error. 4319 */ 4320 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, 4321 unsigned long ip, void *data) 4322 { 4323 struct ftrace_func_entry *entry; 4324 struct ftrace_func_map *map; 4325 4326 entry = ftrace_lookup_ip(&mapper->hash, ip); 4327 if (entry) 4328 return -EBUSY; 4329 4330 map = kmalloc(sizeof(*map), GFP_KERNEL); 4331 if (!map) 4332 return -ENOMEM; 4333 4334 map->entry.ip = ip; 4335 map->data = data; 4336 4337 __add_hash_entry(&mapper->hash, &map->entry); 4338 4339 return 0; 4340 } 4341 4342 /** 4343 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping 4344 * @mapper: The mapper that has the ip maps 4345 * @ip: The instruction pointer address to remove the data from 4346 * 4347 * Returns the data if it is found, otherwise NULL. 4348 * Note, if the data pointer is used as the data itself, (see 4349 * ftrace_func_mapper_find_ip(), then the return value may be meaningless, 4350 * if the data pointer was set to zero. 
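 *
 * A minimal usage sketch of the mapper API (my_mapper and my_count_hit
 * are placeholder names; the counter-in-pointer trick is the
 * "data no bigger than a long" case described above):
 *
 *	static struct ftrace_func_mapper *my_mapper;
 *
 *	// in setup code: my_mapper = allocate_ftrace_func_mapper();
 *
 *	static int my_count_hit(unsigned long ip)
 *	{
 *		long *count;
 *
 *		count = (long *)ftrace_func_mapper_find_ip(my_mapper, ip);
 *		if (count) {
 *			(*count)++;
 *			return 0;
 *		}
 *		// first hit of this ip: map an initial count of 1 to it
 *		return ftrace_func_mapper_add_ip(my_mapper, ip, (void *)1L);
 *	}
 *
 * ftrace_func_mapper_remove_ip() and free_ftrace_func_mapper() undo the
 * mapping when the caller is done with it.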
4351 */ 4352 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, 4353 unsigned long ip) 4354 { 4355 struct ftrace_func_entry *entry; 4356 struct ftrace_func_map *map; 4357 void *data; 4358 4359 entry = ftrace_lookup_ip(&mapper->hash, ip); 4360 if (!entry) 4361 return NULL; 4362 4363 map = (struct ftrace_func_map *)entry; 4364 data = map->data; 4365 4366 remove_hash_entry(&mapper->hash, entry); 4367 kfree(entry); 4368 4369 return data; 4370 } 4371 4372 /** 4373 * free_ftrace_func_mapper - free a mapping of ips and data 4374 * @mapper: The mapper that has the ip maps 4375 * @free_func: A function to be called on each data item. 4376 * 4377 * This is used to free the function mapper. The @free_func is optional 4378 * and can be used if the data needs to be freed as well. 4379 */ 4380 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, 4381 ftrace_mapper_func free_func) 4382 { 4383 struct ftrace_func_entry *entry; 4384 struct ftrace_func_map *map; 4385 struct hlist_head *hhd; 4386 int size, i; 4387 4388 if (!mapper) 4389 return; 4390 4391 if (free_func && mapper->hash.count) { 4392 size = 1 << mapper->hash.size_bits; 4393 for (i = 0; i < size; i++) { 4394 hhd = &mapper->hash.buckets[i]; 4395 hlist_for_each_entry(entry, hhd, hlist) { 4396 map = (struct ftrace_func_map *)entry; 4397 free_func(map); 4398 } 4399 } 4400 } 4401 free_ftrace_hash(&mapper->hash); 4402 } 4403 4404 static void release_probe(struct ftrace_func_probe *probe) 4405 { 4406 struct ftrace_probe_ops *probe_ops; 4407 4408 mutex_lock(&ftrace_lock); 4409 4410 WARN_ON(probe->ref <= 0); 4411 4412 /* Subtract the ref that was used to protect this instance */ 4413 probe->ref--; 4414 4415 if (!probe->ref) { 4416 probe_ops = probe->probe_ops; 4417 /* 4418 * Sending zero as ip tells probe_ops to free 4419 * the probe->data itself 4420 */ 4421 if (probe_ops->free) 4422 probe_ops->free(probe_ops, probe->tr, 0, probe->data); 4423 list_del(&probe->list); 4424 kfree(probe); 4425 } 4426 mutex_unlock(&ftrace_lock); 4427 } 4428 4429 static void acquire_probe_locked(struct ftrace_func_probe *probe) 4430 { 4431 /* 4432 * Add one ref to keep it from being freed when releasing the 4433 * ftrace_lock mutex. 4434 */ 4435 probe->ref++; 4436 } 4437 4438 int 4439 register_ftrace_function_probe(char *glob, struct trace_array *tr, 4440 struct ftrace_probe_ops *probe_ops, 4441 void *data) 4442 { 4443 struct ftrace_func_entry *entry; 4444 struct ftrace_func_probe *probe; 4445 struct ftrace_hash **orig_hash; 4446 struct ftrace_hash *old_hash; 4447 struct ftrace_hash *hash; 4448 int count = 0; 4449 int size; 4450 int ret; 4451 int i; 4452 4453 if (WARN_ON(!tr)) 4454 return -EINVAL; 4455 4456 /* We do not support '!' 
for function probes */ 4457 if (WARN_ON(glob[0] == '!')) 4458 return -EINVAL; 4459 4460 4461 mutex_lock(&ftrace_lock); 4462 /* Check if the probe_ops is already registered */ 4463 list_for_each_entry(probe, &tr->func_probes, list) { 4464 if (probe->probe_ops == probe_ops) 4465 break; 4466 } 4467 if (&probe->list == &tr->func_probes) { 4468 probe = kzalloc(sizeof(*probe), GFP_KERNEL); 4469 if (!probe) { 4470 mutex_unlock(&ftrace_lock); 4471 return -ENOMEM; 4472 } 4473 probe->probe_ops = probe_ops; 4474 probe->ops.func = function_trace_probe_call; 4475 probe->tr = tr; 4476 ftrace_ops_init(&probe->ops); 4477 list_add(&probe->list, &tr->func_probes); 4478 } 4479 4480 acquire_probe_locked(probe); 4481 4482 mutex_unlock(&ftrace_lock); 4483 4484 /* 4485 * Note, there's a small window here that the func_hash->filter_hash 4486 * may be NULL or empty. Need to be carefule when reading the loop. 4487 */ 4488 mutex_lock(&probe->ops.func_hash->regex_lock); 4489 4490 orig_hash = &probe->ops.func_hash->filter_hash; 4491 old_hash = *orig_hash; 4492 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4493 4494 if (!hash) { 4495 ret = -ENOMEM; 4496 goto out; 4497 } 4498 4499 ret = ftrace_match_records(hash, glob, strlen(glob)); 4500 4501 /* Nothing found? */ 4502 if (!ret) 4503 ret = -EINVAL; 4504 4505 if (ret < 0) 4506 goto out; 4507 4508 size = 1 << hash->size_bits; 4509 for (i = 0; i < size; i++) { 4510 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4511 if (ftrace_lookup_ip(old_hash, entry->ip)) 4512 continue; 4513 /* 4514 * The caller might want to do something special 4515 * for each function we find. We call the callback 4516 * to give the caller an opportunity to do so. 4517 */ 4518 if (probe_ops->init) { 4519 ret = probe_ops->init(probe_ops, tr, 4520 entry->ip, data, 4521 &probe->data); 4522 if (ret < 0) { 4523 if (probe_ops->free && count) 4524 probe_ops->free(probe_ops, tr, 4525 0, probe->data); 4526 probe->data = NULL; 4527 goto out; 4528 } 4529 } 4530 count++; 4531 } 4532 } 4533 4534 mutex_lock(&ftrace_lock); 4535 4536 if (!count) { 4537 /* Nothing was added? 
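 *
 * For reference, a registration sketch from a probe user's point of
 * view (my_probe_func and my_probe_ops are placeholder names; in-tree
 * users such as the traceon/traceoff probes live in trace_functions.c):
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *		// runs each time a function matching the glob is hit
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("sched*", tr, &my_probe_ops, NULL);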
*/ 4538 ret = -EINVAL; 4539 goto out_unlock; 4540 } 4541 4542 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4543 hash, 1); 4544 if (ret < 0) 4545 goto err_unlock; 4546 4547 /* One ref for each new function traced */ 4548 probe->ref += count; 4549 4550 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) 4551 ret = ftrace_startup(&probe->ops, 0); 4552 4553 out_unlock: 4554 mutex_unlock(&ftrace_lock); 4555 4556 if (!ret) 4557 ret = count; 4558 out: 4559 mutex_unlock(&probe->ops.func_hash->regex_lock); 4560 free_ftrace_hash(hash); 4561 4562 release_probe(probe); 4563 4564 return ret; 4565 4566 err_unlock: 4567 if (!probe_ops->free || !count) 4568 goto out_unlock; 4569 4570 /* Failed to do the move, need to call the free functions */ 4571 for (i = 0; i < size; i++) { 4572 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4573 if (ftrace_lookup_ip(old_hash, entry->ip)) 4574 continue; 4575 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4576 } 4577 } 4578 goto out_unlock; 4579 } 4580 4581 int 4582 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 4583 struct ftrace_probe_ops *probe_ops) 4584 { 4585 struct ftrace_ops_hash old_hash_ops; 4586 struct ftrace_func_entry *entry; 4587 struct ftrace_func_probe *probe; 4588 struct ftrace_glob func_g; 4589 struct ftrace_hash **orig_hash; 4590 struct ftrace_hash *old_hash; 4591 struct ftrace_hash *hash = NULL; 4592 struct hlist_node *tmp; 4593 struct hlist_head hhd; 4594 char str[KSYM_SYMBOL_LEN]; 4595 int count = 0; 4596 int i, ret = -ENODEV; 4597 int size; 4598 4599 if (!glob || !strlen(glob) || !strcmp(glob, "*")) 4600 func_g.search = NULL; 4601 else { 4602 int not; 4603 4604 func_g.type = filter_parse_regex(glob, strlen(glob), 4605 &func_g.search, ¬); 4606 func_g.len = strlen(func_g.search); 4607 4608 /* we do not support '!' for function probes */ 4609 if (WARN_ON(not)) 4610 return -EINVAL; 4611 } 4612 4613 mutex_lock(&ftrace_lock); 4614 /* Check if the probe_ops is already registered */ 4615 list_for_each_entry(probe, &tr->func_probes, list) { 4616 if (probe->probe_ops == probe_ops) 4617 break; 4618 } 4619 if (&probe->list == &tr->func_probes) 4620 goto err_unlock_ftrace; 4621 4622 ret = -EINVAL; 4623 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) 4624 goto err_unlock_ftrace; 4625 4626 acquire_probe_locked(probe); 4627 4628 mutex_unlock(&ftrace_lock); 4629 4630 mutex_lock(&probe->ops.func_hash->regex_lock); 4631 4632 orig_hash = &probe->ops.func_hash->filter_hash; 4633 old_hash = *orig_hash; 4634 4635 if (ftrace_hash_empty(old_hash)) 4636 goto out_unlock; 4637 4638 old_hash_ops.filter_hash = old_hash; 4639 /* Probes only have filters */ 4640 old_hash_ops.notrace_hash = NULL; 4641 4642 ret = -ENOMEM; 4643 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4644 if (!hash) 4645 goto out_unlock; 4646 4647 INIT_HLIST_HEAD(&hhd); 4648 4649 size = 1 << hash->size_bits; 4650 for (i = 0; i < size; i++) { 4651 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { 4652 4653 if (func_g.search) { 4654 kallsyms_lookup(entry->ip, NULL, NULL, 4655 NULL, str); 4656 if (!ftrace_match(str, &func_g)) 4657 continue; 4658 } 4659 count++; 4660 remove_hash_entry(hash, entry); 4661 hlist_add_head(&entry->hlist, &hhd); 4662 } 4663 } 4664 4665 /* Nothing found? 
*/ 4666 if (!count) { 4667 ret = -EINVAL; 4668 goto out_unlock; 4669 } 4670 4671 mutex_lock(&ftrace_lock); 4672 4673 WARN_ON(probe->ref < count); 4674 4675 probe->ref -= count; 4676 4677 if (ftrace_hash_empty(hash)) 4678 ftrace_shutdown(&probe->ops, 0); 4679 4680 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4681 hash, 1); 4682 4683 /* still need to update the function call sites */ 4684 if (ftrace_enabled && !ftrace_hash_empty(hash)) 4685 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, 4686 &old_hash_ops); 4687 synchronize_rcu(); 4688 4689 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { 4690 hlist_del(&entry->hlist); 4691 if (probe_ops->free) 4692 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4693 kfree(entry); 4694 } 4695 mutex_unlock(&ftrace_lock); 4696 4697 out_unlock: 4698 mutex_unlock(&probe->ops.func_hash->regex_lock); 4699 free_ftrace_hash(hash); 4700 4701 release_probe(probe); 4702 4703 return ret; 4704 4705 err_unlock_ftrace: 4706 mutex_unlock(&ftrace_lock); 4707 return ret; 4708 } 4709 4710 void clear_ftrace_function_probes(struct trace_array *tr) 4711 { 4712 struct ftrace_func_probe *probe, *n; 4713 4714 list_for_each_entry_safe(probe, n, &tr->func_probes, list) 4715 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); 4716 } 4717 4718 static LIST_HEAD(ftrace_commands); 4719 static DEFINE_MUTEX(ftrace_cmd_mutex); 4720 4721 /* 4722 * Currently we only register ftrace commands from __init, so mark this 4723 * __init too. 4724 */ 4725 __init int register_ftrace_command(struct ftrace_func_command *cmd) 4726 { 4727 struct ftrace_func_command *p; 4728 int ret = 0; 4729 4730 mutex_lock(&ftrace_cmd_mutex); 4731 list_for_each_entry(p, &ftrace_commands, list) { 4732 if (strcmp(cmd->name, p->name) == 0) { 4733 ret = -EBUSY; 4734 goto out_unlock; 4735 } 4736 } 4737 list_add(&cmd->list, &ftrace_commands); 4738 out_unlock: 4739 mutex_unlock(&ftrace_cmd_mutex); 4740 4741 return ret; 4742 } 4743 4744 /* 4745 * Currently we only unregister ftrace commands from __init, so mark 4746 * this __init too. 
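 *
 * Mirroring the "mod" template above, a new command would look roughly
 * like this (the my_cmd names are placeholders):
 *
 *	static int my_cmd_callback(struct trace_array *tr,
 *				   struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		// handle "<func>:my_cmd:<param>" written to the filter file
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "my_cmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);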
4747 */ 4748 __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 4749 { 4750 struct ftrace_func_command *p, *n; 4751 int ret = -ENODEV; 4752 4753 mutex_lock(&ftrace_cmd_mutex); 4754 list_for_each_entry_safe(p, n, &ftrace_commands, list) { 4755 if (strcmp(cmd->name, p->name) == 0) { 4756 ret = 0; 4757 list_del_init(&p->list); 4758 goto out_unlock; 4759 } 4760 } 4761 out_unlock: 4762 mutex_unlock(&ftrace_cmd_mutex); 4763 4764 return ret; 4765 } 4766 4767 static int ftrace_process_regex(struct ftrace_iterator *iter, 4768 char *buff, int len, int enable) 4769 { 4770 struct ftrace_hash *hash = iter->hash; 4771 struct trace_array *tr = iter->ops->private; 4772 char *func, *command, *next = buff; 4773 struct ftrace_func_command *p; 4774 int ret = -EINVAL; 4775 4776 func = strsep(&next, ":"); 4777 4778 if (!next) { 4779 ret = ftrace_match_records(hash, func, len); 4780 if (!ret) 4781 ret = -EINVAL; 4782 if (ret < 0) 4783 return ret; 4784 return 0; 4785 } 4786 4787 /* command found */ 4788 4789 command = strsep(&next, ":"); 4790 4791 mutex_lock(&ftrace_cmd_mutex); 4792 list_for_each_entry(p, &ftrace_commands, list) { 4793 if (strcmp(p->name, command) == 0) { 4794 ret = p->func(tr, hash, func, command, next, enable); 4795 goto out_unlock; 4796 } 4797 } 4798 out_unlock: 4799 mutex_unlock(&ftrace_cmd_mutex); 4800 4801 return ret; 4802 } 4803 4804 static ssize_t 4805 ftrace_regex_write(struct file *file, const char __user *ubuf, 4806 size_t cnt, loff_t *ppos, int enable) 4807 { 4808 struct ftrace_iterator *iter; 4809 struct trace_parser *parser; 4810 ssize_t ret, read; 4811 4812 if (!cnt) 4813 return 0; 4814 4815 if (file->f_mode & FMODE_READ) { 4816 struct seq_file *m = file->private_data; 4817 iter = m->private; 4818 } else 4819 iter = file->private_data; 4820 4821 if (unlikely(ftrace_disabled)) 4822 return -ENODEV; 4823 4824 /* iter->hash is a local copy, so we don't need regex_lock */ 4825 4826 parser = &iter->parser; 4827 read = trace_get_user(parser, ubuf, cnt, ppos); 4828 4829 if (read >= 0 && trace_parser_loaded(parser) && 4830 !trace_parser_cont(parser)) { 4831 ret = ftrace_process_regex(iter, parser->buffer, 4832 parser->idx, enable); 4833 trace_parser_clear(parser); 4834 if (ret < 0) 4835 goto out; 4836 } 4837 4838 ret = read; 4839 out: 4840 return ret; 4841 } 4842 4843 ssize_t 4844 ftrace_filter_write(struct file *file, const char __user *ubuf, 4845 size_t cnt, loff_t *ppos) 4846 { 4847 return ftrace_regex_write(file, ubuf, cnt, ppos, 1); 4848 } 4849 4850 ssize_t 4851 ftrace_notrace_write(struct file *file, const char __user *ubuf, 4852 size_t cnt, loff_t *ppos) 4853 { 4854 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 4855 } 4856 4857 static int 4858 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) 4859 { 4860 struct ftrace_func_entry *entry; 4861 4862 if (!ftrace_location(ip)) 4863 return -EINVAL; 4864 4865 if (remove) { 4866 entry = ftrace_lookup_ip(hash, ip); 4867 if (!entry) 4868 return -ENOENT; 4869 free_hash_entry(hash, entry); 4870 return 0; 4871 } 4872 4873 return add_hash_entry(hash, ip); 4874 } 4875 4876 static int 4877 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 4878 unsigned long ip, int remove, int reset, int enable) 4879 { 4880 struct ftrace_hash **orig_hash; 4881 struct ftrace_hash *hash; 4882 int ret; 4883 4884 if (unlikely(ftrace_disabled)) 4885 return -ENODEV; 4886 4887 mutex_lock(&ops->func_hash->regex_lock); 4888 4889 if (enable) 4890 orig_hash = &ops->func_hash->filter_hash; 4891 else 4892 
orig_hash = &ops->func_hash->notrace_hash; 4893 4894 if (reset) 4895 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4896 else 4897 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 4898 4899 if (!hash) { 4900 ret = -ENOMEM; 4901 goto out_regex_unlock; 4902 } 4903 4904 if (buf && !ftrace_match_records(hash, buf, len)) { 4905 ret = -EINVAL; 4906 goto out_regex_unlock; 4907 } 4908 if (ip) { 4909 ret = ftrace_match_addr(hash, ip, remove); 4910 if (ret < 0) 4911 goto out_regex_unlock; 4912 } 4913 4914 mutex_lock(&ftrace_lock); 4915 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); 4916 mutex_unlock(&ftrace_lock); 4917 4918 out_regex_unlock: 4919 mutex_unlock(&ops->func_hash->regex_lock); 4920 4921 free_ftrace_hash(hash); 4922 return ret; 4923 } 4924 4925 static int 4926 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, 4927 int reset, int enable) 4928 { 4929 return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); 4930 } 4931 4932 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS 4933 4934 struct ftrace_direct_func { 4935 struct list_head next; 4936 unsigned long addr; 4937 int count; 4938 }; 4939 4940 static LIST_HEAD(ftrace_direct_funcs); 4941 4942 /** 4943 * ftrace_find_direct_func - test an address if it is a registered direct caller 4944 * @addr: The address of a registered direct caller 4945 * 4946 * This searches to see if a ftrace direct caller has been registered 4947 * at a specific address, and if so, it returns a descriptor for it. 4948 * 4949 * This can be used by architecture code to see if an address is 4950 * a direct caller (trampoline) attached to a fentry/mcount location. 4951 * This is useful for the function_graph tracer, as it may need to 4952 * do adjustments if it traced a location that also has a direct 4953 * trampoline attached to it. 4954 */ 4955 struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr) 4956 { 4957 struct ftrace_direct_func *entry; 4958 bool found = false; 4959 4960 /* May be called by fgraph trampoline (protected by rcu tasks) */ 4961 list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) { 4962 if (entry->addr == addr) { 4963 found = true; 4964 break; 4965 } 4966 } 4967 if (found) 4968 return entry; 4969 4970 return NULL; 4971 } 4972 4973 /** 4974 * register_ftrace_direct - Call a custom trampoline directly 4975 * @ip: The address of the nop at the beginning of a function 4976 * @addr: The address of the trampoline to call at @ip 4977 * 4978 * This is used to connect a direct call from the nop location (@ip) 4979 * at the start of ftrace traced functions. The location that it calls 4980 * (@addr) must be able to handle a direct call, and save the parameters 4981 * of the function being traced, and restore them (or inject new ones 4982 * if needed), before returning. 4983 * 4984 * Returns: 4985 * 0 on success 4986 * -EBUSY - Another direct function is already attached (there can be only one) 4987 * -ENODEV - @ip does not point to a ftrace nop location (or not supported) 4988 * -ENOMEM - There was an allocation failure. 
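 *
 * A minimal usage sketch (my_tramp is a hypothetical trampoline; as in
 * the in-tree samples it would normally be written in assembly so it
 * can save and restore the traced function's arguments, and
 * wake_up_process() is only an example attach point):
 *
 *	extern void my_tramp(void);
 *
 *	ret = register_ftrace_direct((unsigned long)wake_up_process,
 *				     (unsigned long)my_tramp);
 *
 * unregister_ftrace_direct() takes the same (ip, addr) pair to detach it.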
4989 */ 4990 int register_ftrace_direct(unsigned long ip, unsigned long addr) 4991 { 4992 struct ftrace_direct_func *direct; 4993 struct ftrace_func_entry *entry; 4994 struct ftrace_hash *free_hash = NULL; 4995 struct dyn_ftrace *rec; 4996 int ret = -EBUSY; 4997 4998 mutex_lock(&direct_mutex); 4999 5000 /* See if there's a direct function at @ip already */ 5001 if (ftrace_find_rec_direct(ip)) 5002 goto out_unlock; 5003 5004 ret = -ENODEV; 5005 rec = lookup_rec(ip, ip); 5006 if (!rec) 5007 goto out_unlock; 5008 5009 /* 5010 * Check if the rec says it has a direct call but we didn't 5011 * find one earlier? 5012 */ 5013 if (WARN_ON(rec->flags & FTRACE_FL_DIRECT)) 5014 goto out_unlock; 5015 5016 /* Make sure the ip points to the exact record */ 5017 if (ip != rec->ip) { 5018 ip = rec->ip; 5019 /* Need to check this ip for a direct. */ 5020 if (ftrace_find_rec_direct(ip)) 5021 goto out_unlock; 5022 } 5023 5024 ret = -ENOMEM; 5025 if (ftrace_hash_empty(direct_functions) || 5026 direct_functions->count > 2 * (1 << direct_functions->size_bits)) { 5027 struct ftrace_hash *new_hash; 5028 int size = ftrace_hash_empty(direct_functions) ? 0 : 5029 direct_functions->count + 1; 5030 5031 if (size < 32) 5032 size = 32; 5033 5034 new_hash = dup_hash(direct_functions, size); 5035 if (!new_hash) 5036 goto out_unlock; 5037 5038 free_hash = direct_functions; 5039 direct_functions = new_hash; 5040 } 5041 5042 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 5043 if (!entry) 5044 goto out_unlock; 5045 5046 direct = ftrace_find_direct_func(addr); 5047 if (!direct) { 5048 direct = kmalloc(sizeof(*direct), GFP_KERNEL); 5049 if (!direct) { 5050 kfree(entry); 5051 goto out_unlock; 5052 } 5053 direct->addr = addr; 5054 direct->count = 0; 5055 list_add_rcu(&direct->next, &ftrace_direct_funcs); 5056 ftrace_direct_func_count++; 5057 } 5058 5059 entry->ip = ip; 5060 entry->direct = addr; 5061 __add_hash_entry(direct_functions, entry); 5062 5063 ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0); 5064 if (ret) 5065 remove_hash_entry(direct_functions, entry); 5066 5067 if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) { 5068 ret = register_ftrace_function(&direct_ops); 5069 if (ret) 5070 ftrace_set_filter_ip(&direct_ops, ip, 1, 0); 5071 } 5072 5073 if (ret) { 5074 kfree(entry); 5075 if (!direct->count) { 5076 list_del_rcu(&direct->next); 5077 synchronize_rcu_tasks(); 5078 kfree(direct); 5079 if (free_hash) 5080 free_ftrace_hash(free_hash); 5081 free_hash = NULL; 5082 ftrace_direct_func_count--; 5083 } 5084 } else { 5085 direct->count++; 5086 } 5087 out_unlock: 5088 mutex_unlock(&direct_mutex); 5089 5090 if (free_hash) { 5091 synchronize_rcu_tasks(); 5092 free_ftrace_hash(free_hash); 5093 } 5094 5095 return ret; 5096 } 5097 EXPORT_SYMBOL_GPL(register_ftrace_direct); 5098 5099 static struct ftrace_func_entry *find_direct_entry(unsigned long *ip, 5100 struct dyn_ftrace **recp) 5101 { 5102 struct ftrace_func_entry *entry; 5103 struct dyn_ftrace *rec; 5104 5105 rec = lookup_rec(*ip, *ip); 5106 if (!rec) 5107 return NULL; 5108 5109 entry = __ftrace_lookup_ip(direct_functions, rec->ip); 5110 if (!entry) { 5111 WARN_ON(rec->flags & FTRACE_FL_DIRECT); 5112 return NULL; 5113 } 5114 5115 WARN_ON(!(rec->flags & FTRACE_FL_DIRECT)); 5116 5117 /* Passed in ip just needs to be on the call site */ 5118 *ip = rec->ip; 5119 5120 if (recp) 5121 *recp = rec; 5122 5123 return entry; 5124 } 5125 5126 int unregister_ftrace_direct(unsigned long ip, unsigned long addr) 5127 { 5128 struct ftrace_direct_func *direct; 5129 struct ftrace_func_entry 
*entry; 5130 int ret = -ENODEV; 5131 5132 mutex_lock(&direct_mutex); 5133 5134 entry = find_direct_entry(&ip, NULL); 5135 if (!entry) 5136 goto out_unlock; 5137 5138 if (direct_functions->count == 1) 5139 unregister_ftrace_function(&direct_ops); 5140 5141 ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0); 5142 5143 WARN_ON(ret); 5144 5145 remove_hash_entry(direct_functions, entry); 5146 5147 direct = ftrace_find_direct_func(addr); 5148 if (!WARN_ON(!direct)) { 5149 /* This is the good path (see the ! before WARN) */ 5150 direct->count--; 5151 WARN_ON(direct->count < 0); 5152 if (!direct->count) { 5153 list_del_rcu(&direct->next); 5154 synchronize_rcu_tasks(); 5155 kfree(direct); 5156 ftrace_direct_func_count--; 5157 } 5158 } 5159 out_unlock: 5160 mutex_unlock(&direct_mutex); 5161 5162 return ret; 5163 } 5164 EXPORT_SYMBOL_GPL(unregister_ftrace_direct); 5165 5166 static struct ftrace_ops stub_ops = { 5167 .func = ftrace_stub, 5168 }; 5169 5170 /** 5171 * ftrace_modify_direct_caller - modify ftrace nop directly 5172 * @entry: The ftrace hash entry of the direct helper for @rec 5173 * @rec: The record representing the function site to patch 5174 * @old_addr: The location that the site at @rec->ip currently calls 5175 * @new_addr: The location that the site at @rec->ip should call 5176 * 5177 * An architecture may overwrite this function to optimize the 5178 * changing of the direct callback on an ftrace nop location. 5179 * This is called with the ftrace_lock mutex held, and no other 5180 * ftrace callbacks are on the associated record (@rec). Thus, 5181 * it is safe to modify the ftrace record, where it should be 5182 * currently calling @old_addr directly, to call @new_addr. 5183 * 5184 * Safety checks should be made to make sure that the code at 5185 * @rec->ip is currently calling @old_addr. And this must 5186 * also update entry->direct to @new_addr. 5187 */ 5188 int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry, 5189 struct dyn_ftrace *rec, 5190 unsigned long old_addr, 5191 unsigned long new_addr) 5192 { 5193 unsigned long ip = rec->ip; 5194 int ret; 5195 5196 /* 5197 * The ftrace_lock was used to determine if the record 5198 * had more than one registered user to it. If it did, 5199 * we needed to prevent that from changing to do the quick 5200 * switch. But if it did not (only a direct caller was attached) 5201 * then this function is called. But this function can deal 5202 * with attached callers to the rec that we care about, and 5203 * since this function uses standard ftrace calls that take 5204 * the ftrace_lock mutex, we need to release it. 5205 */ 5206 mutex_unlock(&ftrace_lock); 5207 5208 /* 5209 * By setting a stub function at the same address, we force 5210 * the code to call the iterator and the direct_ops helper. 5211 * This means that @ip does not call the direct call, and 5212 * we can simply modify it. 5213 */ 5214 ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0); 5215 if (ret) 5216 goto out_lock; 5217 5218 ret = register_ftrace_function(&stub_ops); 5219 if (ret) { 5220 ftrace_set_filter_ip(&stub_ops, ip, 1, 0); 5221 goto out_lock; 5222 } 5223 5224 entry->direct = new_addr; 5225 5226 /* 5227 * By removing the stub, we put back the direct call, calling 5228 * the @new_addr. 
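 *
 * For reference, the caller-visible operation that can land in this
 * helper is roughly (my_tramp1 and my_tramp2 being hypothetical direct
 * trampolines, with my_tramp1 already attached to wake_up_process()):
 *
 *	modify_ftrace_direct((unsigned long)wake_up_process,
 *			     (unsigned long)my_tramp1,
 *			     (unsigned long)my_tramp2);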
5229 */ 5230 unregister_ftrace_function(&stub_ops); 5231 ftrace_set_filter_ip(&stub_ops, ip, 1, 0); 5232 5233 out_lock: 5234 mutex_lock(&ftrace_lock); 5235 5236 return ret; 5237 } 5238 5239 /** 5240 * modify_ftrace_direct - Modify an existing direct call to call something else 5241 * @ip: The instruction pointer to modify 5242 * @old_addr: The address that the current @ip calls directly 5243 * @new_addr: The address that the @ip should call 5244 * 5245 * This modifies a ftrace direct caller at an instruction pointer without 5246 * having to disable it first. The direct call will switch over to the 5247 * @new_addr without missing anything. 5248 * 5249 * Returns: zero on success. Non zero on error, which includes: 5250 * -ENODEV : the @ip given has no direct caller attached 5251 * -EINVAL : the @old_addr does not match the current direct caller 5252 */ 5253 int modify_ftrace_direct(unsigned long ip, 5254 unsigned long old_addr, unsigned long new_addr) 5255 { 5256 struct ftrace_func_entry *entry; 5257 struct dyn_ftrace *rec; 5258 int ret = -ENODEV; 5259 5260 mutex_lock(&direct_mutex); 5261 5262 mutex_lock(&ftrace_lock); 5263 entry = find_direct_entry(&ip, &rec); 5264 if (!entry) 5265 goto out_unlock; 5266 5267 ret = -EINVAL; 5268 if (entry->direct != old_addr) 5269 goto out_unlock; 5270 5271 /* 5272 * If there's no other ftrace callback on the rec->ip location, 5273 * then it can be changed directly by the architecture. 5274 * If there is another caller, then we just need to change the 5275 * direct caller helper to point to @new_addr. 5276 */ 5277 if (ftrace_rec_count(rec) == 1) { 5278 ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr); 5279 } else { 5280 entry->direct = new_addr; 5281 ret = 0; 5282 } 5283 5284 out_unlock: 5285 mutex_unlock(&ftrace_lock); 5286 mutex_unlock(&direct_mutex); 5287 return ret; 5288 } 5289 EXPORT_SYMBOL_GPL(modify_ftrace_direct); 5290 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ 5291 5292 /** 5293 * ftrace_set_filter_ip - set a function to filter on in ftrace by address 5294 * @ops - the ops to set the filter with 5295 * @ip - the address to add to or remove from the filter. 5296 * @remove - non zero to remove the ip from the filter 5297 * @reset - non zero to reset all filters before applying this filter. 5298 * 5299 * Filters denote which functions should be enabled when tracing is enabled 5300 * If @ip is NULL, it failes to update filter. 5301 */ 5302 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, 5303 int remove, int reset) 5304 { 5305 ftrace_ops_init(ops); 5306 return ftrace_set_addr(ops, ip, remove, reset, 1); 5307 } 5308 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); 5309 5310 /** 5311 * ftrace_ops_set_global_filter - setup ops to use global filters 5312 * @ops - the ops which will use the global filters 5313 * 5314 * ftrace users who need global function trace filtering should call this. 5315 * It can set the global filter only if ops were not initialized before. 
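 *
 * A rough sketch of a filter API client around here (my_ops and
 * my_callback are placeholders, schedule() is only an example target,
 * and error checking is omitted):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called for every function that passes the filters
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	ftrace_set_filter_ip(&my_ops, (unsigned long)schedule, 0, 0);
 *	register_ftrace_function(&my_ops);
 *
 * A caller that wants to follow the global set_ftrace_filter settings
 * would instead call ftrace_ops_set_global_filter(&my_ops) before
 * registering.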
5316 */ 5317 void ftrace_ops_set_global_filter(struct ftrace_ops *ops) 5318 { 5319 if (ops->flags & FTRACE_OPS_FL_INITIALIZED) 5320 return; 5321 5322 ftrace_ops_init(ops); 5323 ops->func_hash = &global_ops.local_hash; 5324 } 5325 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); 5326 5327 static int 5328 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 5329 int reset, int enable) 5330 { 5331 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); 5332 } 5333 5334 /** 5335 * ftrace_set_filter - set a function to filter on in ftrace 5336 * @ops - the ops to set the filter with 5337 * @buf - the string that holds the function filter text. 5338 * @len - the length of the string. 5339 * @reset - non zero to reset all filters before applying this filter. 5340 * 5341 * Filters denote which functions should be enabled when tracing is enabled. 5342 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 5343 */ 5344 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 5345 int len, int reset) 5346 { 5347 ftrace_ops_init(ops); 5348 return ftrace_set_regex(ops, buf, len, reset, 1); 5349 } 5350 EXPORT_SYMBOL_GPL(ftrace_set_filter); 5351 5352 /** 5353 * ftrace_set_notrace - set a function to not trace in ftrace 5354 * @ops - the ops to set the notrace filter with 5355 * @buf - the string that holds the function notrace text. 5356 * @len - the length of the string. 5357 * @reset - non zero to reset all filters before applying this filter. 5358 * 5359 * Notrace Filters denote which functions should not be enabled when tracing 5360 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 5361 * for tracing. 5362 */ 5363 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 5364 int len, int reset) 5365 { 5366 ftrace_ops_init(ops); 5367 return ftrace_set_regex(ops, buf, len, reset, 0); 5368 } 5369 EXPORT_SYMBOL_GPL(ftrace_set_notrace); 5370 /** 5371 * ftrace_set_global_filter - set a function to filter on with global tracers 5372 * @buf - the string that holds the function filter text. 5373 * @len - the length of the string. 5374 * @reset - non zero to reset all filters before applying this filter. 5375 * 5376 * Filters denote which functions should be enabled when tracing is enabled. 5377 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 5378 */ 5379 void ftrace_set_global_filter(unsigned char *buf, int len, int reset) 5380 { 5381 ftrace_set_regex(&global_ops, buf, len, reset, 1); 5382 } 5383 EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 5384 5385 /** 5386 * ftrace_set_global_notrace - set a function to not trace with global tracers 5387 * @buf - the string that holds the function notrace text. 5388 * @len - the length of the string. 5389 * @reset - non zero to reset all filters before applying this filter. 5390 * 5391 * Notrace Filters denote which functions should not be enabled when tracing 5392 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 5393 * for tracing. 5394 */ 5395 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) 5396 { 5397 ftrace_set_regex(&global_ops, buf, len, reset, 0); 5398 } 5399 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); 5400 5401 /* 5402 * command line interface to allow users to set filters on boot up. 
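 *
 * For example, booting with
 *
 *	ftrace_filter=kmem_cache_alloc,kfree ftrace_notrace=rcu_read_lock
 *	ftrace_graph_max_depth=5
 *
 * is parsed by the __setup() handlers below (the filter and notrace
 * strings are then applied by set_ftrace_early_filters()); the function
 * names here are only examples.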
5403 */ 5404 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 5405 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 5406 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 5407 5408 /* Used by function selftest to not test if filter is set */ 5409 bool ftrace_filter_param __initdata; 5410 5411 static int __init set_ftrace_notrace(char *str) 5412 { 5413 ftrace_filter_param = true; 5414 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 5415 return 1; 5416 } 5417 __setup("ftrace_notrace=", set_ftrace_notrace); 5418 5419 static int __init set_ftrace_filter(char *str) 5420 { 5421 ftrace_filter_param = true; 5422 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 5423 return 1; 5424 } 5425 __setup("ftrace_filter=", set_ftrace_filter); 5426 5427 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5428 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 5429 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 5430 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); 5431 5432 static int __init set_graph_function(char *str) 5433 { 5434 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 5435 return 1; 5436 } 5437 __setup("ftrace_graph_filter=", set_graph_function); 5438 5439 static int __init set_graph_notrace_function(char *str) 5440 { 5441 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); 5442 return 1; 5443 } 5444 __setup("ftrace_graph_notrace=", set_graph_notrace_function); 5445 5446 static int __init set_graph_max_depth_function(char *str) 5447 { 5448 if (!str) 5449 return 0; 5450 fgraph_max_depth = simple_strtoul(str, NULL, 0); 5451 return 1; 5452 } 5453 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); 5454 5455 static void __init set_ftrace_early_graph(char *buf, int enable) 5456 { 5457 int ret; 5458 char *func; 5459 struct ftrace_hash *hash; 5460 5461 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 5462 if (MEM_FAIL(!hash, "Failed to allocate hash\n")) 5463 return; 5464 5465 while (buf) { 5466 func = strsep(&buf, ","); 5467 /* we allow only one expression at a time */ 5468 ret = ftrace_graph_set_hash(hash, func); 5469 if (ret) 5470 printk(KERN_DEBUG "ftrace: function %s not " 5471 "traceable\n", func); 5472 } 5473 5474 if (enable) 5475 ftrace_graph_hash = hash; 5476 else 5477 ftrace_graph_notrace_hash = hash; 5478 } 5479 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 5480 5481 void __init 5482 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) 5483 { 5484 char *func; 5485 5486 ftrace_ops_init(ops); 5487 5488 while (buf) { 5489 func = strsep(&buf, ","); 5490 ftrace_set_regex(ops, func, strlen(func), 0, enable); 5491 } 5492 } 5493 5494 static void __init set_ftrace_early_filters(void) 5495 { 5496 if (ftrace_filter_buf[0]) 5497 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); 5498 if (ftrace_notrace_buf[0]) 5499 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); 5500 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5501 if (ftrace_graph_buf[0]) 5502 set_ftrace_early_graph(ftrace_graph_buf, 1); 5503 if (ftrace_graph_notrace_buf[0]) 5504 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); 5505 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 5506 } 5507 5508 int ftrace_regex_release(struct inode *inode, struct file *file) 5509 { 5510 struct seq_file *m = (struct seq_file *)file->private_data; 5511 struct ftrace_iterator *iter; 5512 struct ftrace_hash **orig_hash; 5513 struct trace_parser *parser; 5514 int filter_hash; 5515 int ret; 5516 5517 if (file->f_mode & FMODE_READ) { 5518 iter = 
m->private; 5519 seq_release(inode, file); 5520 } else 5521 iter = file->private_data; 5522 5523 parser = &iter->parser; 5524 if (trace_parser_loaded(parser)) { 5525 ftrace_match_records(iter->hash, parser->buffer, parser->idx); 5526 } 5527 5528 trace_parser_put(parser); 5529 5530 mutex_lock(&iter->ops->func_hash->regex_lock); 5531 5532 if (file->f_mode & FMODE_WRITE) { 5533 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 5534 5535 if (filter_hash) { 5536 orig_hash = &iter->ops->func_hash->filter_hash; 5537 if (iter->tr && !list_empty(&iter->tr->mod_trace)) 5538 iter->hash->flags |= FTRACE_HASH_FL_MOD; 5539 } else 5540 orig_hash = &iter->ops->func_hash->notrace_hash; 5541 5542 mutex_lock(&ftrace_lock); 5543 ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash, 5544 iter->hash, filter_hash); 5545 mutex_unlock(&ftrace_lock); 5546 } else { 5547 /* For read only, the hash is the ops hash */ 5548 iter->hash = NULL; 5549 } 5550 5551 mutex_unlock(&iter->ops->func_hash->regex_lock); 5552 free_ftrace_hash(iter->hash); 5553 if (iter->tr) 5554 trace_array_put(iter->tr); 5555 kfree(iter); 5556 5557 return 0; 5558 } 5559 5560 static const struct file_operations ftrace_avail_fops = { 5561 .open = ftrace_avail_open, 5562 .read = seq_read, 5563 .llseek = seq_lseek, 5564 .release = seq_release_private, 5565 }; 5566 5567 static const struct file_operations ftrace_enabled_fops = { 5568 .open = ftrace_enabled_open, 5569 .read = seq_read, 5570 .llseek = seq_lseek, 5571 .release = seq_release_private, 5572 }; 5573 5574 static const struct file_operations ftrace_filter_fops = { 5575 .open = ftrace_filter_open, 5576 .read = seq_read, 5577 .write = ftrace_filter_write, 5578 .llseek = tracing_lseek, 5579 .release = ftrace_regex_release, 5580 }; 5581 5582 static const struct file_operations ftrace_notrace_fops = { 5583 .open = ftrace_notrace_open, 5584 .read = seq_read, 5585 .write = ftrace_notrace_write, 5586 .llseek = tracing_lseek, 5587 .release = ftrace_regex_release, 5588 }; 5589 5590 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5591 5592 static DEFINE_MUTEX(graph_lock); 5593 5594 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH; 5595 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH; 5596 5597 enum graph_filter_type { 5598 GRAPH_FILTER_NOTRACE = 0, 5599 GRAPH_FILTER_FUNCTION, 5600 }; 5601 5602 #define FTRACE_GRAPH_EMPTY ((void *)1) 5603 5604 struct ftrace_graph_data { 5605 struct ftrace_hash *hash; 5606 struct ftrace_func_entry *entry; 5607 int idx; /* for hash table iteration */ 5608 enum graph_filter_type type; 5609 struct ftrace_hash *new_hash; 5610 const struct seq_operations *seq_ops; 5611 struct trace_parser parser; 5612 }; 5613 5614 static void * 5615 __g_next(struct seq_file *m, loff_t *pos) 5616 { 5617 struct ftrace_graph_data *fgd = m->private; 5618 struct ftrace_func_entry *entry = fgd->entry; 5619 struct hlist_head *head; 5620 int i, idx = fgd->idx; 5621 5622 if (*pos >= fgd->hash->count) 5623 return NULL; 5624 5625 if (entry) { 5626 hlist_for_each_entry_continue(entry, hlist) { 5627 fgd->entry = entry; 5628 return entry; 5629 } 5630 5631 idx++; 5632 } 5633 5634 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { 5635 head = &fgd->hash->buckets[i]; 5636 hlist_for_each_entry(entry, head, hlist) { 5637 fgd->entry = entry; 5638 fgd->idx = i; 5639 return entry; 5640 } 5641 } 5642 return NULL; 5643 } 5644 5645 static void * 5646 g_next(struct seq_file *m, void *v, loff_t *pos) 5647 { 5648 (*pos)++; 5649 return __g_next(m, pos); 5650 } 5651 5652 static void *g_start(struct 
seq_file *m, loff_t *pos) 5653 { 5654 struct ftrace_graph_data *fgd = m->private; 5655 5656 mutex_lock(&graph_lock); 5657 5658 if (fgd->type == GRAPH_FILTER_FUNCTION) 5659 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 5660 lockdep_is_held(&graph_lock)); 5661 else 5662 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5663 lockdep_is_held(&graph_lock)); 5664 5665 /* Nothing, tell g_show to print all functions are enabled */ 5666 if (ftrace_hash_empty(fgd->hash) && !*pos) 5667 return FTRACE_GRAPH_EMPTY; 5668 5669 fgd->idx = 0; 5670 fgd->entry = NULL; 5671 return __g_next(m, pos); 5672 } 5673 5674 static void g_stop(struct seq_file *m, void *p) 5675 { 5676 mutex_unlock(&graph_lock); 5677 } 5678 5679 static int g_show(struct seq_file *m, void *v) 5680 { 5681 struct ftrace_func_entry *entry = v; 5682 5683 if (!entry) 5684 return 0; 5685 5686 if (entry == FTRACE_GRAPH_EMPTY) { 5687 struct ftrace_graph_data *fgd = m->private; 5688 5689 if (fgd->type == GRAPH_FILTER_FUNCTION) 5690 seq_puts(m, "#### all functions enabled ####\n"); 5691 else 5692 seq_puts(m, "#### no functions disabled ####\n"); 5693 return 0; 5694 } 5695 5696 seq_printf(m, "%ps\n", (void *)entry->ip); 5697 5698 return 0; 5699 } 5700 5701 static const struct seq_operations ftrace_graph_seq_ops = { 5702 .start = g_start, 5703 .next = g_next, 5704 .stop = g_stop, 5705 .show = g_show, 5706 }; 5707 5708 static int 5709 __ftrace_graph_open(struct inode *inode, struct file *file, 5710 struct ftrace_graph_data *fgd) 5711 { 5712 int ret; 5713 struct ftrace_hash *new_hash = NULL; 5714 5715 ret = security_locked_down(LOCKDOWN_TRACEFS); 5716 if (ret) 5717 return ret; 5718 5719 if (file->f_mode & FMODE_WRITE) { 5720 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 5721 5722 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) 5723 return -ENOMEM; 5724 5725 if (file->f_flags & O_TRUNC) 5726 new_hash = alloc_ftrace_hash(size_bits); 5727 else 5728 new_hash = alloc_and_copy_ftrace_hash(size_bits, 5729 fgd->hash); 5730 if (!new_hash) { 5731 ret = -ENOMEM; 5732 goto out; 5733 } 5734 } 5735 5736 if (file->f_mode & FMODE_READ) { 5737 ret = seq_open(file, &ftrace_graph_seq_ops); 5738 if (!ret) { 5739 struct seq_file *m = file->private_data; 5740 m->private = fgd; 5741 } else { 5742 /* Failed */ 5743 free_ftrace_hash(new_hash); 5744 new_hash = NULL; 5745 } 5746 } else 5747 file->private_data = fgd; 5748 5749 out: 5750 if (ret < 0 && file->f_mode & FMODE_WRITE) 5751 trace_parser_put(&fgd->parser); 5752 5753 fgd->new_hash = new_hash; 5754 5755 /* 5756 * All uses of fgd->hash must be taken with the graph_lock 5757 * held. The graph_lock is going to be released, so force 5758 * fgd->hash to be reinitialized when it is taken again. 
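 *
 * (For orientation: the new_hash built here backs the set_graph_function
 * and set_graph_notrace tracefs files, so a write such as
 *
 *	echo 'kmem_*' > set_graph_function
 *
 * flows through ftrace_graph_write() and ftrace_graph_release() below;
 * the pattern is only an example.)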
5759 */ 5760 fgd->hash = NULL; 5761 5762 return ret; 5763 } 5764 5765 static int 5766 ftrace_graph_open(struct inode *inode, struct file *file) 5767 { 5768 struct ftrace_graph_data *fgd; 5769 int ret; 5770 5771 if (unlikely(ftrace_disabled)) 5772 return -ENODEV; 5773 5774 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 5775 if (fgd == NULL) 5776 return -ENOMEM; 5777 5778 mutex_lock(&graph_lock); 5779 5780 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 5781 lockdep_is_held(&graph_lock)); 5782 fgd->type = GRAPH_FILTER_FUNCTION; 5783 fgd->seq_ops = &ftrace_graph_seq_ops; 5784 5785 ret = __ftrace_graph_open(inode, file, fgd); 5786 if (ret < 0) 5787 kfree(fgd); 5788 5789 mutex_unlock(&graph_lock); 5790 return ret; 5791 } 5792 5793 static int 5794 ftrace_graph_notrace_open(struct inode *inode, struct file *file) 5795 { 5796 struct ftrace_graph_data *fgd; 5797 int ret; 5798 5799 if (unlikely(ftrace_disabled)) 5800 return -ENODEV; 5801 5802 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 5803 if (fgd == NULL) 5804 return -ENOMEM; 5805 5806 mutex_lock(&graph_lock); 5807 5808 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5809 lockdep_is_held(&graph_lock)); 5810 fgd->type = GRAPH_FILTER_NOTRACE; 5811 fgd->seq_ops = &ftrace_graph_seq_ops; 5812 5813 ret = __ftrace_graph_open(inode, file, fgd); 5814 if (ret < 0) 5815 kfree(fgd); 5816 5817 mutex_unlock(&graph_lock); 5818 return ret; 5819 } 5820 5821 static int 5822 ftrace_graph_release(struct inode *inode, struct file *file) 5823 { 5824 struct ftrace_graph_data *fgd; 5825 struct ftrace_hash *old_hash, *new_hash; 5826 struct trace_parser *parser; 5827 int ret = 0; 5828 5829 if (file->f_mode & FMODE_READ) { 5830 struct seq_file *m = file->private_data; 5831 5832 fgd = m->private; 5833 seq_release(inode, file); 5834 } else { 5835 fgd = file->private_data; 5836 } 5837 5838 5839 if (file->f_mode & FMODE_WRITE) { 5840 5841 parser = &fgd->parser; 5842 5843 if (trace_parser_loaded((parser))) { 5844 ret = ftrace_graph_set_hash(fgd->new_hash, 5845 parser->buffer); 5846 } 5847 5848 trace_parser_put(parser); 5849 5850 new_hash = __ftrace_hash_move(fgd->new_hash); 5851 if (!new_hash) { 5852 ret = -ENOMEM; 5853 goto out; 5854 } 5855 5856 mutex_lock(&graph_lock); 5857 5858 if (fgd->type == GRAPH_FILTER_FUNCTION) { 5859 old_hash = rcu_dereference_protected(ftrace_graph_hash, 5860 lockdep_is_held(&graph_lock)); 5861 rcu_assign_pointer(ftrace_graph_hash, new_hash); 5862 } else { 5863 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5864 lockdep_is_held(&graph_lock)); 5865 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); 5866 } 5867 5868 mutex_unlock(&graph_lock); 5869 5870 /* 5871 * We need to do a hard force of sched synchronization. 5872 * This is because we use preempt_disable() to do RCU, but 5873 * the function tracers can be called where RCU is not watching 5874 * (like before user_exit()). We can not rely on the RCU 5875 * infrastructure to do the synchronization, thus we must do it 5876 * ourselves. 
5877 */ 5878 schedule_on_each_cpu(ftrace_sync); 5879 5880 free_ftrace_hash(old_hash); 5881 } 5882 5883 out: 5884 free_ftrace_hash(fgd->new_hash); 5885 kfree(fgd); 5886 5887 return ret; 5888 } 5889 5890 static int 5891 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) 5892 { 5893 struct ftrace_glob func_g; 5894 struct dyn_ftrace *rec; 5895 struct ftrace_page *pg; 5896 struct ftrace_func_entry *entry; 5897 int fail = 1; 5898 int not; 5899 5900 /* decode regex */ 5901 func_g.type = filter_parse_regex(buffer, strlen(buffer), 5902 &func_g.search, ¬); 5903 5904 func_g.len = strlen(func_g.search); 5905 5906 mutex_lock(&ftrace_lock); 5907 5908 if (unlikely(ftrace_disabled)) { 5909 mutex_unlock(&ftrace_lock); 5910 return -ENODEV; 5911 } 5912 5913 do_for_each_ftrace_rec(pg, rec) { 5914 5915 if (rec->flags & FTRACE_FL_DISABLED) 5916 continue; 5917 5918 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 5919 entry = ftrace_lookup_ip(hash, rec->ip); 5920 5921 if (!not) { 5922 fail = 0; 5923 5924 if (entry) 5925 continue; 5926 if (add_hash_entry(hash, rec->ip) < 0) 5927 goto out; 5928 } else { 5929 if (entry) { 5930 free_hash_entry(hash, entry); 5931 fail = 0; 5932 } 5933 } 5934 } 5935 } while_for_each_ftrace_rec(); 5936 out: 5937 mutex_unlock(&ftrace_lock); 5938 5939 if (fail) 5940 return -EINVAL; 5941 5942 return 0; 5943 } 5944 5945 static ssize_t 5946 ftrace_graph_write(struct file *file, const char __user *ubuf, 5947 size_t cnt, loff_t *ppos) 5948 { 5949 ssize_t read, ret = 0; 5950 struct ftrace_graph_data *fgd = file->private_data; 5951 struct trace_parser *parser; 5952 5953 if (!cnt) 5954 return 0; 5955 5956 /* Read mode uses seq functions */ 5957 if (file->f_mode & FMODE_READ) { 5958 struct seq_file *m = file->private_data; 5959 fgd = m->private; 5960 } 5961 5962 parser = &fgd->parser; 5963 5964 read = trace_get_user(parser, ubuf, cnt, ppos); 5965 5966 if (read >= 0 && trace_parser_loaded(parser) && 5967 !trace_parser_cont(parser)) { 5968 5969 ret = ftrace_graph_set_hash(fgd->new_hash, 5970 parser->buffer); 5971 trace_parser_clear(parser); 5972 } 5973 5974 if (!ret) 5975 ret = read; 5976 5977 return ret; 5978 } 5979 5980 static const struct file_operations ftrace_graph_fops = { 5981 .open = ftrace_graph_open, 5982 .read = seq_read, 5983 .write = ftrace_graph_write, 5984 .llseek = tracing_lseek, 5985 .release = ftrace_graph_release, 5986 }; 5987 5988 static const struct file_operations ftrace_graph_notrace_fops = { 5989 .open = ftrace_graph_notrace_open, 5990 .read = seq_read, 5991 .write = ftrace_graph_write, 5992 .llseek = tracing_lseek, 5993 .release = ftrace_graph_release, 5994 }; 5995 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 5996 5997 void ftrace_create_filter_files(struct ftrace_ops *ops, 5998 struct dentry *parent) 5999 { 6000 6001 trace_create_file("set_ftrace_filter", 0644, parent, 6002 ops, &ftrace_filter_fops); 6003 6004 trace_create_file("set_ftrace_notrace", 0644, parent, 6005 ops, &ftrace_notrace_fops); 6006 } 6007 6008 /* 6009 * The name "destroy_filter_files" is really a misnomer. Although 6010 * in the future, it may actually delete the files, but this is 6011 * really intended to make sure the ops passed in are disabled 6012 * and that when this function returns, the caller is free to 6013 * free the ops. 6014 * 6015 * The "destroy" name is only to match the "create" name that this 6016 * should be paired with. 
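 *
 * A typical pairing, sketched (my_ops and my_dir are placeholders):
 *
 *	ftrace_create_filter_files(&my_ops, my_dir);
 *	...
 *	ftrace_destroy_filter_files(&my_ops);
 *	// after this returns, my_ops may be freed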
6017 */ 6018 void ftrace_destroy_filter_files(struct ftrace_ops *ops) 6019 { 6020 mutex_lock(&ftrace_lock); 6021 if (ops->flags & FTRACE_OPS_FL_ENABLED) 6022 ftrace_shutdown(ops, 0); 6023 ops->flags |= FTRACE_OPS_FL_DELETED; 6024 ftrace_free_filter(ops); 6025 mutex_unlock(&ftrace_lock); 6026 } 6027 6028 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) 6029 { 6030 6031 trace_create_file("available_filter_functions", 0444, 6032 d_tracer, NULL, &ftrace_avail_fops); 6033 6034 trace_create_file("enabled_functions", 0444, 6035 d_tracer, NULL, &ftrace_enabled_fops); 6036 6037 ftrace_create_filter_files(&global_ops, d_tracer); 6038 6039 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 6040 trace_create_file("set_graph_function", 0644, d_tracer, 6041 NULL, 6042 &ftrace_graph_fops); 6043 trace_create_file("set_graph_notrace", 0644, d_tracer, 6044 NULL, 6045 &ftrace_graph_notrace_fops); 6046 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 6047 6048 return 0; 6049 } 6050 6051 static int ftrace_cmp_ips(const void *a, const void *b) 6052 { 6053 const unsigned long *ipa = a; 6054 const unsigned long *ipb = b; 6055 6056 if (*ipa > *ipb) 6057 return 1; 6058 if (*ipa < *ipb) 6059 return -1; 6060 return 0; 6061 } 6062 6063 static int ftrace_process_locs(struct module *mod, 6064 unsigned long *start, 6065 unsigned long *end) 6066 { 6067 struct ftrace_page *start_pg; 6068 struct ftrace_page *pg; 6069 struct dyn_ftrace *rec; 6070 unsigned long count; 6071 unsigned long *p; 6072 unsigned long addr; 6073 unsigned long flags = 0; /* Shut up gcc */ 6074 int ret = -ENOMEM; 6075 6076 count = end - start; 6077 6078 if (!count) 6079 return 0; 6080 6081 sort(start, count, sizeof(*start), 6082 ftrace_cmp_ips, NULL); 6083 6084 start_pg = ftrace_allocate_pages(count); 6085 if (!start_pg) 6086 return -ENOMEM; 6087 6088 mutex_lock(&ftrace_lock); 6089 6090 /* 6091 * Core and each module needs their own pages, as 6092 * modules will free them when they are removed. 6093 * Force a new page to be allocated for modules. 6094 */ 6095 if (!mod) { 6096 WARN_ON(ftrace_pages || ftrace_pages_start); 6097 /* First initialization */ 6098 ftrace_pages = ftrace_pages_start = start_pg; 6099 } else { 6100 if (!ftrace_pages) 6101 goto out; 6102 6103 if (WARN_ON(ftrace_pages->next)) { 6104 /* Hmm, we have free pages? */ 6105 while (ftrace_pages->next) 6106 ftrace_pages = ftrace_pages->next; 6107 } 6108 6109 ftrace_pages->next = start_pg; 6110 } 6111 6112 p = start; 6113 pg = start_pg; 6114 while (p < end) { 6115 addr = ftrace_call_adjust(*p++); 6116 /* 6117 * Some architecture linkers will pad between 6118 * the different mcount_loc sections of different 6119 * object files to satisfy alignments. 6120 * Skip any NULL pointers. 6121 */ 6122 if (!addr) 6123 continue; 6124 6125 if (pg->index == pg->size) { 6126 /* We should have allocated enough */ 6127 if (WARN_ON(!pg->next)) 6128 break; 6129 pg = pg->next; 6130 } 6131 6132 rec = &pg->records[pg->index++]; 6133 rec->ip = addr; 6134 } 6135 6136 /* We should have used all pages */ 6137 WARN_ON(pg->next); 6138 6139 /* Assign the last page to ftrace_pages */ 6140 ftrace_pages = pg; 6141 6142 /* 6143 * We only need to disable interrupts on start up 6144 * because we are modifying code that an interrupt 6145 * may execute, and the modification is not atomic. 6146 * But for modules, nothing runs the code we modify 6147 * until we are finished with it, and there's no 6148 * reason to cause large interrupt latencies while we do it. 
6149 */ 6150 if (!mod) 6151 local_irq_save(flags); 6152 ftrace_update_code(mod, start_pg); 6153 if (!mod) 6154 local_irq_restore(flags); 6155 ret = 0; 6156 out: 6157 mutex_unlock(&ftrace_lock); 6158 6159 return ret; 6160 } 6161 6162 struct ftrace_mod_func { 6163 struct list_head list; 6164 char *name; 6165 unsigned long ip; 6166 unsigned int size; 6167 }; 6168 6169 struct ftrace_mod_map { 6170 struct rcu_head rcu; 6171 struct list_head list; 6172 struct module *mod; 6173 unsigned long start_addr; 6174 unsigned long end_addr; 6175 struct list_head funcs; 6176 unsigned int num_funcs; 6177 }; 6178 6179 #ifdef CONFIG_MODULES 6180 6181 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 6182 6183 static LIST_HEAD(ftrace_mod_maps); 6184 6185 static int referenced_filters(struct dyn_ftrace *rec) 6186 { 6187 struct ftrace_ops *ops; 6188 int cnt = 0; 6189 6190 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { 6191 if (ops_references_rec(ops, rec)) 6192 cnt++; 6193 } 6194 6195 return cnt; 6196 } 6197 6198 static void 6199 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) 6200 { 6201 struct ftrace_func_entry *entry; 6202 struct dyn_ftrace *rec; 6203 int i; 6204 6205 if (ftrace_hash_empty(hash)) 6206 return; 6207 6208 for (i = 0; i < pg->index; i++) { 6209 rec = &pg->records[i]; 6210 entry = __ftrace_lookup_ip(hash, rec->ip); 6211 /* 6212 * Do not allow this rec to match again. 6213 * Yeah, it may waste some memory, but will be removed 6214 * if/when the hash is modified again. 6215 */ 6216 if (entry) 6217 entry->ip = 0; 6218 } 6219 } 6220 6221 /* Clear any records from hashs */ 6222 static void clear_mod_from_hashes(struct ftrace_page *pg) 6223 { 6224 struct trace_array *tr; 6225 6226 mutex_lock(&trace_types_lock); 6227 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 6228 if (!tr->ops || !tr->ops->func_hash) 6229 continue; 6230 mutex_lock(&tr->ops->func_hash->regex_lock); 6231 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); 6232 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); 6233 mutex_unlock(&tr->ops->func_hash->regex_lock); 6234 } 6235 mutex_unlock(&trace_types_lock); 6236 } 6237 6238 static void ftrace_free_mod_map(struct rcu_head *rcu) 6239 { 6240 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); 6241 struct ftrace_mod_func *mod_func; 6242 struct ftrace_mod_func *n; 6243 6244 /* All the contents of mod_map are now not visible to readers */ 6245 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { 6246 kfree(mod_func->name); 6247 list_del(&mod_func->list); 6248 kfree(mod_func); 6249 } 6250 6251 kfree(mod_map); 6252 } 6253 6254 void ftrace_release_mod(struct module *mod) 6255 { 6256 struct ftrace_mod_map *mod_map; 6257 struct ftrace_mod_map *n; 6258 struct dyn_ftrace *rec; 6259 struct ftrace_page **last_pg; 6260 struct ftrace_page *tmp_page = NULL; 6261 struct ftrace_page *pg; 6262 int order; 6263 6264 mutex_lock(&ftrace_lock); 6265 6266 if (ftrace_disabled) 6267 goto out_unlock; 6268 6269 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { 6270 if (mod_map->mod == mod) { 6271 list_del_rcu(&mod_map->list); 6272 call_rcu(&mod_map->rcu, ftrace_free_mod_map); 6273 break; 6274 } 6275 } 6276 6277 /* 6278 * Each module has its own ftrace_pages, remove 6279 * them from the list. 
6280 */ 6281 last_pg = &ftrace_pages_start; 6282 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 6283 rec = &pg->records[0]; 6284 if (within_module_core(rec->ip, mod) || 6285 within_module_init(rec->ip, mod)) { 6286 /* 6287 * As core pages are first, the first 6288 * page should never be a module page. 6289 */ 6290 if (WARN_ON(pg == ftrace_pages_start)) 6291 goto out_unlock; 6292 6293 /* Check if we are deleting the last page */ 6294 if (pg == ftrace_pages) 6295 ftrace_pages = next_to_ftrace_page(last_pg); 6296 6297 ftrace_update_tot_cnt -= pg->index; 6298 *last_pg = pg->next; 6299 6300 pg->next = tmp_page; 6301 tmp_page = pg; 6302 } else 6303 last_pg = &pg->next; 6304 } 6305 out_unlock: 6306 mutex_unlock(&ftrace_lock); 6307 6308 for (pg = tmp_page; pg; pg = tmp_page) { 6309 6310 /* Needs to be called outside of ftrace_lock */ 6311 clear_mod_from_hashes(pg); 6312 6313 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 6314 free_pages((unsigned long)pg->records, order); 6315 tmp_page = pg->next; 6316 kfree(pg); 6317 ftrace_number_of_pages -= 1 << order; 6318 ftrace_number_of_groups--; 6319 } 6320 } 6321 6322 void ftrace_module_enable(struct module *mod) 6323 { 6324 struct dyn_ftrace *rec; 6325 struct ftrace_page *pg; 6326 6327 mutex_lock(&ftrace_lock); 6328 6329 if (ftrace_disabled) 6330 goto out_unlock; 6331 6332 /* 6333 * If the tracing is enabled, go ahead and enable the record. 6334 * 6335 * The reason not to enable the record immediately is the 6336 * inherent check of ftrace_make_nop/ftrace_make_call for 6337 * correct previous instructions. Making first the NOP 6338 * conversion puts the module to the correct state, thus 6339 * passing the ftrace_make_call check. 6340 * 6341 * We also delay this to after the module code already set the 6342 * text to read-only, as we now need to set it back to read-write 6343 * so that we can modify the text. 6344 */ 6345 if (ftrace_start_up) 6346 ftrace_arch_code_modify_prepare(); 6347 6348 do_for_each_ftrace_rec(pg, rec) { 6349 int cnt; 6350 /* 6351 * do_for_each_ftrace_rec() is a double loop. 6352 * module text shares the pg. If a record is 6353 * not part of this module, then skip this pg, 6354 * which the "break" will do. 6355 */ 6356 if (!within_module_core(rec->ip, mod) && 6357 !within_module_init(rec->ip, mod)) 6358 break; 6359 6360 cnt = 0; 6361 6362 /* 6363 * When adding a module, we need to check if tracers are 6364 * currently enabled and if they are, and can trace this record, 6365 * we need to enable the module functions as well as update the 6366 * reference counts for those function records. 
6367 */ 6368 if (ftrace_start_up) 6369 cnt += referenced_filters(rec); 6370 6371 /* This clears FTRACE_FL_DISABLED */ 6372 rec->flags = cnt; 6373 6374 if (ftrace_start_up && cnt) { 6375 int failed = __ftrace_replace_code(rec, 1); 6376 if (failed) { 6377 ftrace_bug(failed, rec); 6378 goto out_loop; 6379 } 6380 } 6381 6382 } while_for_each_ftrace_rec(); 6383 6384 out_loop: 6385 if (ftrace_start_up) 6386 ftrace_arch_code_modify_post_process(); 6387 6388 out_unlock: 6389 mutex_unlock(&ftrace_lock); 6390 6391 process_cached_mods(mod->name); 6392 } 6393 6394 void ftrace_module_init(struct module *mod) 6395 { 6396 if (ftrace_disabled || !mod->num_ftrace_callsites) 6397 return; 6398 6399 ftrace_process_locs(mod, mod->ftrace_callsites, 6400 mod->ftrace_callsites + mod->num_ftrace_callsites); 6401 } 6402 6403 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 6404 struct dyn_ftrace *rec) 6405 { 6406 struct ftrace_mod_func *mod_func; 6407 unsigned long symsize; 6408 unsigned long offset; 6409 char str[KSYM_SYMBOL_LEN]; 6410 char *modname; 6411 const char *ret; 6412 6413 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); 6414 if (!ret) 6415 return; 6416 6417 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); 6418 if (!mod_func) 6419 return; 6420 6421 mod_func->name = kstrdup(str, GFP_KERNEL); 6422 if (!mod_func->name) { 6423 kfree(mod_func); 6424 return; 6425 } 6426 6427 mod_func->ip = rec->ip - offset; 6428 mod_func->size = symsize; 6429 6430 mod_map->num_funcs++; 6431 6432 list_add_rcu(&mod_func->list, &mod_map->funcs); 6433 } 6434 6435 static struct ftrace_mod_map * 6436 allocate_ftrace_mod_map(struct module *mod, 6437 unsigned long start, unsigned long end) 6438 { 6439 struct ftrace_mod_map *mod_map; 6440 6441 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); 6442 if (!mod_map) 6443 return NULL; 6444 6445 mod_map->mod = mod; 6446 mod_map->start_addr = start; 6447 mod_map->end_addr = end; 6448 mod_map->num_funcs = 0; 6449 6450 INIT_LIST_HEAD_RCU(&mod_map->funcs); 6451 6452 list_add_rcu(&mod_map->list, &ftrace_mod_maps); 6453 6454 return mod_map; 6455 } 6456 6457 static const char * 6458 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, 6459 unsigned long addr, unsigned long *size, 6460 unsigned long *off, char *sym) 6461 { 6462 struct ftrace_mod_func *found_func = NULL; 6463 struct ftrace_mod_func *mod_func; 6464 6465 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 6466 if (addr >= mod_func->ip && 6467 addr < mod_func->ip + mod_func->size) { 6468 found_func = mod_func; 6469 break; 6470 } 6471 } 6472 6473 if (found_func) { 6474 if (size) 6475 *size = found_func->size; 6476 if (off) 6477 *off = addr - found_func->ip; 6478 if (sym) 6479 strlcpy(sym, found_func->name, KSYM_NAME_LEN); 6480 6481 return found_func->name; 6482 } 6483 6484 return NULL; 6485 } 6486 6487 const char * 6488 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, 6489 unsigned long *off, char **modname, char *sym) 6490 { 6491 struct ftrace_mod_map *mod_map; 6492 const char *ret = NULL; 6493 6494 /* mod_map is freed via call_rcu() */ 6495 preempt_disable(); 6496 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 6497 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); 6498 if (ret) { 6499 if (modname) 6500 *modname = mod_map->mod->name; 6501 break; 6502 } 6503 } 6504 preempt_enable(); 6505 6506 return ret; 6507 } 6508 6509 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 6510 char *type, char *name, 6511 char *module_name, int 
*exported) 6512 { 6513 struct ftrace_mod_map *mod_map; 6514 struct ftrace_mod_func *mod_func; 6515 6516 preempt_disable(); 6517 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 6518 6519 if (symnum >= mod_map->num_funcs) { 6520 symnum -= mod_map->num_funcs; 6521 continue; 6522 } 6523 6524 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 6525 if (symnum > 0) { 6526 symnum--; 6527 continue; 6528 } 6529 6530 *value = mod_func->ip; 6531 *type = 'T'; 6532 strlcpy(name, mod_func->name, KSYM_NAME_LEN); 6533 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); 6534 *exported = 1; 6535 preempt_enable(); 6536 return 0; 6537 } 6538 WARN_ON(1); 6539 break; 6540 } 6541 preempt_enable(); 6542 return -ERANGE; 6543 } 6544 6545 #else 6546 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 6547 struct dyn_ftrace *rec) { } 6548 static inline struct ftrace_mod_map * 6549 allocate_ftrace_mod_map(struct module *mod, 6550 unsigned long start, unsigned long end) 6551 { 6552 return NULL; 6553 } 6554 #endif /* CONFIG_MODULES */ 6555 6556 struct ftrace_init_func { 6557 struct list_head list; 6558 unsigned long ip; 6559 }; 6560 6561 /* Clear any init ips from hashes */ 6562 static void 6563 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) 6564 { 6565 struct ftrace_func_entry *entry; 6566 6567 entry = ftrace_lookup_ip(hash, func->ip); 6568 /* 6569 * Do not allow this rec to match again. 6570 * Yeah, it may waste some memory, but will be removed 6571 * if/when the hash is modified again. 6572 */ 6573 if (entry) 6574 entry->ip = 0; 6575 } 6576 6577 static void 6578 clear_func_from_hashes(struct ftrace_init_func *func) 6579 { 6580 struct trace_array *tr; 6581 6582 mutex_lock(&trace_types_lock); 6583 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 6584 if (!tr->ops || !tr->ops->func_hash) 6585 continue; 6586 mutex_lock(&tr->ops->func_hash->regex_lock); 6587 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); 6588 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); 6589 mutex_unlock(&tr->ops->func_hash->regex_lock); 6590 } 6591 mutex_unlock(&trace_types_lock); 6592 } 6593 6594 static void add_to_clear_hash_list(struct list_head *clear_list, 6595 struct dyn_ftrace *rec) 6596 { 6597 struct ftrace_init_func *func; 6598 6599 func = kmalloc(sizeof(*func), GFP_KERNEL); 6600 if (!func) { 6601 MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n"); 6602 return; 6603 } 6604 6605 func->ip = rec->ip; 6606 list_add(&func->list, clear_list); 6607 } 6608 6609 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) 6610 { 6611 unsigned long start = (unsigned long)(start_ptr); 6612 unsigned long end = (unsigned long)(end_ptr); 6613 struct ftrace_page **last_pg = &ftrace_pages_start; 6614 struct ftrace_page *pg; 6615 struct dyn_ftrace *rec; 6616 struct dyn_ftrace key; 6617 struct ftrace_mod_map *mod_map = NULL; 6618 struct ftrace_init_func *func, *func_next; 6619 struct list_head clear_hash; 6620 int order; 6621 6622 INIT_LIST_HEAD(&clear_hash); 6623 6624 key.ip = start; 6625 key.flags = end; /* overload flags, as it is unsigned long */ 6626 6627 mutex_lock(&ftrace_lock); 6628 6629 /* 6630 * If we are freeing module init memory, then check if 6631 * any tracer is active. If so, we need to save a mapping of 6632 * the module functions being freed with the address.
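 *
 * That saved ftrace_mod_map is what later lets an address recorded in
 * a trace buffer still resolve to a symbol after the init text is
 * gone, e.g. (illustrative only; "saved_ip" is a made-up variable):
 *
 *	unsigned long size, off;
 *	char *modname;
 *	char sym[KSYM_SYMBOL_LEN];
 *
 *	if (ftrace_mod_address_lookup(saved_ip, &size, &off, &modname, sym))
 *		pr_info("%s+0x%lx [%s]\n", sym, off, modname);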
6633 */ 6634 if (mod && ftrace_ops_list != &ftrace_list_end) 6635 mod_map = allocate_ftrace_mod_map(mod, start, end); 6636 6637 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { 6638 if (end < pg->records[0].ip || 6639 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 6640 continue; 6641 again: 6642 rec = bsearch(&key, pg->records, pg->index, 6643 sizeof(struct dyn_ftrace), 6644 ftrace_cmp_recs); 6645 if (!rec) 6646 continue; 6647 6648 /* rec will be cleared from hashes after ftrace_lock unlock */ 6649 add_to_clear_hash_list(&clear_hash, rec); 6650 6651 if (mod_map) 6652 save_ftrace_mod_rec(mod_map, rec); 6653 6654 pg->index--; 6655 ftrace_update_tot_cnt--; 6656 if (!pg->index) { 6657 *last_pg = pg->next; 6658 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 6659 free_pages((unsigned long)pg->records, order); 6660 ftrace_number_of_pages -= 1 << order; 6661 ftrace_number_of_groups--; 6662 kfree(pg); 6663 pg = container_of(last_pg, struct ftrace_page, next); 6664 if (!(*last_pg)) 6665 ftrace_pages = pg; 6666 continue; 6667 } 6668 memmove(rec, rec + 1, 6669 (pg->index - (rec - pg->records)) * sizeof(*rec)); 6670 /* More than one function may be in this block */ 6671 goto again; 6672 } 6673 mutex_unlock(&ftrace_lock); 6674 6675 list_for_each_entry_safe(func, func_next, &clear_hash, list) { 6676 clear_func_from_hashes(func); 6677 kfree(func); 6678 } 6679 } 6680 6681 void __init ftrace_free_init_mem(void) 6682 { 6683 void *start = (void *)(&__init_begin); 6684 void *end = (void *)(&__init_end); 6685 6686 ftrace_free_mem(NULL, start, end); 6687 } 6688 6689 void __init ftrace_init(void) 6690 { 6691 extern unsigned long __start_mcount_loc[]; 6692 extern unsigned long __stop_mcount_loc[]; 6693 unsigned long count, flags; 6694 int ret; 6695 6696 local_irq_save(flags); 6697 ret = ftrace_dyn_arch_init(); 6698 local_irq_restore(flags); 6699 if (ret) 6700 goto failed; 6701 6702 count = __stop_mcount_loc - __start_mcount_loc; 6703 if (!count) { 6704 pr_info("ftrace: No functions to be traced?\n"); 6705 goto failed; 6706 } 6707 6708 pr_info("ftrace: allocating %ld entries in %ld pages\n", 6709 count, count / ENTRIES_PER_PAGE + 1); 6710 6711 last_ftrace_enabled = ftrace_enabled = 1; 6712 6713 ret = ftrace_process_locs(NULL, 6714 __start_mcount_loc, 6715 __stop_mcount_loc); 6716 6717 pr_info("ftrace: allocated %ld pages with %ld groups\n", 6718 ftrace_number_of_pages, ftrace_number_of_groups); 6719 6720 set_ftrace_early_filters(); 6721 6722 return; 6723 failed: 6724 ftrace_disabled = 1; 6725 } 6726 6727 /* Do nothing if arch does not support this */ 6728 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) 6729 { 6730 } 6731 6732 static void ftrace_update_trampoline(struct ftrace_ops *ops) 6733 { 6734 arch_ftrace_update_trampoline(ops); 6735 } 6736 6737 void ftrace_init_trace_array(struct trace_array *tr) 6738 { 6739 INIT_LIST_HEAD(&tr->func_probes); 6740 INIT_LIST_HEAD(&tr->mod_trace); 6741 INIT_LIST_HEAD(&tr->mod_notrace); 6742 } 6743 #else 6744 6745 struct ftrace_ops global_ops = { 6746 .func = ftrace_stub, 6747 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 6748 FTRACE_OPS_FL_INITIALIZED | 6749 FTRACE_OPS_FL_PID, 6750 }; 6751 6752 static int __init ftrace_nodyn_init(void) 6753 { 6754 ftrace_enabled = 1; 6755 return 0; 6756 } 6757 core_initcall(ftrace_nodyn_init); 6758 6759 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } 6760 static inline void ftrace_startup_enable(int command) { } 6761 static inline void ftrace_startup_all(int 
command) { } 6762 6763 # define ftrace_startup_sysctl() do { } while (0) 6764 # define ftrace_shutdown_sysctl() do { } while (0) 6765 6766 static void ftrace_update_trampoline(struct ftrace_ops *ops) 6767 { 6768 } 6769 6770 #endif /* CONFIG_DYNAMIC_FTRACE */ 6771 6772 __init void ftrace_init_global_array_ops(struct trace_array *tr) 6773 { 6774 tr->ops = &global_ops; 6775 tr->ops->private = tr; 6776 ftrace_init_trace_array(tr); 6777 } 6778 6779 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) 6780 { 6781 /* If we filter on pids, update to use the pid function */ 6782 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { 6783 if (WARN_ON(tr->ops->func != ftrace_stub)) 6784 printk("ftrace ops had %pS for function\n", 6785 tr->ops->func); 6786 } 6787 tr->ops->func = func; 6788 tr->ops->private = tr; 6789 } 6790 6791 void ftrace_reset_array_ops(struct trace_array *tr) 6792 { 6793 tr->ops->func = ftrace_stub; 6794 } 6795 6796 static nokprobe_inline void 6797 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 6798 struct ftrace_ops *ignored, struct pt_regs *regs) 6799 { 6800 struct ftrace_ops *op; 6801 int bit; 6802 6803 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 6804 if (bit < 0) 6805 return; 6806 6807 /* 6808 * Some of the ops may be dynamically allocated, 6809 * they must be freed after a synchronize_rcu(). 6810 */ 6811 preempt_disable_notrace(); 6812 6813 do_for_each_ftrace_op(op, ftrace_ops_list) { 6814 /* Stub functions don't need to be called nor tested */ 6815 if (op->flags & FTRACE_OPS_FL_STUB) 6816 continue; 6817 /* 6818 * Check the following for each ops before calling their func: 6819 * if RCU flag is set, then rcu_is_watching() must be true 6820 * if PER_CPU is set, then ftrace_function_local_disable() 6821 * must be false 6822 * Otherwise test if the ip matches the ops filter 6823 * 6824 * If any of the above fails then the op->func() is not executed. 6825 */ 6826 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && 6827 ftrace_ops_test(op, ip, regs)) { 6828 if (FTRACE_WARN_ON(!op->func)) { 6829 pr_warn("op=%p %pS\n", op, op); 6830 goto out; 6831 } 6832 op->func(ip, parent_ip, op, regs); 6833 } 6834 } while_for_each_ftrace_op(op); 6835 out: 6836 preempt_enable_notrace(); 6837 trace_clear_recursion(bit); 6838 } 6839 6840 /* 6841 * Some archs only support passing ip and parent_ip. Even though 6842 * the list function ignores the op parameter, we do not want any 6843 * C side effects, where a function is called without the caller 6844 * sending a third parameter. 6845 * Archs are to support both the regs and ftrace_ops at the same time. 6846 * If they support ftrace_ops, it is assumed they support regs. 6847 * If call backs want to use regs, they must either check for regs 6848 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. 6849 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. 6850 * An architecture can pass partial regs with ftrace_ops and still 6851 * set the ARCH_SUPPORTS_FTRACE_OPS. 
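 *
 * A minimal callback sketch that follows the rule above (illustrative
 * only; my_callback() and do_something_with() are made-up names, not
 * part of this file):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// regs may be NULL (or partial) unless the ops set
 *		// FTRACE_OPS_FL_SAVE_REGS and the arch provides full regs
 *		if (regs)
 *			do_something_with(instruction_pointer(regs));
 *	}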
6852 */ 6853 #if ARCH_SUPPORTS_FTRACE_OPS 6854 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 6855 struct ftrace_ops *op, struct pt_regs *regs) 6856 { 6857 __ftrace_ops_list_func(ip, parent_ip, NULL, regs); 6858 } 6859 NOKPROBE_SYMBOL(ftrace_ops_list_func); 6860 #else 6861 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) 6862 { 6863 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); 6864 } 6865 NOKPROBE_SYMBOL(ftrace_ops_no_ops); 6866 #endif 6867 6868 /* 6869 * If there's only one function registered but it does not support 6870 * recursion, needs RCU protection and/or requires per cpu handling, then 6871 * this function will be called by the mcount trampoline. 6872 */ 6873 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, 6874 struct ftrace_ops *op, struct pt_regs *regs) 6875 { 6876 int bit; 6877 6878 if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) 6879 return; 6880 6881 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 6882 if (bit < 0) 6883 return; 6884 6885 preempt_disable_notrace(); 6886 6887 op->func(ip, parent_ip, op, regs); 6888 6889 preempt_enable_notrace(); 6890 trace_clear_recursion(bit); 6891 } 6892 NOKPROBE_SYMBOL(ftrace_ops_assist_func); 6893 6894 /** 6895 * ftrace_ops_get_func - get the function a trampoline should call 6896 * @ops: the ops to get the function for 6897 * 6898 * Normally the mcount trampoline will call the ops->func, but there 6899 * are times that it should not. For example, if the ops does not 6900 * have its own recursion protection, then it should call the 6901 * ftrace_ops_assist_func() instead. 6902 * 6903 * Returns the function that the trampoline should call for @ops. 6904 */ 6905 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) 6906 { 6907 /* 6908 * If the function does not handle recursion, needs to be RCU safe, 6909 * or does per cpu logic, then we need to call the assist handler. 
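 *
 * For example (illustrative only; my_ops and my_callback are made-up
 * names), an ops registered without FTRACE_OPS_FL_RECURSION_SAFE:
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 * gets ftrace_ops_assist_func() as its trampoline target, which wraps
 * my_callback() with recursion and preemption protection, while an ops
 * that sets FTRACE_OPS_FL_RECURSION_SAFE (and not FTRACE_OPS_FL_RCU)
 * has the trampoline call my_callback() directly.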
6910 */ 6911 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) || 6912 ops->flags & FTRACE_OPS_FL_RCU) 6913 return ftrace_ops_assist_func; 6914 6915 return ops->func; 6916 } 6917 6918 static void 6919 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, 6920 struct task_struct *prev, struct task_struct *next) 6921 { 6922 struct trace_array *tr = data; 6923 struct trace_pid_list *pid_list; 6924 6925 pid_list = rcu_dereference_sched(tr->function_pids); 6926 6927 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 6928 trace_ignore_this_task(pid_list, next)); 6929 } 6930 6931 static void 6932 ftrace_pid_follow_sched_process_fork(void *data, 6933 struct task_struct *self, 6934 struct task_struct *task) 6935 { 6936 struct trace_pid_list *pid_list; 6937 struct trace_array *tr = data; 6938 6939 pid_list = rcu_dereference_sched(tr->function_pids); 6940 trace_filter_add_remove_task(pid_list, self, task); 6941 } 6942 6943 static void 6944 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) 6945 { 6946 struct trace_pid_list *pid_list; 6947 struct trace_array *tr = data; 6948 6949 pid_list = rcu_dereference_sched(tr->function_pids); 6950 trace_filter_add_remove_task(pid_list, NULL, task); 6951 } 6952 6953 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) 6954 { 6955 if (enable) { 6956 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 6957 tr); 6958 register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, 6959 tr); 6960 } else { 6961 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 6962 tr); 6963 unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, 6964 tr); 6965 } 6966 } 6967 6968 static void clear_ftrace_pids(struct trace_array *tr) 6969 { 6970 struct trace_pid_list *pid_list; 6971 int cpu; 6972 6973 pid_list = rcu_dereference_protected(tr->function_pids, 6974 lockdep_is_held(&ftrace_lock)); 6975 if (!pid_list) 6976 return; 6977 6978 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 6979 6980 for_each_possible_cpu(cpu) 6981 per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = false; 6982 6983 rcu_assign_pointer(tr->function_pids, NULL); 6984 6985 /* Wait till all users are no longer using pid filtering */ 6986 synchronize_rcu(); 6987 6988 trace_free_pid_list(pid_list); 6989 } 6990 6991 void ftrace_clear_pids(struct trace_array *tr) 6992 { 6993 mutex_lock(&ftrace_lock); 6994 6995 clear_ftrace_pids(tr); 6996 6997 mutex_unlock(&ftrace_lock); 6998 } 6999 7000 static void ftrace_pid_reset(struct trace_array *tr) 7001 { 7002 mutex_lock(&ftrace_lock); 7003 clear_ftrace_pids(tr); 7004 7005 ftrace_update_pid_func(); 7006 ftrace_startup_all(0); 7007 7008 mutex_unlock(&ftrace_lock); 7009 } 7010 7011 /* Greater than any max PID */ 7012 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) 7013 7014 static void *fpid_start(struct seq_file *m, loff_t *pos) 7015 __acquires(RCU) 7016 { 7017 struct trace_pid_list *pid_list; 7018 struct trace_array *tr = m->private; 7019 7020 mutex_lock(&ftrace_lock); 7021 rcu_read_lock_sched(); 7022 7023 pid_list = rcu_dereference_sched(tr->function_pids); 7024 7025 if (!pid_list) 7026 return !(*pos) ? 
FTRACE_NO_PIDS : NULL; 7027 7028 return trace_pid_start(pid_list, pos); 7029 } 7030 7031 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) 7032 { 7033 struct trace_array *tr = m->private; 7034 struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids); 7035 7036 if (v == FTRACE_NO_PIDS) { 7037 (*pos)++; 7038 return NULL; 7039 } 7040 return trace_pid_next(pid_list, v, pos); 7041 } 7042 7043 static void fpid_stop(struct seq_file *m, void *p) 7044 __releases(RCU) 7045 { 7046 rcu_read_unlock_sched(); 7047 mutex_unlock(&ftrace_lock); 7048 } 7049 7050 static int fpid_show(struct seq_file *m, void *v) 7051 { 7052 if (v == FTRACE_NO_PIDS) { 7053 seq_puts(m, "no pid\n"); 7054 return 0; 7055 } 7056 7057 return trace_pid_show(m, v); 7058 } 7059 7060 static const struct seq_operations ftrace_pid_sops = { 7061 .start = fpid_start, 7062 .next = fpid_next, 7063 .stop = fpid_stop, 7064 .show = fpid_show, 7065 }; 7066 7067 static int 7068 ftrace_pid_open(struct inode *inode, struct file *file) 7069 { 7070 struct trace_array *tr = inode->i_private; 7071 struct seq_file *m; 7072 int ret = 0; 7073 7074 ret = tracing_check_open_get_tr(tr); 7075 if (ret) 7076 return ret; 7077 7078 if ((file->f_mode & FMODE_WRITE) && 7079 (file->f_flags & O_TRUNC)) 7080 ftrace_pid_reset(tr); 7081 7082 ret = seq_open(file, &ftrace_pid_sops); 7083 if (ret < 0) { 7084 trace_array_put(tr); 7085 } else { 7086 m = file->private_data; 7087 /* copy tr over to seq ops */ 7088 m->private = tr; 7089 } 7090 7091 return ret; 7092 } 7093 7094 static void ignore_task_cpu(void *data) 7095 { 7096 struct trace_array *tr = data; 7097 struct trace_pid_list *pid_list; 7098 7099 /* 7100 * This function is called by on_each_cpu() while the 7101 * event_mutex is held. 7102 */ 7103 pid_list = rcu_dereference_protected(tr->function_pids, 7104 mutex_is_locked(&ftrace_lock)); 7105 7106 this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid, 7107 trace_ignore_this_task(pid_list, current)); 7108 } 7109 7110 static ssize_t 7111 ftrace_pid_write(struct file *filp, const char __user *ubuf, 7112 size_t cnt, loff_t *ppos) 7113 { 7114 struct seq_file *m = filp->private_data; 7115 struct trace_array *tr = m->private; 7116 struct trace_pid_list *filtered_pids = NULL; 7117 struct trace_pid_list *pid_list; 7118 ssize_t ret; 7119 7120 if (!cnt) 7121 return 0; 7122 7123 mutex_lock(&ftrace_lock); 7124 7125 filtered_pids = rcu_dereference_protected(tr->function_pids, 7126 lockdep_is_held(&ftrace_lock)); 7127 7128 ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt); 7129 if (ret < 0) 7130 goto out; 7131 7132 rcu_assign_pointer(tr->function_pids, pid_list); 7133 7134 if (filtered_pids) { 7135 synchronize_rcu(); 7136 trace_free_pid_list(filtered_pids); 7137 } else if (pid_list) { 7138 /* Register a probe to set whether to ignore the tracing of a task */ 7139 register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 7140 } 7141 7142 /* 7143 * Ignoring of pids is done at task switch. But we have to 7144 * check for those tasks that are currently running. 7145 * Always do this in case a pid was appended or removed. 
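 *
 * For example (an illustrative scenario, not new behaviour): if a pid
 * was just added to the filter, a task that is already running on
 * another CPU and is not in the new list would keep being traced until
 * it next schedules, because that CPU's ftrace_ignore_pid flag was last
 * computed at its previous context switch. The IPI below recomputes the
 * flag for whatever is currently running on each CPU.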
7146 */ 7147 on_each_cpu(ignore_task_cpu, tr, 1); 7148 7149 ftrace_update_pid_func(); 7150 ftrace_startup_all(0); 7151 out: 7152 mutex_unlock(&ftrace_lock); 7153 7154 if (ret > 0) 7155 *ppos += ret; 7156 7157 return ret; 7158 } 7159 7160 static int 7161 ftrace_pid_release(struct inode *inode, struct file *file) 7162 { 7163 struct trace_array *tr = inode->i_private; 7164 7165 trace_array_put(tr); 7166 7167 return seq_release(inode, file); 7168 } 7169 7170 static const struct file_operations ftrace_pid_fops = { 7171 .open = ftrace_pid_open, 7172 .write = ftrace_pid_write, 7173 .read = seq_read, 7174 .llseek = tracing_lseek, 7175 .release = ftrace_pid_release, 7176 }; 7177 7178 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer) 7179 { 7180 trace_create_file("set_ftrace_pid", 0644, d_tracer, 7181 tr, &ftrace_pid_fops); 7182 } 7183 7184 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr, 7185 struct dentry *d_tracer) 7186 { 7187 /* Only the top level directory has the dyn_tracefs and profile */ 7188 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); 7189 7190 ftrace_init_dyn_tracefs(d_tracer); 7191 ftrace_profile_tracefs(d_tracer); 7192 } 7193 7194 /** 7195 * ftrace_kill - kill ftrace 7196 * 7197 * This function should be used by panic code. It stops ftrace in a 7198 * blunt way: nothing is unregistered or synchronized, tracing is simply 7199 * redirected to ftrace_stub and ftrace is marked permanently disabled. 7200 */ 7201 void ftrace_kill(void) 7202 { 7203 ftrace_disabled = 1; 7204 ftrace_enabled = 0; 7205 ftrace_trace_function = ftrace_stub; 7206 } 7207 7208 /** 7209 * ftrace_is_dead - Test if ftrace is dead or not. 7210 */ 7211 int ftrace_is_dead(void) 7212 { 7213 return ftrace_disabled; 7214 } 7215 7216 /** 7217 * register_ftrace_function - register a function for profiling 7218 * @ops: ops structure that holds the function for profiling. 7219 * 7220 * Register a function to be called by all functions in the 7221 * kernel. 7222 * 7223 * Note: @ops->func and all the functions it calls must be labeled 7224 * with "notrace", otherwise it will go into a 7225 * recursive loop. 7226 */ 7227 int register_ftrace_function(struct ftrace_ops *ops) 7228 { 7229 int ret = -1; 7230 7231 ftrace_ops_init(ops); 7232 7233 mutex_lock(&ftrace_lock); 7234 7235 ret = ftrace_startup(ops, 0); 7236 7237 mutex_unlock(&ftrace_lock); 7238 7239 return ret; 7240 } 7241 EXPORT_SYMBOL_GPL(register_ftrace_function); 7242 7243 /** 7244 * unregister_ftrace_function - unregister a function for profiling. 7245 * @ops: ops structure that holds the function to unregister 7246 * 7247 * Unregister a function that was added to be called by ftrace profiling.
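 *
 * A minimal sketch of the register/unregister pair above and below
 * (illustrative only; my_callback and my_ops are made-up names, not
 * part of this file):
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		// runs for every traced function; must stay notrace
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	// register_ftrace_function(&my_ops);
 *	// ...
 *	// unregister_ftrace_function(&my_ops);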
7248 */ 7249 int unregister_ftrace_function(struct ftrace_ops *ops) 7250 { 7251 int ret; 7252 7253 mutex_lock(&ftrace_lock); 7254 ret = ftrace_shutdown(ops, 0); 7255 mutex_unlock(&ftrace_lock); 7256 7257 return ret; 7258 } 7259 EXPORT_SYMBOL_GPL(unregister_ftrace_function); 7260 7261 static bool is_permanent_ops_registered(void) 7262 { 7263 struct ftrace_ops *op; 7264 7265 do_for_each_ftrace_op(op, ftrace_ops_list) { 7266 if (op->flags & FTRACE_OPS_FL_PERMANENT) 7267 return true; 7268 } while_for_each_ftrace_op(op); 7269 7270 return false; 7271 } 7272 7273 int 7274 ftrace_enable_sysctl(struct ctl_table *table, int write, 7275 void __user *buffer, size_t *lenp, 7276 loff_t *ppos) 7277 { 7278 int ret = -ENODEV; 7279 7280 mutex_lock(&ftrace_lock); 7281 7282 if (unlikely(ftrace_disabled)) 7283 goto out; 7284 7285 ret = proc_dointvec(table, write, buffer, lenp, ppos); 7286 7287 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) 7288 goto out; 7289 7290 if (ftrace_enabled) { 7291 7292 /* we are starting ftrace again */ 7293 if (rcu_dereference_protected(ftrace_ops_list, 7294 lockdep_is_held(&ftrace_lock)) != &ftrace_list_end) 7295 update_ftrace_function(); 7296 7297 ftrace_startup_sysctl(); 7298 7299 } else { 7300 if (is_permanent_ops_registered()) { 7301 ftrace_enabled = true; 7302 ret = -EBUSY; 7303 goto out; 7304 } 7305 7306 /* stopping ftrace calls (just send to ftrace_stub) */ 7307 ftrace_trace_function = ftrace_stub; 7308 7309 ftrace_shutdown_sysctl(); 7310 } 7311 7312 last_ftrace_enabled = !!ftrace_enabled; 7313 out: 7314 mutex_unlock(&ftrace_lock); 7315 return ret; 7316 } 7317
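/*
 * Note on ftrace_enable_sysctl() above: it is the proc handler behind
 * /proc/sys/kernel/ftrace_enabled. A sketch of how it is hooked up (the
 * real ctl_table entry lives in kernel/sysctl.c, so treat the exact
 * fields here as illustrative):
 *
 *	{
 *		.procname	= "ftrace_enabled",
 *		.data		= &ftrace_enabled,
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= ftrace_enable_sysctl,
 *	},
 *
 * Writing 0 routes tracing to ftrace_stub unless an ops with
 * FTRACE_OPS_FL_PERMANENT is registered, in which case the write fails
 * with -EBUSY; writing 1 re-selects the proper trace function and
 * restarts tracing.
 */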