// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	struct trace_array *tr = op->private;

	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_rcu(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}
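
/*
 * A rough summary (illustrative, not exhaustive) of what the
 * mcount/fentry trampoline ends up calling after the helper above and
 * update_ftrace_function() below have run:
 *
 *	registered ops			ftrace_trace_function
 *	--------------			---------------------
 *	none				ftrace_stub
 *	one, neither DYNAMIC nor RCU	ops->func (called directly)
 *	one, DYNAMIC or RCU		ftrace_ops_list_func
 *	two or more			ftrace_ops_list_func
 *
 * FTRACE_FORCE_LIST_FUNC forces the list function in all non-empty
 * cases. ftrace_ops_list_func() walks ftrace_ops_list and calls each
 * ops whose hashes match the traced ip; the direct case skips that
 * walk entirely.
 */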

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);
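
/*
 * A minimal sketch (illustrative only) of the reader side that the
 * ordering in add_ftrace_ops() above protects. The real walk is the
 * do_for_each_ftrace_op()/while_for_each_ftrace_op() pair used
 * throughout this file, which uses a notrace-safe RCU dereference:
 *
 *	struct ftrace_ops *op;
 *
 *	op = rcu_dereference(ftrace_ops_list);
 *	while (op != &ftrace_list_end) {
 *		op->func(ip, parent_ip, op, regs);
 *		op = rcu_dereference(op->next);
 *	}
 *
 * Because add_ftrace_ops() assigns ops->next before publishing ops in
 * *list, a concurrent walker can never observe an ops with a stale
 * next pointer, and remove_ftrace_ops() can unlink an ops without
 * stopping walkers.
 */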

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample variance (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 *   s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only by 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide by 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}
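
/*
 * Worked example for the s^2 computation above (illustrative numbers):
 * with n = 2 samples of 100ns and 200ns,
 *
 *	Sum x_i   = 300,  Sum x_i^2 = 50000
 *	n * Sum x_i^2 - (Sum x_i)^2 = 100000 - 90000 = 10000
 *	s^2 = 10000 / (2 * 1) = 5000 ns^2
 *
 * which matches the textbook sample variance of {100, 200}. The extra
 * division by 1000 above, plus the one in trace_print_graph_duration(),
 * converts ns^2 to us^2 for display.
 */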

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}
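
/*
 * Rough sizing of the preallocation above (illustrative, assuming a
 * 64-bit build with 4K pages and CONFIG_FUNCTION_GRAPH_TRACER=y):
 * sizeof(struct ftrace_profile) is 48 bytes and the page header is 16,
 * so PROFILES_PER_PAGE = (4096 - 16) / 48 = 85. For the 20000-function
 * estimate that is DIV_ROUND_UP(20000, 85) = 236 pages, i.e. a bit
 * under 1MB of profile records per possible CPU.
 */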

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zeroed, ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_rcu.
			 */
			unregister_ftrace_profiler();
		}
	}
out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
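
/*
 * Typical use of the knob created above, from userspace (illustrative;
 * paths assume tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	# cat /sys/kernel/tracing/trace_stat/function0    # per-cpu stats
 *	# echo 0 > /sys/kernel/tracing/function_profile_enabled
 *
 * Writing 1 allocates the per-cpu pages and registers the profiler;
 * writing 0 unregisters it, while the collected stats stay readable
 * until the profiler is enabled again (which resets them).
 */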

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}
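
/*
 * A minimal sketch of how the hash helpers above fit together (purely
 * illustrative; real callers hold ftrace_lock and use real record ips):
 *
 *	struct ftrace_hash *hash;
 *	unsigned long ip = ...;		// address of a traced function
 *
 *	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *	if (hash && !add_hash_entry(hash, ip) &&
 *	    ftrace_lookup_ip(hash, ip))
 *		;			// ip now hashes to its bucket
 *	free_ftrace_hash(hash);
 *
 * alloc_ftrace_hash() and free_ftrace_hash() are defined just below;
 * lookups on the hot path use the __ftrace_lookup_ip() variant that
 * skips the empty-hash check.
 */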

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}


static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	return new_hash;
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}
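
/*
 * The four cases of hash_contains_ip(), spelled out (illustrative):
 *
 *	filter_hash	notrace_hash	result for ip
 *	-----------	------------	-------------
 *	empty		empty		match (trace everything)
 *	has ip		empty		match
 *	empty		has ip		no match
 *	lacks ip	(any)		no match
 */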

/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 * AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}
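
/*
 * Worked example for the bsearch comparator above (illustrative values):
 * key.ip = 0x1002, key.flags (= end) = 0x1003, and a record whose
 * rec->ip is 0x1000 with an MCOUNT_INSN_SIZE of 5 bytes:
 *
 *	key.flags (0x1003) <  rec->ip (0x1000)		-> false
 *	key.ip    (0x1002) >= rec->ip + 5 (0x1005)	-> false
 *
 * so the comparator returns 0: the [0x1002, 0x1003] range overlaps the
 * patched instruction and ftrace_location_range() reports 0x1000.
 */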

/**
 * ftrace_location - return true if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled if
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}
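
/*
 * Illustration of the rec->flags bookkeeping done above: the low bits
 * of dyn_ftrace.flags hold a reference count (read with
 * ftrace_rec_count()) and the high bits hold state. For example, with
 * two ops tracing the same function, one of them wanting SAVE_REGS:
 *
 *	register ops A (filter has func)  -> count 1, TRAMP if A has one
 *	register ops B with SAVE_REGS     -> count 2, TRAMP cleared,
 *					     REGS set
 *	unregister ops B                  -> count 1, REGS dropped if no
 *					     remaining ops wants regs
 */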

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or no update is needed, -EBUSY if it detects a conflict of the flag
 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the meanings below:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual:   ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
" R" : " "); 2032 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2033 ops = ftrace_find_tramp_ops_any(rec); 2034 if (ops) { 2035 do { 2036 pr_cont("\ttramp: %pS (%pS)", 2037 (void *)ops->trampoline, 2038 (void *)ops->func); 2039 ops = ftrace_find_tramp_ops_next(rec, ops); 2040 } while (ops); 2041 } else 2042 pr_cont("\ttramp: ERROR!"); 2043 2044 } 2045 ip = ftrace_get_addr_curr(rec); 2046 pr_cont("\n expected tramp: %lx\n", ip); 2047 } 2048 } 2049 2050 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) 2051 { 2052 unsigned long flag = 0UL; 2053 2054 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2055 2056 if (rec->flags & FTRACE_FL_DISABLED) 2057 return FTRACE_UPDATE_IGNORE; 2058 2059 /* 2060 * If we are updating calls: 2061 * 2062 * If the record has a ref count, then we need to enable it 2063 * because someone is using it. 2064 * 2065 * Otherwise we make sure its disabled. 2066 * 2067 * If we are disabling calls, then disable all records that 2068 * are enabled. 2069 */ 2070 if (enable && ftrace_rec_count(rec)) 2071 flag = FTRACE_FL_ENABLED; 2072 2073 /* 2074 * If enabling and the REGS flag does not match the REGS_EN, or 2075 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore 2076 * this record. Set flags to fail the compare against ENABLED. 2077 */ 2078 if (flag) { 2079 if (!(rec->flags & FTRACE_FL_REGS) != 2080 !(rec->flags & FTRACE_FL_REGS_EN)) 2081 flag |= FTRACE_FL_REGS; 2082 2083 if (!(rec->flags & FTRACE_FL_TRAMP) != 2084 !(rec->flags & FTRACE_FL_TRAMP_EN)) 2085 flag |= FTRACE_FL_TRAMP; 2086 } 2087 2088 /* If the state of this record hasn't changed, then do nothing */ 2089 if ((rec->flags & FTRACE_FL_ENABLED) == flag) 2090 return FTRACE_UPDATE_IGNORE; 2091 2092 if (flag) { 2093 /* Save off if rec is being enabled (for return value) */ 2094 flag ^= rec->flags & FTRACE_FL_ENABLED; 2095 2096 if (update) { 2097 rec->flags |= FTRACE_FL_ENABLED; 2098 if (flag & FTRACE_FL_REGS) { 2099 if (rec->flags & FTRACE_FL_REGS) 2100 rec->flags |= FTRACE_FL_REGS_EN; 2101 else 2102 rec->flags &= ~FTRACE_FL_REGS_EN; 2103 } 2104 if (flag & FTRACE_FL_TRAMP) { 2105 if (rec->flags & FTRACE_FL_TRAMP) 2106 rec->flags |= FTRACE_FL_TRAMP_EN; 2107 else 2108 rec->flags &= ~FTRACE_FL_TRAMP_EN; 2109 } 2110 } 2111 2112 /* 2113 * If this record is being updated from a nop, then 2114 * return UPDATE_MAKE_CALL. 2115 * Otherwise, 2116 * return UPDATE_MODIFY_CALL to tell the caller to convert 2117 * from the save regs, to a non-save regs function or 2118 * vice versa, or from a trampoline call. 2119 */ 2120 if (flag & FTRACE_FL_ENABLED) { 2121 ftrace_bug_type = FTRACE_BUG_CALL; 2122 return FTRACE_UPDATE_MAKE_CALL; 2123 } 2124 2125 ftrace_bug_type = FTRACE_BUG_UPDATE; 2126 return FTRACE_UPDATE_MODIFY_CALL; 2127 } 2128 2129 if (update) { 2130 /* If there's no more users, clear all flags */ 2131 if (!ftrace_rec_count(rec)) 2132 rec->flags = 0; 2133 else 2134 /* 2135 * Just disable the record, but keep the ops TRAMP 2136 * and REGS states. The _EN flags must be disabled though. 2137 */ 2138 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN | 2139 FTRACE_FL_REGS_EN); 2140 } 2141 2142 ftrace_bug_type = FTRACE_BUG_NOP; 2143 return FTRACE_UPDATE_MAKE_NOP; 2144 } 2145 2146 /** 2147 * ftrace_update_record, set a record that now is tracing or not 2148 * @rec: the record to update 2149 * @enable: set to 1 if the record is tracing, zero to force disable 2150 * 2151 * The records that represent all functions that can be traced need 2152 * to be updated when tracing has been enabled. 
2153 */ 2154 int ftrace_update_record(struct dyn_ftrace *rec, int enable) 2155 { 2156 return ftrace_check_record(rec, enable, 1); 2157 } 2158 2159 /** 2160 * ftrace_test_record, check if the record has been enabled or not 2161 * @rec: the record to test 2162 * @enable: set to 1 to check if enabled, 0 if it is disabled 2163 * 2164 * The arch code may need to test if a record is already set to 2165 * tracing to determine how to modify the function code that it 2166 * represents. 2167 */ 2168 int ftrace_test_record(struct dyn_ftrace *rec, int enable) 2169 { 2170 return ftrace_check_record(rec, enable, 0); 2171 } 2172 2173 static struct ftrace_ops * 2174 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) 2175 { 2176 struct ftrace_ops *op; 2177 unsigned long ip = rec->ip; 2178 2179 do_for_each_ftrace_op(op, ftrace_ops_list) { 2180 2181 if (!op->trampoline) 2182 continue; 2183 2184 if (hash_contains_ip(ip, op->func_hash)) 2185 return op; 2186 } while_for_each_ftrace_op(op); 2187 2188 return NULL; 2189 } 2190 2191 static struct ftrace_ops * 2192 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, 2193 struct ftrace_ops *op) 2194 { 2195 unsigned long ip = rec->ip; 2196 2197 while_for_each_ftrace_op(op) { 2198 2199 if (!op->trampoline) 2200 continue; 2201 2202 if (hash_contains_ip(ip, op->func_hash)) 2203 return op; 2204 } 2205 2206 return NULL; 2207 } 2208 2209 static struct ftrace_ops * 2210 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) 2211 { 2212 struct ftrace_ops *op; 2213 unsigned long ip = rec->ip; 2214 2215 /* 2216 * Need to check removed ops first. 2217 * If they are being removed, and this rec has a tramp, 2218 * and this rec is in the ops list, then it would be the 2219 * one with the tramp. 2220 */ 2221 if (removed_ops) { 2222 if (hash_contains_ip(ip, &removed_ops->old_hash)) 2223 return removed_ops; 2224 } 2225 2226 /* 2227 * Need to find the current trampoline for a rec. 2228 * Now, a trampoline is only attached to a rec if there 2229 * was a single 'ops' attached to it. But this can be called 2230 * when we are adding another op to the rec or removing the 2231 * current one. Thus, if the op is being added, we can 2232 * ignore it because it hasn't attached itself to the rec 2233 * yet. 2234 * 2235 * If an ops is being modified (hooking to different functions) 2236 * then we don't care about the new functions that are being 2237 * added, just the old ones (that are probably being removed). 2238 * 2239 * If we are adding an ops to a function that already is using 2240 * a trampoline, it needs to be removed (trampolines are only 2241 * for single ops connected), then an ops that is not being 2242 * modified also needs to be checked. 2243 */ 2244 do_for_each_ftrace_op(op, ftrace_ops_list) { 2245 2246 if (!op->trampoline) 2247 continue; 2248 2249 /* 2250 * If the ops is being added, it hasn't gotten to 2251 * the point to be removed from this tree yet. 2252 */ 2253 if (op->flags & FTRACE_OPS_FL_ADDING) 2254 continue; 2255 2256 2257 /* 2258 * If the ops is being modified and is in the old 2259 * hash, then it is probably being removed from this 2260 * function. 2261 */ 2262 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && 2263 hash_contains_ip(ip, &op->old_hash)) 2264 return op; 2265 /* 2266 * If the ops is not being added or modified, and it's 2267 * in its normal filter hash, then this must be the one 2268 * we want! 
2269 */ 2270 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && 2271 hash_contains_ip(ip, op->func_hash)) 2272 return op; 2273 2274 } while_for_each_ftrace_op(op); 2275 2276 return NULL; 2277 } 2278 2279 static struct ftrace_ops * 2280 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) 2281 { 2282 struct ftrace_ops *op; 2283 unsigned long ip = rec->ip; 2284 2285 do_for_each_ftrace_op(op, ftrace_ops_list) { 2286 /* pass rec in as regs to have non-NULL val */ 2287 if (hash_contains_ip(ip, op->func_hash)) 2288 return op; 2289 } while_for_each_ftrace_op(op); 2290 2291 return NULL; 2292 } 2293 2294 /** 2295 * ftrace_get_addr_new - Get the call address to set to 2296 * @rec: The ftrace record descriptor 2297 * 2298 * If the record has the FTRACE_FL_REGS set, that means that it 2299 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS 2300 * is not set, then it wants to convert to the normal callback. 2301 * 2302 * Returns the address of the trampoline to set to 2303 */ 2304 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) 2305 { 2306 struct ftrace_ops *ops; 2307 2308 /* Trampolines take precedence over regs */ 2309 if (rec->flags & FTRACE_FL_TRAMP) { 2310 ops = ftrace_find_tramp_ops_new(rec); 2311 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { 2312 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", 2313 (void *)rec->ip, (void *)rec->ip, rec->flags); 2314 /* Ftrace is shutting down, return anything */ 2315 return (unsigned long)FTRACE_ADDR; 2316 } 2317 return ops->trampoline; 2318 } 2319 2320 if (rec->flags & FTRACE_FL_REGS) 2321 return (unsigned long)FTRACE_REGS_ADDR; 2322 else 2323 return (unsigned long)FTRACE_ADDR; 2324 } 2325 2326 /** 2327 * ftrace_get_addr_curr - Get the call address that is already there 2328 * @rec: The ftrace record descriptor 2329 * 2330 * The FTRACE_FL_REGS_EN is set when the record already points to 2331 * a function that saves all the regs. Basically the '_EN' version 2332 * represents the current state of the function.
2333 * 2334 * Returns the address of the trampoline that is currently being called 2335 */ 2336 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) 2337 { 2338 struct ftrace_ops *ops; 2339 2340 /* Trampolines take precedence over regs */ 2341 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2342 ops = ftrace_find_tramp_ops_curr(rec); 2343 if (FTRACE_WARN_ON(!ops)) { 2344 pr_warn("Bad trampoline accounting at: %p (%pS)\n", 2345 (void *)rec->ip, (void *)rec->ip); 2346 /* Ftrace is shutting down, return anything */ 2347 return (unsigned long)FTRACE_ADDR; 2348 } 2349 return ops->trampoline; 2350 } 2351 2352 if (rec->flags & FTRACE_FL_REGS_EN) 2353 return (unsigned long)FTRACE_REGS_ADDR; 2354 else 2355 return (unsigned long)FTRACE_ADDR; 2356 } 2357 2358 static int 2359 __ftrace_replace_code(struct dyn_ftrace *rec, int enable) 2360 { 2361 unsigned long ftrace_old_addr; 2362 unsigned long ftrace_addr; 2363 int ret; 2364 2365 ftrace_addr = ftrace_get_addr_new(rec); 2366 2367 /* This needs to be done before we call ftrace_update_record */ 2368 ftrace_old_addr = ftrace_get_addr_curr(rec); 2369 2370 ret = ftrace_update_record(rec, enable); 2371 2372 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2373 2374 switch (ret) { 2375 case FTRACE_UPDATE_IGNORE: 2376 return 0; 2377 2378 case FTRACE_UPDATE_MAKE_CALL: 2379 ftrace_bug_type = FTRACE_BUG_CALL; 2380 return ftrace_make_call(rec, ftrace_addr); 2381 2382 case FTRACE_UPDATE_MAKE_NOP: 2383 ftrace_bug_type = FTRACE_BUG_NOP; 2384 return ftrace_make_nop(NULL, rec, ftrace_old_addr); 2385 2386 case FTRACE_UPDATE_MODIFY_CALL: 2387 ftrace_bug_type = FTRACE_BUG_UPDATE; 2388 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 2389 } 2390 2391 return -1; /* unknown ftrace bug */ 2392 } 2393 2394 void __weak ftrace_replace_code(int mod_flags) 2395 { 2396 struct dyn_ftrace *rec; 2397 struct ftrace_page *pg; 2398 int enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; 2399 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; 2400 int failed; 2401 2402 if (unlikely(ftrace_disabled)) 2403 return; 2404 2405 do_for_each_ftrace_rec(pg, rec) { 2406 2407 if (rec->flags & FTRACE_FL_DISABLED) 2408 continue; 2409 2410 failed = __ftrace_replace_code(rec, enable); 2411 if (failed) { 2412 ftrace_bug(failed, rec); 2413 /* Stop processing */ 2414 return; 2415 } 2416 if (schedulable) 2417 cond_resched(); 2418 } while_for_each_ftrace_rec(); 2419 } 2420 2421 struct ftrace_rec_iter { 2422 struct ftrace_page *pg; 2423 int index; 2424 }; 2425 2426 /** 2427 * ftrace_rec_iter_start, start up iterating over traced functions 2428 * 2429 * Returns an iterator handle that is used to iterate over all 2430 * the records that represent address locations where functions 2431 * are traced. 2432 * 2433 * May return NULL if no records are available. 2434 */ 2435 struct ftrace_rec_iter *ftrace_rec_iter_start(void) 2436 { 2437 /* 2438 * We only use a single iterator. 2439 * Protected by the ftrace_lock mutex. 2440 */ 2441 static struct ftrace_rec_iter ftrace_rec_iter; 2442 struct ftrace_rec_iter *iter = &ftrace_rec_iter; 2443 2444 iter->pg = ftrace_pages_start; 2445 iter->index = 0; 2446 2447 /* Could have empty pages */ 2448 while (iter->pg && !iter->pg->index) 2449 iter->pg = iter->pg->next; 2450 2451 if (!iter->pg) 2452 return NULL; 2453 2454 return iter; 2455 } 2456 2457 /** 2458 * ftrace_rec_iter_next, get the next record to process. 2459 * @iter: The handle to the iterator. 2460 * 2461 * Returns the next iterator after the given iterator @iter. 
2462 */ 2463 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) 2464 { 2465 iter->index++; 2466 2467 if (iter->index >= iter->pg->index) { 2468 iter->pg = iter->pg->next; 2469 iter->index = 0; 2470 2471 /* Could have empty pages */ 2472 while (iter->pg && !iter->pg->index) 2473 iter->pg = iter->pg->next; 2474 } 2475 2476 if (!iter->pg) 2477 return NULL; 2478 2479 return iter; 2480 } 2481 2482 /** 2483 * ftrace_rec_iter_record, get the record at the iterator location 2484 * @iter: The current iterator location 2485 * 2486 * Returns the record that the current @iter is at. 2487 */ 2488 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) 2489 { 2490 return &iter->pg->records[iter->index]; 2491 }
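/*
 * A minimal sketch (not from this file) of how arch code typically
 * drives this iterator: for_ftrace_rec_iter() is the linux/ftrace.h
 * wrapper around ftrace_rec_iter_start()/ftrace_rec_iter_next(), and
 * arch_update_one_site() stands in for whatever the arch does to a
 * single call site:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *	int ret;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		ret = arch_update_one_site(rec);
 *		if (ret)
 *			ftrace_bug(ret, rec);
 *	}
 */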
2492 2493 static int 2494 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) 2495 { 2496 int ret; 2497 2498 if (unlikely(ftrace_disabled)) 2499 return 0; 2500 2501 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); 2502 if (ret) { 2503 ftrace_bug_type = FTRACE_BUG_INIT; 2504 ftrace_bug(ret, rec); 2505 return 0; 2506 } 2507 return 1; 2508 } 2509 2510 /* 2511 * archs can override this function if they must do something 2512 * before code modification is performed. 2513 */ 2514 int __weak ftrace_arch_code_modify_prepare(void) 2515 { 2516 return 0; 2517 } 2518 2519 /* 2520 * archs can override this function if they must do something 2521 * after code modification is performed. 2522 */ 2523 int __weak ftrace_arch_code_modify_post_process(void) 2524 { 2525 return 0; 2526 } 2527 2528 void ftrace_modify_all_code(int command) 2529 { 2530 int update = command & FTRACE_UPDATE_TRACE_FUNC; 2531 int mod_flags = 0; 2532 int err = 0; 2533 2534 if (command & FTRACE_MAY_SLEEP) 2535 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; 2536 2537 /* 2538 * If the ftrace_caller calls a ftrace_ops func directly, 2539 * we need to make sure that it only traces functions it 2540 * expects to trace. When doing the switch of functions, 2541 * we need to update to the ftrace_ops_list_func first 2542 * before the transition between old and new calls is set, 2543 * as the ftrace_ops_list_func will check the ops hashes 2544 * to make sure the ops have the right functions 2545 * traced. 2546 */ 2547 if (update) { 2548 err = ftrace_update_ftrace_func(ftrace_ops_list_func); 2549 if (FTRACE_WARN_ON(err)) 2550 return; 2551 } 2552 2553 if (command & FTRACE_UPDATE_CALLS) 2554 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL); 2555 else if (command & FTRACE_DISABLE_CALLS) 2556 ftrace_replace_code(mod_flags); 2557 2558 if (update && ftrace_trace_function != ftrace_ops_list_func) { 2559 function_trace_op = set_function_trace_op; 2560 smp_wmb(); 2561 /* If irqs are disabled, we are in stop machine */ 2562 if (!irqs_disabled()) 2563 smp_call_function(ftrace_sync_ipi, NULL, 1); 2564 err = ftrace_update_ftrace_func(ftrace_trace_function); 2565 if (FTRACE_WARN_ON(err)) 2566 return; 2567 } 2568 2569 if (command & FTRACE_START_FUNC_RET) 2570 err = ftrace_enable_ftrace_graph_caller(); 2571 else if (command & FTRACE_STOP_FUNC_RET) 2572 err = ftrace_disable_ftrace_graph_caller(); 2573 FTRACE_WARN_ON(err); 2574 } 2575 2576 static int __ftrace_modify_code(void *data) 2577 { 2578 int *command = data; 2579 2580 ftrace_modify_all_code(*command); 2581 2582 return 0; 2583 } 2584 2585 /** 2586 * ftrace_run_stop_machine, go back to the stop machine method 2587 * @command: The command to tell ftrace what to do 2588 * 2589 * If an arch needs to fall back to the stop machine method, then 2590 * it can call this function. 2591 */ 2592 void ftrace_run_stop_machine(int command) 2593 { 2594 stop_machine(__ftrace_modify_code, &command, NULL); 2595 } 2596 2597 /** 2598 * arch_ftrace_update_code, modify the code to trace or not trace 2599 * @command: The command that needs to be done 2600 * 2601 * Archs can override this function if they do not need to 2602 * run stop_machine() to modify code. 2603 */ 2604 void __weak arch_ftrace_update_code(int command) 2605 { 2606 ftrace_run_stop_machine(command); 2607 }
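/*
 * Illustrative sketch of the override described above (x86 does
 * something similar around its breakpoint-based patching; the
 * preparation and cleanup steps here are placeholders): an arch that
 * can modify live text safely without stop_machine() can supply:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		... arch-specific preparation ...
 *		ftrace_modify_all_code(command);
 *		... arch-specific cleanup ...
 *	}
 */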
2608 2609 static void ftrace_run_update_code(int command) 2610 { 2611 int ret; 2612 2613 ret = ftrace_arch_code_modify_prepare(); 2614 FTRACE_WARN_ON(ret); 2615 if (ret) 2616 return; 2617 2618 /* 2619 * By default we use stop_machine() to modify the code. 2620 * But archs can do whatever they want as long as it 2621 * is safe. The stop_machine() is the safest, but also 2622 * produces the most overhead. 2623 */ 2624 arch_ftrace_update_code(command); 2625 2626 ret = ftrace_arch_code_modify_post_process(); 2627 FTRACE_WARN_ON(ret); 2628 } 2629 2630 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, 2631 struct ftrace_ops_hash *old_hash) 2632 { 2633 ops->flags |= FTRACE_OPS_FL_MODIFYING; 2634 ops->old_hash.filter_hash = old_hash->filter_hash; 2635 ops->old_hash.notrace_hash = old_hash->notrace_hash; 2636 ftrace_run_update_code(command); 2637 ops->old_hash.filter_hash = NULL; 2638 ops->old_hash.notrace_hash = NULL; 2639 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; 2640 } 2641 2642 static ftrace_func_t saved_ftrace_func; 2643 static int ftrace_start_up; 2644 2645 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) 2646 { 2647 } 2648 2649 static void ftrace_startup_enable(int command) 2650 { 2651 if (saved_ftrace_func != ftrace_trace_function) { 2652 saved_ftrace_func = ftrace_trace_function; 2653 command |= FTRACE_UPDATE_TRACE_FUNC; 2654 } 2655 2656 if (!command || !ftrace_enabled) 2657 return; 2658 2659 ftrace_run_update_code(command); 2660 } 2661 2662 static void ftrace_startup_all(int command) 2663 { 2664 update_all_ops = true; 2665 ftrace_startup_enable(command); 2666 update_all_ops = false; 2667 } 2668 2669 int ftrace_startup(struct ftrace_ops *ops, int command) 2670 { 2671 int ret; 2672 2673 if (unlikely(ftrace_disabled)) 2674 return -ENODEV; 2675 2676 ret = __register_ftrace_function(ops); 2677 if (ret) 2678 return ret; 2679 2680 ftrace_start_up++; 2681 2682 /* 2683 * Note that ftrace probes use this to start up 2684 * and modify functions they will probe. But we still 2685 * set the ADDING flag for modification, as probes 2686 * do not have trampolines. If they add them in the 2687 * future, then the probes will need to distinguish 2688 * between adding and updating probes. 2689 */ 2690 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; 2691 2692 ret = ftrace_hash_ipmodify_enable(ops); 2693 if (ret < 0) { 2694 /* Rollback registration process */ 2695 __unregister_ftrace_function(ops); 2696 ftrace_start_up--; 2697 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2698 return ret; 2699 } 2700 2701 if (ftrace_hash_rec_enable(ops, 1)) 2702 command |= FTRACE_UPDATE_CALLS; 2703 2704 ftrace_startup_enable(command); 2705 2706 ops->flags &= ~FTRACE_OPS_FL_ADDING; 2707 2708 return 0; 2709 }
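/*
 * For reference, a minimal sketch of how ftrace_startup() is normally
 * reached: a user fills out an ftrace_ops and calls
 * register_ftrace_function() (defined later in this file), which takes
 * ftrace_lock and calls ftrace_startup(ops, 0). The callback and ops
 * below are illustrative only:
 *
 *	static void my_func(unsigned long ip, unsigned long parent_ip,
 *			    struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		trace_printk("hit %pS\n", (void *)ip);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_func,
 *		.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	int ret = register_ftrace_function(&my_ops);
 */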
2710 2711 int ftrace_shutdown(struct ftrace_ops *ops, int command) 2712 { 2713 int ret; 2714 2715 if (unlikely(ftrace_disabled)) 2716 return -ENODEV; 2717 2718 ret = __unregister_ftrace_function(ops); 2719 if (ret) 2720 return ret; 2721 2722 ftrace_start_up--; 2723 /* 2724 * Just warn in case of unbalance, no need to kill ftrace, it's not 2725 * critical but the ftrace_call callers may never be nopped again after 2726 * further ftrace uses. 2727 */ 2728 WARN_ON_ONCE(ftrace_start_up < 0); 2729 2730 /* Disabling ipmodify never fails */ 2731 ftrace_hash_ipmodify_disable(ops); 2732 2733 if (ftrace_hash_rec_disable(ops, 1)) 2734 command |= FTRACE_UPDATE_CALLS; 2735 2736 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2737 2738 if (saved_ftrace_func != ftrace_trace_function) { 2739 saved_ftrace_func = ftrace_trace_function; 2740 command |= FTRACE_UPDATE_TRACE_FUNC; 2741 } 2742 2743 if (!command || !ftrace_enabled) { 2744 /* 2745 * If these are dynamic or per_cpu ops, they still 2746 * need their data freed. Since function tracing is 2747 * not currently active, we can just free them 2748 * without synchronizing all CPUs. 2749 */ 2750 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) 2751 goto free_ops; 2752 2753 return 0; 2754 } 2755 2756 /* 2757 * If the ops uses a trampoline, then it needs to be 2758 * tested first on update. 2759 */ 2760 ops->flags |= FTRACE_OPS_FL_REMOVING; 2761 removed_ops = ops; 2762 2763 /* The trampoline logic checks the old hashes */ 2764 ops->old_hash.filter_hash = ops->func_hash->filter_hash; 2765 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; 2766 2767 ftrace_run_update_code(command); 2768 2769 /* 2770 * If there are no more ops registered with ftrace, run a 2771 * sanity check to make sure all rec flags are cleared. 2772 */ 2773 if (rcu_dereference_protected(ftrace_ops_list, 2774 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { 2775 struct ftrace_page *pg; 2776 struct dyn_ftrace *rec; 2777 2778 do_for_each_ftrace_rec(pg, rec) { 2779 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) 2780 pr_warn(" %pS flags:%lx\n", 2781 (void *)rec->ip, rec->flags); 2782 } while_for_each_ftrace_rec(); 2783 } 2784 2785 ops->old_hash.filter_hash = NULL; 2786 ops->old_hash.notrace_hash = NULL; 2787 2788 removed_ops = NULL; 2789 ops->flags &= ~FTRACE_OPS_FL_REMOVING; 2790 2791 /* 2792 * Dynamic ops may be freed; we must make sure that all 2793 * callers are done before leaving this function. 2794 * The same goes for freeing the per_cpu data of the per_cpu 2795 * ops. 2796 */ 2797 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { 2798 /* 2799 * We need to do a hard force of sched synchronization. 2800 * This is because we use preempt_disable() to do RCU, but 2801 * the function tracers can be called where RCU is not watching 2802 * (like before user_exit()). We cannot rely on the RCU 2803 * infrastructure to do the synchronization, thus we must do it 2804 * ourselves. 2805 */ 2806 schedule_on_each_cpu(ftrace_sync); 2807 2808 /* 2809 * When the kernel is preemptive, tasks can be preempted 2810 * while on a ftrace trampoline. Just scheduling a task on 2811 * a CPU is not good enough to flush them. Calling 2812 * synchronize_rcu_tasks() will wait for those tasks to 2813 * execute and either schedule voluntarily or enter user space. 2814 */ 2815 if (IS_ENABLED(CONFIG_PREEMPT)) 2816 synchronize_rcu_tasks(); 2817 2818 free_ops: 2819 arch_ftrace_trampoline_free(ops); 2820 } 2821 2822 return 0; 2823 } 2824 2825 static void ftrace_startup_sysctl(void) 2826 { 2827 int command; 2828 2829 if (unlikely(ftrace_disabled)) 2830 return; 2831 2832 /* Force update next time */ 2833 saved_ftrace_func = NULL; 2834 /* ftrace_start_up is true if we want ftrace running */ 2835 if (ftrace_start_up) { 2836 command = FTRACE_UPDATE_CALLS; 2837 if (ftrace_graph_active) 2838 command |= FTRACE_START_FUNC_RET; 2839 ftrace_startup_enable(command); 2840 } 2841 } 2842 2843 static void ftrace_shutdown_sysctl(void) 2844 { 2845 int command; 2846 2847 if (unlikely(ftrace_disabled)) 2848 return; 2849 2850 /* ftrace_start_up is true if ftrace is running */ 2851 if (ftrace_start_up) { 2852 command = FTRACE_DISABLE_CALLS; 2853 if (ftrace_graph_active) 2854 command |= FTRACE_STOP_FUNC_RET; 2855 ftrace_run_update_code(command); 2856 } 2857 } 2858 2859 static u64 ftrace_update_time; 2860 unsigned long ftrace_update_tot_cnt; 2861 2862 static inline int ops_traces_mod(struct ftrace_ops *ops) 2863 { 2864 /* 2865 * Filter_hash being empty will default to tracing the module. 2866 * But notrace hash requires a test of individual module functions.
2867 */ 2868 return ftrace_hash_empty(ops->func_hash->filter_hash) && 2869 ftrace_hash_empty(ops->func_hash->notrace_hash); 2870 } 2871 2872 /* 2873 * Check if the current ops references the record. 2874 * 2875 * If the ops traces all functions, then it was already accounted for. 2876 * If the ops does not trace the current record function, skip it. 2877 * If the ops ignores the function via notrace filter, skip it. 2878 */ 2879 static inline bool 2880 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) 2881 { 2882 /* If ops isn't enabled, ignore it */ 2883 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 2884 return false; 2885 2886 /* If ops traces all, then it includes this function */ 2887 if (ops_traces_mod(ops)) 2888 return true; 2889 2890 /* The function must be in the filter */ 2891 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && 2892 !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) 2893 return false; 2894 2895 /* If in notrace hash, we ignore it too */ 2896 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) 2897 return false; 2898 2899 return true; 2900 }
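/*
 * For example (illustrative values): with filter_hash = { foo } and an
 * empty notrace_hash, only foo is referenced; with both hashes empty,
 * every function is referenced, module functions included; adding bar
 * to notrace_hash excludes bar but leaves everything else referenced.
 */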
2901 2902 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) 2903 { 2904 struct ftrace_page *pg; 2905 struct dyn_ftrace *p; 2906 u64 start, stop; 2907 unsigned long update_cnt = 0; 2908 unsigned long rec_flags = 0; 2909 int i; 2910 2911 start = ftrace_now(raw_smp_processor_id()); 2912 2913 /* 2914 * When a module is loaded, this function is called to convert 2915 * the calls to mcount in its text to nops, and also to create 2916 * an entry in the ftrace data. Now, if ftrace is activated 2917 * after this call, but before the module sets its text to 2918 * read-only, the modification of enabling ftrace can fail if 2919 * the read-only is done while ftrace is converting the calls. 2920 * To prevent this, the module's records are set as disabled 2921 * and will be enabled after the call to set the module's text 2922 * to read-only. 2923 */ 2924 if (mod) 2925 rec_flags |= FTRACE_FL_DISABLED; 2926 2927 for (pg = new_pgs; pg; pg = pg->next) { 2928 2929 for (i = 0; i < pg->index; i++) { 2930 2931 /* If something went wrong, bail without enabling anything */ 2932 if (unlikely(ftrace_disabled)) 2933 return -1; 2934 2935 p = &pg->records[i]; 2936 p->flags = rec_flags; 2937 2938 /* 2939 * Do the initial record conversion from mcount jump 2940 * to the NOP instructions. 2941 */ 2942 if (!__is_defined(CC_USING_NOP_MCOUNT) && 2943 !ftrace_code_disable(mod, p)) 2944 break; 2945 2946 update_cnt++; 2947 } 2948 } 2949 2950 stop = ftrace_now(raw_smp_processor_id()); 2951 ftrace_update_time = stop - start; 2952 ftrace_update_tot_cnt += update_cnt; 2953 2954 return 0; 2955 } 2956 2957 static int ftrace_allocate_records(struct ftrace_page *pg, int count) 2958 { 2959 int order; 2960 int cnt; 2961 2962 if (WARN_ON(!count)) 2963 return -EINVAL; 2964 2965 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); 2966 2967 /* 2968 * We want to fill as much as possible. No more than a page 2969 * may be empty. 2970 */ 2971 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) 2972 order--; 2973 2974 again: 2975 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 2976 2977 if (!pg->records) { 2978 /* if we can't allocate this size, try something smaller */ 2979 if (!order) 2980 return -ENOMEM; 2981 order >>= 1; 2982 goto again; 2983 } 2984 2985 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; 2986 pg->size = cnt; 2987 2988 if (cnt > count) 2989 cnt = count; 2990 2991 return cnt; 2992 } 2993 2994 static struct ftrace_page * 2995 ftrace_allocate_pages(unsigned long num_to_init) 2996 { 2997 struct ftrace_page *start_pg; 2998 struct ftrace_page *pg; 2999 int order; 3000 int cnt; 3001 3002 if (!num_to_init) 3003 return NULL; 3004 3005 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); 3006 if (!pg) 3007 return NULL; 3008 3009 /* 3010 * Try to allocate as much as possible in one contiguous 3011 * location that fills in all of the space. We want to 3012 * waste as little space as possible. 3013 */ 3014 for (;;) { 3015 cnt = ftrace_allocate_records(pg, num_to_init); 3016 if (cnt < 0) 3017 goto free_pages; 3018 3019 num_to_init -= cnt; 3020 if (!num_to_init) 3021 break; 3022 3023 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); 3024 if (!pg->next) 3025 goto free_pages; 3026 3027 pg = pg->next; 3028 } 3029 3030 return start_pg; 3031 3032 free_pages: 3033 pg = start_pg; 3034 while (pg) { 3035 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 3036 free_pages((unsigned long)pg->records, order); 3037 start_pg = pg->next; 3038 kfree(pg); 3039 pg = start_pg; 3040 } 3041 pr_info("ftrace: FAILED to allocate memory for functions\n"); 3042 return NULL; 3043 } 3044 3045 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 3046 3047 struct ftrace_iterator { 3048 loff_t pos; 3049 loff_t func_pos; 3050 loff_t mod_pos; 3051 struct ftrace_page *pg; 3052 struct dyn_ftrace *func; 3053 struct ftrace_func_probe *probe; 3054 struct ftrace_func_entry *probe_entry; 3055 struct trace_parser parser; 3056 struct ftrace_hash *hash; 3057 struct ftrace_ops *ops; 3058 struct trace_array *tr; 3059 struct list_head *mod_list; 3060 int pidx; 3061 int idx; 3062 unsigned flags; 3063 }; 3064 3065 static void * 3066 t_probe_next(struct seq_file *m, loff_t *pos) 3067 { 3068 struct ftrace_iterator *iter = m->private; 3069 struct trace_array *tr = iter->ops->private; 3070 struct list_head *func_probes; 3071 struct ftrace_hash *hash; 3072 struct list_head *next; 3073 struct hlist_node *hnd = NULL; 3074 struct hlist_head *hhd; 3075 int size; 3076 3077 (*pos)++; 3078 iter->pos = *pos; 3079 3080 if (!tr) 3081 return NULL; 3082 3083 func_probes = &tr->func_probes; 3084 if (list_empty(func_probes)) 3085 return NULL; 3086 3087 if (!iter->probe) { 3088 next = func_probes->next; 3089 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3090 } 3091 3092 if (iter->probe_entry) 3093 hnd = &iter->probe_entry->hlist; 3094 3095 hash = iter->probe->ops.func_hash->filter_hash; 3096 size = 1 << hash->size_bits; 3097 3098 retry: 3099 if (iter->pidx >= size) { 3100 if (iter->probe->list.next == func_probes) 3101 return NULL; 3102 next = iter->probe->list.next; 3103 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3104 hash = iter->probe->ops.func_hash->filter_hash; 3105 size = 1 << hash->size_bits; 3106 iter->pidx = 0; 3107 } 3108 3109 hhd = &hash->buckets[iter->pidx]; 3110 3111 if (hlist_empty(hhd)) { 3112 iter->pidx++; 3113 hnd = NULL; 3114 goto retry; 3115 } 3116 3117 if (!hnd) 3118 hnd =
hhd->first; 3119 else { 3120 hnd = hnd->next; 3121 if (!hnd) { 3122 iter->pidx++; 3123 goto retry; 3124 } 3125 } 3126 3127 if (WARN_ON_ONCE(!hnd)) 3128 return NULL; 3129 3130 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); 3131 3132 return iter; 3133 } 3134 3135 static void *t_probe_start(struct seq_file *m, loff_t *pos) 3136 { 3137 struct ftrace_iterator *iter = m->private; 3138 void *p = NULL; 3139 loff_t l; 3140 3141 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) 3142 return NULL; 3143 3144 if (iter->mod_pos > *pos) 3145 return NULL; 3146 3147 iter->probe = NULL; 3148 iter->probe_entry = NULL; 3149 iter->pidx = 0; 3150 for (l = 0; l <= (*pos - iter->mod_pos); ) { 3151 p = t_probe_next(m, &l); 3152 if (!p) 3153 break; 3154 } 3155 if (!p) 3156 return NULL; 3157 3158 /* Only set this if we have an item */ 3159 iter->flags |= FTRACE_ITER_PROBE; 3160 3161 return iter; 3162 } 3163 3164 static int 3165 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) 3166 { 3167 struct ftrace_func_entry *probe_entry; 3168 struct ftrace_probe_ops *probe_ops; 3169 struct ftrace_func_probe *probe; 3170 3171 probe = iter->probe; 3172 probe_entry = iter->probe_entry; 3173 3174 if (WARN_ON_ONCE(!probe || !probe_entry)) 3175 return -EIO; 3176 3177 probe_ops = probe->probe_ops; 3178 3179 if (probe_ops->print) 3180 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); 3181 3182 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, 3183 (void *)probe_ops->func); 3184 3185 return 0; 3186 } 3187 3188 static void * 3189 t_mod_next(struct seq_file *m, loff_t *pos) 3190 { 3191 struct ftrace_iterator *iter = m->private; 3192 struct trace_array *tr = iter->tr; 3193 3194 (*pos)++; 3195 iter->pos = *pos; 3196 3197 iter->mod_list = iter->mod_list->next; 3198 3199 if (iter->mod_list == &tr->mod_trace || 3200 iter->mod_list == &tr->mod_notrace) { 3201 iter->flags &= ~FTRACE_ITER_MOD; 3202 return NULL; 3203 } 3204 3205 iter->mod_pos = *pos; 3206 3207 return iter; 3208 } 3209 3210 static void *t_mod_start(struct seq_file *m, loff_t *pos) 3211 { 3212 struct ftrace_iterator *iter = m->private; 3213 void *p = NULL; 3214 loff_t l; 3215 3216 if (iter->func_pos > *pos) 3217 return NULL; 3218 3219 iter->mod_pos = iter->func_pos; 3220 3221 /* probes are only available if tr is set */ 3222 if (!iter->tr) 3223 return NULL; 3224 3225 for (l = 0; l <= (*pos - iter->func_pos); ) { 3226 p = t_mod_next(m, &l); 3227 if (!p) 3228 break; 3229 } 3230 if (!p) { 3231 iter->flags &= ~FTRACE_ITER_MOD; 3232 return t_probe_start(m, pos); 3233 } 3234 3235 /* Only set this if we have an item */ 3236 iter->flags |= FTRACE_ITER_MOD; 3237 3238 return iter; 3239 } 3240 3241 static int 3242 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) 3243 { 3244 struct ftrace_mod_load *ftrace_mod; 3245 struct trace_array *tr = iter->tr; 3246 3247 if (WARN_ON_ONCE(!iter->mod_list) || 3248 iter->mod_list == &tr->mod_trace || 3249 iter->mod_list == &tr->mod_notrace) 3250 return -EIO; 3251 3252 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); 3253 3254 if (ftrace_mod->func) 3255 seq_printf(m, "%s", ftrace_mod->func); 3256 else 3257 seq_putc(m, '*'); 3258 3259 seq_printf(m, ":mod:%s\n", ftrace_mod->module); 3260 3261 return 0; 3262 } 3263 3264 static void * 3265 t_func_next(struct seq_file *m, loff_t *pos) 3266 { 3267 struct ftrace_iterator *iter = m->private; 3268 struct dyn_ftrace *rec = NULL; 3269 3270 (*pos)++; 3271 3272 retry: 3273 if (iter->idx >= iter->pg->index) { 3274 if (iter->pg->next) { 
3275 iter->pg = iter->pg->next; 3276 iter->idx = 0; 3277 goto retry; 3278 } 3279 } else { 3280 rec = &iter->pg->records[iter->idx++]; 3281 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 3282 !ftrace_lookup_ip(iter->hash, rec->ip)) || 3283 3284 ((iter->flags & FTRACE_ITER_ENABLED) && 3285 !(rec->flags & FTRACE_FL_ENABLED))) { 3286 3287 rec = NULL; 3288 goto retry; 3289 } 3290 } 3291 3292 if (!rec) 3293 return NULL; 3294 3295 iter->pos = iter->func_pos = *pos; 3296 iter->func = rec; 3297 3298 return iter; 3299 } 3300 3301 static void * 3302 t_next(struct seq_file *m, void *v, loff_t *pos) 3303 { 3304 struct ftrace_iterator *iter = m->private; 3305 loff_t l = *pos; /* t_probe_start() must use original pos */ 3306 void *ret; 3307 3308 if (unlikely(ftrace_disabled)) 3309 return NULL; 3310 3311 if (iter->flags & FTRACE_ITER_PROBE) 3312 return t_probe_next(m, pos); 3313 3314 if (iter->flags & FTRACE_ITER_MOD) 3315 return t_mod_next(m, pos); 3316 3317 if (iter->flags & FTRACE_ITER_PRINTALL) { 3318 /* next must increment pos, and t_probe_start does not */ 3319 (*pos)++; 3320 return t_mod_start(m, &l); 3321 } 3322 3323 ret = t_func_next(m, pos); 3324 3325 if (!ret) 3326 return t_mod_start(m, &l); 3327 3328 return ret; 3329 } 3330 3331 static void reset_iter_read(struct ftrace_iterator *iter) 3332 { 3333 iter->pos = 0; 3334 iter->func_pos = 0; 3335 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); 3336 } 3337 3338 static void *t_start(struct seq_file *m, loff_t *pos) 3339 { 3340 struct ftrace_iterator *iter = m->private; 3341 void *p = NULL; 3342 loff_t l; 3343 3344 mutex_lock(&ftrace_lock); 3345 3346 if (unlikely(ftrace_disabled)) 3347 return NULL; 3348 3349 /* 3350 * If an lseek was done, then reset and start from beginning. 3351 */ 3352 if (*pos < iter->pos) 3353 reset_iter_read(iter); 3354 3355 /* 3356 * For set_ftrace_filter reading, if we have the filter 3357 * off, we can shortcut and just print out that all 3358 * functions are enabled. 3359 */ 3360 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 3361 ftrace_hash_empty(iter->hash)) { 3362 iter->func_pos = 1; /* Account for the message */ 3363 if (*pos > 0) 3364 return t_mod_start(m, pos); 3365 iter->flags |= FTRACE_ITER_PRINTALL; 3366 /* reset in case of seek/pread */ 3367 iter->flags &= ~FTRACE_ITER_PROBE; 3368 return iter; 3369 } 3370 3371 if (iter->flags & FTRACE_ITER_MOD) 3372 return t_mod_start(m, pos); 3373 3374 /* 3375 * Unfortunately, we need to restart at ftrace_pages_start 3376 * every time we let go of ftrace_lock. This is because 3377 * those pointers can change without the lock.
3378 */ 3379 iter->pg = ftrace_pages_start; 3380 iter->idx = 0; 3381 for (l = 0; l <= *pos; ) { 3382 p = t_func_next(m, &l); 3383 if (!p) 3384 break; 3385 } 3386 3387 if (!p) 3388 return t_mod_start(m, pos); 3389 3390 return iter; 3391 } 3392 3393 static void t_stop(struct seq_file *m, void *p) 3394 { 3395 mutex_unlock(&ftrace_lock); 3396 } 3397 3398 void * __weak 3399 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 3400 { 3401 return NULL; 3402 } 3403 3404 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, 3405 struct dyn_ftrace *rec) 3406 { 3407 void *ptr; 3408 3409 ptr = arch_ftrace_trampoline_func(ops, rec); 3410 if (ptr) 3411 seq_printf(m, " ->%pS", ptr); 3412 } 3413 3414 static int t_show(struct seq_file *m, void *v) 3415 { 3416 struct ftrace_iterator *iter = m->private; 3417 struct dyn_ftrace *rec; 3418 3419 if (iter->flags & FTRACE_ITER_PROBE) 3420 return t_probe_show(m, iter); 3421 3422 if (iter->flags & FTRACE_ITER_MOD) 3423 return t_mod_show(m, iter); 3424 3425 if (iter->flags & FTRACE_ITER_PRINTALL) { 3426 if (iter->flags & FTRACE_ITER_NOTRACE) 3427 seq_puts(m, "#### no functions disabled ####\n"); 3428 else 3429 seq_puts(m, "#### all functions enabled ####\n"); 3430 return 0; 3431 } 3432 3433 rec = iter->func; 3434 3435 if (!rec) 3436 return 0; 3437 3438 seq_printf(m, "%ps", (void *)rec->ip); 3439 if (iter->flags & FTRACE_ITER_ENABLED) { 3440 struct ftrace_ops *ops; 3441 3442 seq_printf(m, " (%ld)%s%s", 3443 ftrace_rec_count(rec), 3444 rec->flags & FTRACE_FL_REGS ? " R" : " ", 3445 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); 3446 if (rec->flags & FTRACE_FL_TRAMP_EN) { 3447 ops = ftrace_find_tramp_ops_any(rec); 3448 if (ops) { 3449 do { 3450 seq_printf(m, "\ttramp: %pS (%pS)", 3451 (void *)ops->trampoline, 3452 (void *)ops->func); 3453 add_trampoline_func(m, ops, rec); 3454 ops = ftrace_find_tramp_ops_next(rec, ops); 3455 } while (ops); 3456 } else 3457 seq_puts(m, "\ttramp: ERROR!"); 3458 } else { 3459 add_trampoline_func(m, NULL, rec); 3460 } 3461 } 3462 3463 seq_putc(m, '\n'); 3464 3465 return 0; 3466 } 3467 3468 static const struct seq_operations show_ftrace_seq_ops = { 3469 .start = t_start, 3470 .next = t_next, 3471 .stop = t_stop, 3472 .show = t_show, 3473 }; 3474 3475 static int 3476 ftrace_avail_open(struct inode *inode, struct file *file) 3477 { 3478 struct ftrace_iterator *iter; 3479 3480 if (unlikely(ftrace_disabled)) 3481 return -ENODEV; 3482 3483 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3484 if (!iter) 3485 return -ENOMEM; 3486 3487 iter->pg = ftrace_pages_start; 3488 iter->ops = &global_ops; 3489 3490 return 0; 3491 } 3492 3493 static int 3494 ftrace_enabled_open(struct inode *inode, struct file *file) 3495 { 3496 struct ftrace_iterator *iter; 3497 3498 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3499 if (!iter) 3500 return -ENOMEM; 3501 3502 iter->pg = ftrace_pages_start; 3503 iter->flags = FTRACE_ITER_ENABLED; 3504 iter->ops = &global_ops; 3505 3506 return 0; 3507 } 3508 3509 /** 3510 * ftrace_regex_open - initialize function tracer filter files 3511 * @ops: The ftrace_ops that hold the hash filters 3512 * @flag: The type of filter to process 3513 * @inode: The inode, usually passed in to your open routine 3514 * @file: The file, usually passed in to your open routine 3515 * 3516 * ftrace_regex_open() initializes the filter files for the 3517 * @ops. Depending on @flag it may process the filter hash or 3518 * the notrace hash of @ops. 
With this called from the open 3519 * routine, you can use ftrace_filter_write() for the write 3520 * routine if @flag has FTRACE_ITER_FILTER set, or 3521 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 3522 * tracing_lseek() should be used as the lseek routine, and 3523 * release must call ftrace_regex_release(). 3524 */ 3525 int 3526 ftrace_regex_open(struct ftrace_ops *ops, int flag, 3527 struct inode *inode, struct file *file) 3528 { 3529 struct ftrace_iterator *iter; 3530 struct ftrace_hash *hash; 3531 struct list_head *mod_head; 3532 struct trace_array *tr = ops->private; 3533 int ret = 0; 3534 3535 ftrace_ops_init(ops); 3536 3537 if (unlikely(ftrace_disabled)) 3538 return -ENODEV; 3539 3540 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3541 if (!iter) 3542 return -ENOMEM; 3543 3544 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) { 3545 kfree(iter); 3546 return -ENOMEM; 3547 } 3548 3549 iter->ops = ops; 3550 iter->flags = flag; 3551 iter->tr = tr; 3552 3553 mutex_lock(&ops->func_hash->regex_lock); 3554 3555 if (flag & FTRACE_ITER_NOTRACE) { 3556 hash = ops->func_hash->notrace_hash; 3557 mod_head = tr ? &tr->mod_notrace : NULL; 3558 } else { 3559 hash = ops->func_hash->filter_hash; 3560 mod_head = tr ? &tr->mod_trace : NULL; 3561 } 3562 3563 iter->mod_list = mod_head; 3564 3565 if (file->f_mode & FMODE_WRITE) { 3566 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 3567 3568 if (file->f_flags & O_TRUNC) { 3569 iter->hash = alloc_ftrace_hash(size_bits); 3570 clear_ftrace_mod_list(mod_head); 3571 } else { 3572 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); 3573 } 3574 3575 if (!iter->hash) { 3576 trace_parser_put(&iter->parser); 3577 kfree(iter); 3578 ret = -ENOMEM; 3579 goto out_unlock; 3580 } 3581 } else 3582 iter->hash = hash; 3583 3584 if (file->f_mode & FMODE_READ) { 3585 iter->pg = ftrace_pages_start; 3586 3587 ret = seq_open(file, &show_ftrace_seq_ops); 3588 if (!ret) { 3589 struct seq_file *m = file->private_data; 3590 m->private = iter; 3591 } else { 3592 /* Failed */ 3593 free_ftrace_hash(iter->hash); 3594 trace_parser_put(&iter->parser); 3595 kfree(iter); 3596 } 3597 } else 3598 file->private_data = iter; 3599 3600 out_unlock: 3601 mutex_unlock(&ops->func_hash->regex_lock); 3602 3603 return ret; 3604 } 3605 3606 static int 3607 ftrace_filter_open(struct inode *inode, struct file *file) 3608 { 3609 struct ftrace_ops *ops = inode->i_private; 3610 3611 return ftrace_regex_open(ops, 3612 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, 3613 inode, file); 3614 } 3615 3616 static int 3617 ftrace_notrace_open(struct inode *inode, struct file *file) 3618 { 3619 struct ftrace_ops *ops = inode->i_private; 3620 3621 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 3622 inode, file); 3623 }
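/*
 * A sketch of the wiring this implies (it mirrors the
 * ftrace_filter_fops definition later in this file, shown here only to
 * illustrate the contract described above):
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open = ftrace_filter_open,
 *		.read = seq_read,
 *		.write = ftrace_filter_write,
 *		.llseek = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */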
3624 3625 /* Type for quick search of ftrace basic regexes (globs) from filter_parse_regex */ 3626 struct ftrace_glob { 3627 char *search; 3628 unsigned len; 3629 int type; 3630 }; 3631 3632 /* 3633 * If symbols in an architecture don't correspond exactly to the user-visible 3634 * name of what they represent, it is possible to define this function to 3635 * perform the necessary adjustments. 3636 */ 3637 char * __weak arch_ftrace_match_adjust(char *str, const char *search) 3638 { 3639 return str; 3640 } 3641 3642 static int ftrace_match(char *str, struct ftrace_glob *g) 3643 { 3644 int matched = 0; 3645 int slen; 3646 3647 str = arch_ftrace_match_adjust(str, g->search); 3648 3649 switch (g->type) { 3650 case MATCH_FULL: 3651 if (strcmp(str, g->search) == 0) 3652 matched = 1; 3653 break; 3654 case MATCH_FRONT_ONLY: 3655 if (strncmp(str, g->search, g->len) == 0) 3656 matched = 1; 3657 break; 3658 case MATCH_MIDDLE_ONLY: 3659 if (strstr(str, g->search)) 3660 matched = 1; 3661 break; 3662 case MATCH_END_ONLY: 3663 slen = strlen(str); 3664 if (slen >= g->len && 3665 memcmp(str + slen - g->len, g->search, g->len) == 0) 3666 matched = 1; 3667 break; 3668 case MATCH_GLOB: 3669 if (glob_match(g->search, str)) 3670 matched = 1; 3671 break; 3672 } 3673 3674 return matched; 3675 } 3676 3677 static int 3678 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) 3679 { 3680 struct ftrace_func_entry *entry; 3681 int ret = 0; 3682 3683 entry = ftrace_lookup_ip(hash, rec->ip); 3684 if (clear_filter) { 3685 /* Do nothing if it doesn't exist */ 3686 if (!entry) 3687 return 0; 3688 3689 free_hash_entry(hash, entry); 3690 } else { 3691 /* Do nothing if it exists */ 3692 if (entry) 3693 return 0; 3694 3695 ret = add_hash_entry(hash, rec->ip); 3696 } 3697 return ret; 3698 } 3699 3700 static int 3701 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g, 3702 int clear_filter) 3703 { 3704 long index = simple_strtoul(func_g->search, NULL, 0); 3705 struct ftrace_page *pg; 3706 struct dyn_ftrace *rec; 3707 3708 /* The index starts at 1 */ 3709 if (--index < 0) 3710 return 0; 3711 3712 do_for_each_ftrace_rec(pg, rec) { 3713 if (pg->index <= index) { 3714 index -= pg->index; 3715 /* this is a double loop, break goes to the next page */ 3716 break; 3717 } 3718 rec = &pg->records[index]; 3719 enter_record(hash, rec, clear_filter); 3720 return 1; 3721 } while_for_each_ftrace_rec(); 3722 return 0; 3723 } 3724 3725 static int 3726 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, 3727 struct ftrace_glob *mod_g, int exclude_mod) 3728 { 3729 char str[KSYM_SYMBOL_LEN]; 3730 char *modname; 3731 3732 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); 3733 3734 if (mod_g) { 3735 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; 3736 3737 /* blank module name to match all modules */ 3738 if (!mod_g->len) { 3739 /* blank module globbing: modname xor exclude_mod */ 3740 if (!exclude_mod != !modname) 3741 goto func_match; 3742 return 0; 3743 } 3744 3745 /* 3746 * exclude_mod is set to trace everything but the given 3747 * module. If it is set and the module matches, then 3748 * return 0. If it is not set, and the module doesn't match, 3749 * also return 0. Otherwise, check the function to see if 3750 * that matches. 3751 */ 3752 if (!mod_matches == !exclude_mod) 3753 return 0; 3754 func_match: 3755 /* blank search means to match all funcs in the mod */ 3756 if (!func_g->len) 3757 return 1; 3758 } 3759 3760 return ftrace_match(str, func_g); 3761 } 3762 3763 static int 3764 match_records(struct ftrace_hash *hash, char *func, int len, char *mod) 3765 { 3766 struct ftrace_page *pg; 3767 struct dyn_ftrace *rec; 3768 struct ftrace_glob func_g = { .type = MATCH_FULL }; 3769 struct ftrace_glob mod_g = { .type = MATCH_FULL }; 3770 struct ftrace_glob *mod_match = (mod) ?
&mod_g : NULL; 3771 int exclude_mod = 0; 3772 int found = 0; 3773 int ret; 3774 int clear_filter = 0; 3775 3776 if (func) { 3777 func_g.type = filter_parse_regex(func, len, &func_g.search, 3778 &clear_filter); 3779 func_g.len = strlen(func_g.search); 3780 } 3781 3782 if (mod) { 3783 mod_g.type = filter_parse_regex(mod, strlen(mod), 3784 &mod_g.search, &exclude_mod); 3785 mod_g.len = strlen(mod_g.search); 3786 } 3787 3788 mutex_lock(&ftrace_lock); 3789 3790 if (unlikely(ftrace_disabled)) 3791 goto out_unlock; 3792 3793 if (func_g.type == MATCH_INDEX) { 3794 found = add_rec_by_index(hash, &func_g, clear_filter); 3795 goto out_unlock; 3796 } 3797 3798 do_for_each_ftrace_rec(pg, rec) { 3799 3800 if (rec->flags & FTRACE_FL_DISABLED) 3801 continue; 3802 3803 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 3804 ret = enter_record(hash, rec, clear_filter); 3805 if (ret < 0) { 3806 found = ret; 3807 goto out_unlock; 3808 } 3809 found = 1; 3810 } 3811 } while_for_each_ftrace_rec(); 3812 out_unlock: 3813 mutex_unlock(&ftrace_lock); 3814 3815 return found; 3816 } 3817 3818 static int 3819 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) 3820 { 3821 return match_records(hash, buff, len, NULL); 3822 } 3823 3824 static void ftrace_ops_update_code(struct ftrace_ops *ops, 3825 struct ftrace_ops_hash *old_hash) 3826 { 3827 struct ftrace_ops *op; 3828 3829 if (!ftrace_enabled) 3830 return; 3831 3832 if (ops->flags & FTRACE_OPS_FL_ENABLED) { 3833 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); 3834 return; 3835 } 3836 3837 /* 3838 * If this is the shared global_ops filter, then we need to 3839 * check if another ops that shares it is enabled. 3840 * If so, we still need to run the modify code. 3841 */ 3842 if (ops->func_hash != &global_ops.local_hash) 3843 return; 3844 3845 do_for_each_ftrace_op(op, ftrace_ops_list) { 3846 if (op->func_hash == &global_ops.local_hash && 3847 op->flags & FTRACE_OPS_FL_ENABLED) { 3848 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); 3849 /* Only need to do this once */ 3850 return; 3851 } 3852 } while_for_each_ftrace_op(op); 3853 } 3854 3855 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, 3856 struct ftrace_hash **orig_hash, 3857 struct ftrace_hash *hash, 3858 int enable) 3859 { 3860 struct ftrace_ops_hash old_hash_ops; 3861 struct ftrace_hash *old_hash; 3862 int ret; 3863 3864 old_hash = *orig_hash; 3865 old_hash_ops.filter_hash = ops->func_hash->filter_hash; 3866 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 3867 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 3868 if (!ret) { 3869 ftrace_ops_update_code(ops, &old_hash_ops); 3870 free_ftrace_hash_rcu(old_hash); 3871 } 3872 return ret; 3873 } 3874 3875 static bool module_exists(const char *module) 3876 { 3877 /* All modules have the symbol __this_module */ 3878 static const char this_mod[] = "__this_module"; 3879 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 3880 unsigned long val; 3881 int n; 3882 3883 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); 3884 3885 if (n > sizeof(modname) - 1) 3886 return false; 3887 3888 val = module_kallsyms_lookup_name(modname); 3889 return val != 0; 3890 } 3891 3892 static int cache_mod(struct trace_array *tr, 3893 const char *func, char *module, int enable) 3894 { 3895 struct ftrace_mod_load *ftrace_mod, *n; 3896 struct list_head *head = enable ?
&tr->mod_trace : &tr->mod_notrace; 3897 int ret; 3898 3899 mutex_lock(&ftrace_lock); 3900 3901 /* We do not cache inverse filters */ 3902 if (func[0] == '!') { 3903 func++; 3904 ret = -EINVAL; 3905 3906 /* Look to remove this hash */ 3907 list_for_each_entry_safe(ftrace_mod, n, head, list) { 3908 if (strcmp(ftrace_mod->module, module) != 0) 3909 continue; 3910 3911 /* no func matches all */ 3912 if (strcmp(func, "*") == 0 || 3913 (ftrace_mod->func && 3914 strcmp(ftrace_mod->func, func) == 0)) { 3915 ret = 0; 3916 free_ftrace_mod(ftrace_mod); 3917 continue; 3918 } 3919 } 3920 goto out; 3921 } 3922 3923 ret = -EINVAL; 3924 /* We only care about modules that have not been loaded yet */ 3925 if (module_exists(module)) 3926 goto out; 3927 3928 /* Save this string off, and execute it when the module is loaded */ 3929 ret = ftrace_add_mod(tr, func, module, enable); 3930 out: 3931 mutex_unlock(&ftrace_lock); 3932 3933 return ret; 3934 } 3935 3936 static int 3937 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 3938 int reset, int enable); 3939 3940 #ifdef CONFIG_MODULES 3941 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, 3942 char *mod, bool enable) 3943 { 3944 struct ftrace_mod_load *ftrace_mod, *n; 3945 struct ftrace_hash **orig_hash, *new_hash; 3946 LIST_HEAD(process_mods); 3947 char *func; 3948 int ret; 3949 3950 mutex_lock(&ops->func_hash->regex_lock); 3951 3952 if (enable) 3953 orig_hash = &ops->func_hash->filter_hash; 3954 else 3955 orig_hash = &ops->func_hash->notrace_hash; 3956 3957 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, 3958 *orig_hash); 3959 if (!new_hash) 3960 goto out; /* warn? */ 3961 3962 mutex_lock(&ftrace_lock); 3963 3964 list_for_each_entry_safe(ftrace_mod, n, head, list) { 3965 3966 if (strcmp(ftrace_mod->module, mod) != 0) 3967 continue; 3968 3969 if (ftrace_mod->func) 3970 func = kstrdup(ftrace_mod->func, GFP_KERNEL); 3971 else 3972 func = kstrdup("*", GFP_KERNEL); 3973 3974 if (!func) /* warn? 
*/ 3975 continue; 3976 3977 list_del(&ftrace_mod->list); 3978 list_add(&ftrace_mod->list, &process_mods); 3979 3980 /* Use the newly allocated func, as it may be "*" */ 3981 kfree(ftrace_mod->func); 3982 ftrace_mod->func = func; 3983 } 3984 3985 mutex_unlock(&ftrace_lock); 3986 3987 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { 3988 3989 func = ftrace_mod->func; 3990 3991 /* Grabs ftrace_lock, which is why we have this extra step */ 3992 match_records(new_hash, func, strlen(func), mod); 3993 free_ftrace_mod(ftrace_mod); 3994 } 3995 3996 if (enable && list_empty(head)) 3997 new_hash->flags &= ~FTRACE_HASH_FL_MOD; 3998 3999 mutex_lock(&ftrace_lock); 4000 4001 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, 4002 new_hash, enable); 4003 mutex_unlock(&ftrace_lock); 4004 4005 out: 4006 mutex_unlock(&ops->func_hash->regex_lock); 4007 4008 free_ftrace_hash(new_hash); 4009 } 4010 4011 static void process_cached_mods(const char *mod_name) 4012 { 4013 struct trace_array *tr; 4014 char *mod; 4015 4016 mod = kstrdup(mod_name, GFP_KERNEL); 4017 if (!mod) 4018 return; 4019 4020 mutex_lock(&trace_types_lock); 4021 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 4022 if (!list_empty(&tr->mod_trace)) 4023 process_mod_list(&tr->mod_trace, tr->ops, mod, true); 4024 if (!list_empty(&tr->mod_notrace)) 4025 process_mod_list(&tr->mod_notrace, tr->ops, mod, false); 4026 } 4027 mutex_unlock(&trace_types_lock); 4028 4029 kfree(mod); 4030 } 4031 #endif 4032 4033 /* 4034 * We register the module command as a template to show others how 4035 * to register a command as well. 4036 */ 4037 4038 static int 4039 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, 4040 char *func_orig, char *cmd, char *module, int enable) 4041 { 4042 char *func; 4043 int ret; 4044 4045 /* match_records() modifies func, and we need the original */ 4046 func = kstrdup(func_orig, GFP_KERNEL); 4047 if (!func) 4048 return -ENOMEM; 4049 4050 /* 4051 * cmd == 'mod' because we only registered this func 4052 * for the 'mod' ftrace_func_command. 4053 * But if you register one func with multiple commands, 4054 * you can tell which command was used by the cmd 4055 * parameter. 4056 */ 4057 ret = match_records(hash, func, strlen(func), module); 4058 kfree(func); 4059 4060 if (!ret) 4061 return cache_mod(tr, func_orig, module, enable); 4062 if (ret < 0) 4063 return ret; 4064 return 0; 4065 } 4066 4067 static struct ftrace_func_command ftrace_mod_cmd = { 4068 .name = "mod", 4069 .func = ftrace_mod_callback, 4070 }; 4071 4072 static int __init ftrace_mod_cmd_init(void) 4073 { 4074 return register_ftrace_command(&ftrace_mod_cmd); 4075 } 4076 core_initcall(ftrace_mod_cmd_init);
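/*
 * Illustrative usage of the "mod" command from tracefs (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 'write*:mod:ext4' > set_ftrace_filter
 *
 * filters on all functions starting with "write" in the ext4 module.
 * If ext4 is not loaded yet, the string is cached by cache_mod() above
 * and applied by process_cached_mods() when the module shows up.
 * Prefixing the same string with '!' removes the filter again; as
 * noted above, inverse filters are not cached.
 */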
4077 4078 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, 4079 struct ftrace_ops *op, struct pt_regs *pt_regs) 4080 { 4081 struct ftrace_probe_ops *probe_ops; 4082 struct ftrace_func_probe *probe; 4083 4084 probe = container_of(op, struct ftrace_func_probe, ops); 4085 probe_ops = probe->probe_ops; 4086 4087 /* 4088 * Disable preemption for these calls to prevent an RCU grace 4089 * period. This syncs the hash iteration and freeing of items 4090 * on the hash. rcu_read_lock is too dangerous here. 4091 */ 4092 preempt_disable_notrace(); 4093 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); 4094 preempt_enable_notrace(); 4095 } 4096 4097 struct ftrace_func_map { 4098 struct ftrace_func_entry entry; 4099 void *data; 4100 }; 4101 4102 struct ftrace_func_mapper { 4103 struct ftrace_hash hash; 4104 }; 4105 4106 /** 4107 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper 4108 * 4109 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data. 4110 */ 4111 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void) 4112 { 4113 struct ftrace_hash *hash; 4114 4115 /* 4116 * The mapper is simply a ftrace_hash, but since the entries 4117 * in the hash are not ftrace_func_entry type, we define it 4118 * as a separate structure. 4119 */ 4120 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4121 return (struct ftrace_func_mapper *)hash; 4122 } 4123 4124 /** 4125 * ftrace_func_mapper_find_ip - Find some data mapped to an ip 4126 * @mapper: The mapper that has the ip maps 4127 * @ip: the instruction pointer to find the data for 4128 * 4129 * Returns the data mapped to @ip if found, otherwise NULL. The return 4130 * is actually the address of the mapper data pointer. The address is 4131 * returned for use cases where the data is no bigger than a long, and 4132 * the user can use the data pointer as its data instead of having to 4133 * allocate more memory for the reference. 4134 */ 4135 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, 4136 unsigned long ip) 4137 { 4138 struct ftrace_func_entry *entry; 4139 struct ftrace_func_map *map; 4140 4141 entry = ftrace_lookup_ip(&mapper->hash, ip); 4142 if (!entry) 4143 return NULL; 4144 4145 map = (struct ftrace_func_map *)entry; 4146 return &map->data; 4147 } 4148 4149 /** 4150 * ftrace_func_mapper_add_ip - Map some data to an ip 4151 * @mapper: The mapper that has the ip maps 4152 * @ip: The instruction pointer address to map @data to 4153 * @data: The data to map to @ip 4154 * 4155 * Returns 0 on success, otherwise an error. 4156 */ 4157 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, 4158 unsigned long ip, void *data) 4159 { 4160 struct ftrace_func_entry *entry; 4161 struct ftrace_func_map *map; 4162 4163 entry = ftrace_lookup_ip(&mapper->hash, ip); 4164 if (entry) 4165 return -EBUSY; 4166 4167 map = kmalloc(sizeof(*map), GFP_KERNEL); 4168 if (!map) 4169 return -ENOMEM; 4170 4171 map->entry.ip = ip; 4172 map->data = data; 4173 4174 __add_hash_entry(&mapper->hash, &map->entry); 4175 4176 return 0; 4177 } 4178 4179 /** 4180 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping 4181 * @mapper: The mapper that has the ip maps 4182 * @ip: The instruction pointer address to remove the data from 4183 * 4184 * Returns the data if it is found, otherwise NULL. 4185 * Note, if the data pointer is used as the data itself (see 4186 * ftrace_func_mapper_find_ip()), then the return value may be meaningless 4187 * if the data pointer was set to zero.
4188 */ 4189 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, 4190 unsigned long ip) 4191 { 4192 struct ftrace_func_entry *entry; 4193 struct ftrace_func_map *map; 4194 void *data; 4195 4196 entry = ftrace_lookup_ip(&mapper->hash, ip); 4197 if (!entry) 4198 return NULL; 4199 4200 map = (struct ftrace_func_map *)entry; 4201 data = map->data; 4202 4203 remove_hash_entry(&mapper->hash, entry); 4204 kfree(entry); 4205 4206 return data; 4207 } 4208 4209 /** 4210 * free_ftrace_func_mapper - free a mapping of ips and data 4211 * @mapper: The mapper that has the ip maps 4212 * @free_func: A function to be called on each data item. 4213 * 4214 * This is used to free the function mapper. The @free_func is optional 4215 * and can be used if the data needs to be freed as well. 4216 */ 4217 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, 4218 ftrace_mapper_func free_func) 4219 { 4220 struct ftrace_func_entry *entry; 4221 struct ftrace_func_map *map; 4222 struct hlist_head *hhd; 4223 int size, i; 4224 4225 if (!mapper) 4226 return; 4227 4228 if (free_func && mapper->hash.count) { 4229 size = 1 << mapper->hash.size_bits; 4230 for (i = 0; i < size; i++) { 4231 hhd = &mapper->hash.buckets[i]; 4232 hlist_for_each_entry(entry, hhd, hlist) { 4233 map = (struct ftrace_func_map *)entry; 4234 free_func(map); 4235 } 4236 } 4237 } 4238 free_ftrace_hash(&mapper->hash); 4239 }
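/*
 * Typical mapper usage, sketched (the function probe handlers in
 * trace_functions.c follow this pattern to keep a per-ip count stored
 * directly in the data pointer slot; the values here are illustrative):
 *
 *	struct ftrace_func_mapper *mapper;
 *	long *count;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	if (mapper)
 *		ftrace_func_mapper_add_ip(mapper, ip, (void *)10);
 *
 *	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 *	if (count)
 *		(*count)--;
 *
 *	free_ftrace_func_mapper(mapper, NULL);
 */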
for function probes */ 4294 if (WARN_ON(glob[0] == '!')) 4295 return -EINVAL; 4296 4297 4298 mutex_lock(&ftrace_lock); 4299 /* Check if the probe_ops is already registered */ 4300 list_for_each_entry(probe, &tr->func_probes, list) { 4301 if (probe->probe_ops == probe_ops) 4302 break; 4303 } 4304 if (&probe->list == &tr->func_probes) { 4305 probe = kzalloc(sizeof(*probe), GFP_KERNEL); 4306 if (!probe) { 4307 mutex_unlock(&ftrace_lock); 4308 return -ENOMEM; 4309 } 4310 probe->probe_ops = probe_ops; 4311 probe->ops.func = function_trace_probe_call; 4312 probe->tr = tr; 4313 ftrace_ops_init(&probe->ops); 4314 list_add(&probe->list, &tr->func_probes); 4315 } 4316 4317 acquire_probe_locked(probe); 4318 4319 mutex_unlock(&ftrace_lock); 4320 4321 mutex_lock(&probe->ops.func_hash->regex_lock); 4322 4323 orig_hash = &probe->ops.func_hash->filter_hash; 4324 old_hash = *orig_hash; 4325 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4326 4327 ret = ftrace_match_records(hash, glob, strlen(glob)); 4328 4329 /* Nothing found? */ 4330 if (!ret) 4331 ret = -EINVAL; 4332 4333 if (ret < 0) 4334 goto out; 4335 4336 size = 1 << hash->size_bits; 4337 for (i = 0; i < size; i++) { 4338 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4339 if (ftrace_lookup_ip(old_hash, entry->ip)) 4340 continue; 4341 /* 4342 * The caller might want to do something special 4343 * for each function we find. We call the callback 4344 * to give the caller an opportunity to do so. 4345 */ 4346 if (probe_ops->init) { 4347 ret = probe_ops->init(probe_ops, tr, 4348 entry->ip, data, 4349 &probe->data); 4350 if (ret < 0) { 4351 if (probe_ops->free && count) 4352 probe_ops->free(probe_ops, tr, 4353 0, probe->data); 4354 probe->data = NULL; 4355 goto out; 4356 } 4357 } 4358 count++; 4359 } 4360 } 4361 4362 mutex_lock(&ftrace_lock); 4363 4364 if (!count) { 4365 /* Nothing was added? 
*/ 4366 ret = -EINVAL; 4367 goto out_unlock; 4368 } 4369 4370 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4371 hash, 1); 4372 if (ret < 0) 4373 goto err_unlock; 4374 4375 /* One ref for each new function traced */ 4376 probe->ref += count; 4377 4378 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) 4379 ret = ftrace_startup(&probe->ops, 0); 4380 4381 out_unlock: 4382 mutex_unlock(&ftrace_lock); 4383 4384 if (!ret) 4385 ret = count; 4386 out: 4387 mutex_unlock(&probe->ops.func_hash->regex_lock); 4388 free_ftrace_hash(hash); 4389 4390 release_probe(probe); 4391 4392 return ret; 4393 4394 err_unlock: 4395 if (!probe_ops->free || !count) 4396 goto out_unlock; 4397 4398 /* Failed to do the move, need to call the free functions */ 4399 for (i = 0; i < size; i++) { 4400 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4401 if (ftrace_lookup_ip(old_hash, entry->ip)) 4402 continue; 4403 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4404 } 4405 } 4406 goto out_unlock; 4407 } 4408 4409 int 4410 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 4411 struct ftrace_probe_ops *probe_ops) 4412 { 4413 struct ftrace_ops_hash old_hash_ops; 4414 struct ftrace_func_entry *entry; 4415 struct ftrace_func_probe *probe; 4416 struct ftrace_glob func_g; 4417 struct ftrace_hash **orig_hash; 4418 struct ftrace_hash *old_hash; 4419 struct ftrace_hash *hash = NULL; 4420 struct hlist_node *tmp; 4421 struct hlist_head hhd; 4422 char str[KSYM_SYMBOL_LEN]; 4423 int count = 0; 4424 int i, ret = -ENODEV; 4425 int size; 4426 4427 if (!glob || !strlen(glob) || !strcmp(glob, "*")) 4428 func_g.search = NULL; 4429 else { 4430 int not; 4431 4432 func_g.type = filter_parse_regex(glob, strlen(glob), 4433 &func_g.search, ¬); 4434 func_g.len = strlen(func_g.search); 4435 4436 /* we do not support '!' for function probes */ 4437 if (WARN_ON(not)) 4438 return -EINVAL; 4439 } 4440 4441 mutex_lock(&ftrace_lock); 4442 /* Check if the probe_ops is already registered */ 4443 list_for_each_entry(probe, &tr->func_probes, list) { 4444 if (probe->probe_ops == probe_ops) 4445 break; 4446 } 4447 if (&probe->list == &tr->func_probes) 4448 goto err_unlock_ftrace; 4449 4450 ret = -EINVAL; 4451 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) 4452 goto err_unlock_ftrace; 4453 4454 acquire_probe_locked(probe); 4455 4456 mutex_unlock(&ftrace_lock); 4457 4458 mutex_lock(&probe->ops.func_hash->regex_lock); 4459 4460 orig_hash = &probe->ops.func_hash->filter_hash; 4461 old_hash = *orig_hash; 4462 4463 if (ftrace_hash_empty(old_hash)) 4464 goto out_unlock; 4465 4466 old_hash_ops.filter_hash = old_hash; 4467 /* Probes only have filters */ 4468 old_hash_ops.notrace_hash = NULL; 4469 4470 ret = -ENOMEM; 4471 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4472 if (!hash) 4473 goto out_unlock; 4474 4475 INIT_HLIST_HEAD(&hhd); 4476 4477 size = 1 << hash->size_bits; 4478 for (i = 0; i < size; i++) { 4479 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { 4480 4481 if (func_g.search) { 4482 kallsyms_lookup(entry->ip, NULL, NULL, 4483 NULL, str); 4484 if (!ftrace_match(str, &func_g)) 4485 continue; 4486 } 4487 count++; 4488 remove_hash_entry(hash, entry); 4489 hlist_add_head(&entry->hlist, &hhd); 4490 } 4491 } 4492 4493 /* Nothing found? 
*/ 4494 if (!count) { 4495 ret = -EINVAL; 4496 goto out_unlock; 4497 } 4498 4499 mutex_lock(&ftrace_lock); 4500 4501 WARN_ON(probe->ref < count); 4502 4503 probe->ref -= count; 4504 4505 if (ftrace_hash_empty(hash)) 4506 ftrace_shutdown(&probe->ops, 0); 4507 4508 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4509 hash, 1); 4510 4511 /* still need to update the function call sites */ 4512 if (ftrace_enabled && !ftrace_hash_empty(hash)) 4513 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, 4514 &old_hash_ops); 4515 synchronize_rcu(); 4516 4517 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { 4518 hlist_del(&entry->hlist); 4519 if (probe_ops->free) 4520 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4521 kfree(entry); 4522 } 4523 mutex_unlock(&ftrace_lock); 4524 4525 out_unlock: 4526 mutex_unlock(&probe->ops.func_hash->regex_lock); 4527 free_ftrace_hash(hash); 4528 4529 release_probe(probe); 4530 4531 return ret; 4532 4533 err_unlock_ftrace: 4534 mutex_unlock(&ftrace_lock); 4535 return ret; 4536 } 4537 4538 void clear_ftrace_function_probes(struct trace_array *tr) 4539 { 4540 struct ftrace_func_probe *probe, *n; 4541 4542 list_for_each_entry_safe(probe, n, &tr->func_probes, list) 4543 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); 4544 } 4545 4546 static LIST_HEAD(ftrace_commands); 4547 static DEFINE_MUTEX(ftrace_cmd_mutex); 4548 4549 /* 4550 * Currently we only register ftrace commands from __init, so mark this 4551 * __init too. 4552 */ 4553 __init int register_ftrace_command(struct ftrace_func_command *cmd) 4554 { 4555 struct ftrace_func_command *p; 4556 int ret = 0; 4557 4558 mutex_lock(&ftrace_cmd_mutex); 4559 list_for_each_entry(p, &ftrace_commands, list) { 4560 if (strcmp(cmd->name, p->name) == 0) { 4561 ret = -EBUSY; 4562 goto out_unlock; 4563 } 4564 } 4565 list_add(&cmd->list, &ftrace_commands); 4566 out_unlock: 4567 mutex_unlock(&ftrace_cmd_mutex); 4568 4569 return ret; 4570 } 4571 4572 /* 4573 * Currently we only unregister ftrace commands from __init, so mark 4574 * this __init too. 
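 *
 * For reference, a command is typically defined once and then
 * registered (and, symmetrically, unregistered) from __init code,
 * roughly like this hedged sketch (hypothetical names; real commands
 * are wired up in files such as trace_functions.c):
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	static __init int init_my_cmd(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *
 * where my_cmd_func() takes (tr, hash, func, command, next, enable),
 * matching the call made from ftrace_process_regex() below.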
4575 */ 4576 __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 4577 { 4578 struct ftrace_func_command *p, *n; 4579 int ret = -ENODEV; 4580 4581 mutex_lock(&ftrace_cmd_mutex); 4582 list_for_each_entry_safe(p, n, &ftrace_commands, list) { 4583 if (strcmp(cmd->name, p->name) == 0) { 4584 ret = 0; 4585 list_del_init(&p->list); 4586 goto out_unlock; 4587 } 4588 } 4589 out_unlock: 4590 mutex_unlock(&ftrace_cmd_mutex); 4591 4592 return ret; 4593 } 4594 4595 static int ftrace_process_regex(struct ftrace_iterator *iter, 4596 char *buff, int len, int enable) 4597 { 4598 struct ftrace_hash *hash = iter->hash; 4599 struct trace_array *tr = iter->ops->private; 4600 char *func, *command, *next = buff; 4601 struct ftrace_func_command *p; 4602 int ret = -EINVAL; 4603 4604 func = strsep(&next, ":"); 4605 4606 if (!next) { 4607 ret = ftrace_match_records(hash, func, len); 4608 if (!ret) 4609 ret = -EINVAL; 4610 if (ret < 0) 4611 return ret; 4612 return 0; 4613 } 4614 4615 /* command found */ 4616 4617 command = strsep(&next, ":"); 4618 4619 mutex_lock(&ftrace_cmd_mutex); 4620 list_for_each_entry(p, &ftrace_commands, list) { 4621 if (strcmp(p->name, command) == 0) { 4622 ret = p->func(tr, hash, func, command, next, enable); 4623 goto out_unlock; 4624 } 4625 } 4626 out_unlock: 4627 mutex_unlock(&ftrace_cmd_mutex); 4628 4629 return ret; 4630 } 4631 4632 static ssize_t 4633 ftrace_regex_write(struct file *file, const char __user *ubuf, 4634 size_t cnt, loff_t *ppos, int enable) 4635 { 4636 struct ftrace_iterator *iter; 4637 struct trace_parser *parser; 4638 ssize_t ret, read; 4639 4640 if (!cnt) 4641 return 0; 4642 4643 if (file->f_mode & FMODE_READ) { 4644 struct seq_file *m = file->private_data; 4645 iter = m->private; 4646 } else 4647 iter = file->private_data; 4648 4649 if (unlikely(ftrace_disabled)) 4650 return -ENODEV; 4651 4652 /* iter->hash is a local copy, so we don't need regex_lock */ 4653 4654 parser = &iter->parser; 4655 read = trace_get_user(parser, ubuf, cnt, ppos); 4656 4657 if (read >= 0 && trace_parser_loaded(parser) && 4658 !trace_parser_cont(parser)) { 4659 ret = ftrace_process_regex(iter, parser->buffer, 4660 parser->idx, enable); 4661 trace_parser_clear(parser); 4662 if (ret < 0) 4663 goto out; 4664 } 4665 4666 ret = read; 4667 out: 4668 return ret; 4669 } 4670 4671 ssize_t 4672 ftrace_filter_write(struct file *file, const char __user *ubuf, 4673 size_t cnt, loff_t *ppos) 4674 { 4675 return ftrace_regex_write(file, ubuf, cnt, ppos, 1); 4676 } 4677 4678 ssize_t 4679 ftrace_notrace_write(struct file *file, const char __user *ubuf, 4680 size_t cnt, loff_t *ppos) 4681 { 4682 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 4683 } 4684 4685 static int 4686 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) 4687 { 4688 struct ftrace_func_entry *entry; 4689 4690 if (!ftrace_location(ip)) 4691 return -EINVAL; 4692 4693 if (remove) { 4694 entry = ftrace_lookup_ip(hash, ip); 4695 if (!entry) 4696 return -ENOENT; 4697 free_hash_entry(hash, entry); 4698 return 0; 4699 } 4700 4701 return add_hash_entry(hash, ip); 4702 } 4703 4704 static int 4705 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 4706 unsigned long ip, int remove, int reset, int enable) 4707 { 4708 struct ftrace_hash **orig_hash; 4709 struct ftrace_hash *hash; 4710 int ret; 4711 4712 if (unlikely(ftrace_disabled)) 4713 return -ENODEV; 4714 4715 mutex_lock(&ops->func_hash->regex_lock); 4716 4717 if (enable) 4718 orig_hash = &ops->func_hash->filter_hash; 4719 else 4720 
orig_hash = &ops->func_hash->notrace_hash; 4721 4722 if (reset) 4723 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4724 else 4725 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 4726 4727 if (!hash) { 4728 ret = -ENOMEM; 4729 goto out_regex_unlock; 4730 } 4731 4732 if (buf && !ftrace_match_records(hash, buf, len)) { 4733 ret = -EINVAL; 4734 goto out_regex_unlock; 4735 } 4736 if (ip) { 4737 ret = ftrace_match_addr(hash, ip, remove); 4738 if (ret < 0) 4739 goto out_regex_unlock; 4740 } 4741 4742 mutex_lock(&ftrace_lock); 4743 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); 4744 mutex_unlock(&ftrace_lock); 4745 4746 out_regex_unlock: 4747 mutex_unlock(&ops->func_hash->regex_lock); 4748 4749 free_ftrace_hash(hash); 4750 return ret; 4751 } 4752 4753 static int 4754 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, 4755 int reset, int enable) 4756 { 4757 return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable); 4758 } 4759 4760 /** 4761 * ftrace_set_filter_ip - set a function to filter on in ftrace by address 4762 * @ops - the ops to set the filter with 4763 * @ip - the address to add to or remove from the filter. 4764 * @remove - non zero to remove the ip from the filter 4765 * @reset - non zero to reset all filters before applying this filter. 4766 * 4767 * Filters denote which functions should be enabled when tracing is enabled. 4768 * If @ip is zero, it fails to update the filter. 4769 */ 4770 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, 4771 int remove, int reset) 4772 { 4773 ftrace_ops_init(ops); 4774 return ftrace_set_addr(ops, ip, remove, reset, 1); 4775 } 4776 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); 4777 4778 /** 4779 * ftrace_ops_set_global_filter - setup ops to use global filters 4780 * @ops - the ops which will use the global filters 4781 * 4782 * ftrace users who need global function trace filtering should call this. 4783 * It can set the global filter only if ops were not initialized before. 4784 */ 4785 void ftrace_ops_set_global_filter(struct ftrace_ops *ops) 4786 { 4787 if (ops->flags & FTRACE_OPS_FL_INITIALIZED) 4788 return; 4789 4790 ftrace_ops_init(ops); 4791 ops->func_hash = &global_ops.local_hash; 4792 } 4793 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); 4794 4795 static int 4796 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 4797 int reset, int enable) 4798 { 4799 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); 4800 } 4801 4802 /** 4803 * ftrace_set_filter - set a function to filter on in ftrace 4804 * @ops - the ops to set the filter with 4805 * @buf - the string that holds the function filter text. 4806 * @len - the length of the string. 4807 * @reset - non zero to reset all filters before applying this filter. 4808 * 4809 * Filters denote which functions should be enabled when tracing is enabled. 4810 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 4811 */ 4812 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 4813 int len, int reset) 4814 { 4815 ftrace_ops_init(ops); 4816 return ftrace_set_regex(ops, buf, len, reset, 1); 4817 } 4818 EXPORT_SYMBOL_GPL(ftrace_set_filter); 4819 4820 /** 4821 * ftrace_set_notrace - set a function to not trace in ftrace 4822 * @ops - the ops to set the notrace filter with 4823 * @buf - the string that holds the function notrace text. 4824 * @len - the length of the string. 4825 * @reset - non zero to reset all filters before applying this filter.
4826 * 4827 * Notrace Filters denote which functions should not be enabled when tracing 4828 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 4829 * for tracing. 4830 */ 4831 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 4832 int len, int reset) 4833 { 4834 ftrace_ops_init(ops); 4835 return ftrace_set_regex(ops, buf, len, reset, 0); 4836 } 4837 EXPORT_SYMBOL_GPL(ftrace_set_notrace); 4838 /** 4839 * ftrace_set_global_filter - set a function to filter on with global tracers 4840 * @buf - the string that holds the function filter text. 4841 * @len - the length of the string. 4842 * @reset - non zero to reset all filters before applying this filter. 4843 * 4844 * Filters denote which functions should be enabled when tracing is enabled. 4845 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 4846 */ 4847 void ftrace_set_global_filter(unsigned char *buf, int len, int reset) 4848 { 4849 ftrace_set_regex(&global_ops, buf, len, reset, 1); 4850 } 4851 EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 4852 4853 /** 4854 * ftrace_set_global_notrace - set a function to not trace with global tracers 4855 * @buf - the string that holds the function notrace text. 4856 * @len - the length of the string. 4857 * @reset - non zero to reset all filters before applying this filter. 4858 * 4859 * Notrace Filters denote which functions should not be enabled when tracing 4860 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 4861 * for tracing. 4862 */ 4863 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) 4864 { 4865 ftrace_set_regex(&global_ops, buf, len, reset, 0); 4866 } 4867 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); 4868 4869 /* 4870 * command line interface to allow users to set filters on boot up. 
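 *
 * For example, booting with (illustrative values only):
 *
 *	ftrace_filter=kmem_cache_alloc,kfree ftrace_notrace=*_console_*
 *
 * pre-loads the global filter and notrace hashes before tracing is
 * first enabled. The lists are comma separated, as split up by
 * ftrace_set_early_filter() below.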
4871 */ 4872 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 4873 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 4874 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 4875 4876 /* Used by function selftest to not test if filter is set */ 4877 bool ftrace_filter_param __initdata; 4878 4879 static int __init set_ftrace_notrace(char *str) 4880 { 4881 ftrace_filter_param = true; 4882 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 4883 return 1; 4884 } 4885 __setup("ftrace_notrace=", set_ftrace_notrace); 4886 4887 static int __init set_ftrace_filter(char *str) 4888 { 4889 ftrace_filter_param = true; 4890 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 4891 return 1; 4892 } 4893 __setup("ftrace_filter=", set_ftrace_filter); 4894 4895 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4896 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 4897 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 4898 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); 4899 4900 static int __init set_graph_function(char *str) 4901 { 4902 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 4903 return 1; 4904 } 4905 __setup("ftrace_graph_filter=", set_graph_function); 4906 4907 static int __init set_graph_notrace_function(char *str) 4908 { 4909 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); 4910 return 1; 4911 } 4912 __setup("ftrace_graph_notrace=", set_graph_notrace_function); 4913 4914 static int __init set_graph_max_depth_function(char *str) 4915 { 4916 if (!str) 4917 return 0; 4918 fgraph_max_depth = simple_strtoul(str, NULL, 0); 4919 return 1; 4920 } 4921 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); 4922 4923 static void __init set_ftrace_early_graph(char *buf, int enable) 4924 { 4925 int ret; 4926 char *func; 4927 struct ftrace_hash *hash; 4928 4929 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4930 if (WARN_ON(!hash)) 4931 return; 4932 4933 while (buf) { 4934 func = strsep(&buf, ","); 4935 /* we allow only one expression at a time */ 4936 ret = ftrace_graph_set_hash(hash, func); 4937 if (ret) 4938 printk(KERN_DEBUG "ftrace: function %s not " 4939 "traceable\n", func); 4940 } 4941 4942 if (enable) 4943 ftrace_graph_hash = hash; 4944 else 4945 ftrace_graph_notrace_hash = hash; 4946 } 4947 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4948 4949 void __init 4950 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) 4951 { 4952 char *func; 4953 4954 ftrace_ops_init(ops); 4955 4956 while (buf) { 4957 func = strsep(&buf, ","); 4958 ftrace_set_regex(ops, func, strlen(func), 0, enable); 4959 } 4960 } 4961 4962 static void __init set_ftrace_early_filters(void) 4963 { 4964 if (ftrace_filter_buf[0]) 4965 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); 4966 if (ftrace_notrace_buf[0]) 4967 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); 4968 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4969 if (ftrace_graph_buf[0]) 4970 set_ftrace_early_graph(ftrace_graph_buf, 1); 4971 if (ftrace_graph_notrace_buf[0]) 4972 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); 4973 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4974 } 4975 4976 int ftrace_regex_release(struct inode *inode, struct file *file) 4977 { 4978 struct seq_file *m = (struct seq_file *)file->private_data; 4979 struct ftrace_iterator *iter; 4980 struct ftrace_hash **orig_hash; 4981 struct trace_parser *parser; 4982 int filter_hash; 4983 int ret; 4984 4985 if (file->f_mode & FMODE_READ) { 4986 iter = m->private; 4987 seq_release(inode, 
file); 4988 } else 4989 iter = file->private_data; 4990 4991 parser = &iter->parser; 4992 if (trace_parser_loaded(parser)) { 4993 ftrace_match_records(iter->hash, parser->buffer, parser->idx); 4994 } 4995 4996 trace_parser_put(parser); 4997 4998 mutex_lock(&iter->ops->func_hash->regex_lock); 4999 5000 if (file->f_mode & FMODE_WRITE) { 5001 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 5002 5003 if (filter_hash) { 5004 orig_hash = &iter->ops->func_hash->filter_hash; 5005 if (iter->tr && !list_empty(&iter->tr->mod_trace)) 5006 iter->hash->flags |= FTRACE_HASH_FL_MOD; 5007 } else 5008 orig_hash = &iter->ops->func_hash->notrace_hash; 5009 5010 mutex_lock(&ftrace_lock); 5011 ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash, 5012 iter->hash, filter_hash); 5013 mutex_unlock(&ftrace_lock); 5014 } else { 5015 /* For read only, the hash is the ops hash */ 5016 iter->hash = NULL; 5017 } 5018 5019 mutex_unlock(&iter->ops->func_hash->regex_lock); 5020 free_ftrace_hash(iter->hash); 5021 kfree(iter); 5022 5023 return 0; 5024 } 5025 5026 static const struct file_operations ftrace_avail_fops = { 5027 .open = ftrace_avail_open, 5028 .read = seq_read, 5029 .llseek = seq_lseek, 5030 .release = seq_release_private, 5031 }; 5032 5033 static const struct file_operations ftrace_enabled_fops = { 5034 .open = ftrace_enabled_open, 5035 .read = seq_read, 5036 .llseek = seq_lseek, 5037 .release = seq_release_private, 5038 }; 5039 5040 static const struct file_operations ftrace_filter_fops = { 5041 .open = ftrace_filter_open, 5042 .read = seq_read, 5043 .write = ftrace_filter_write, 5044 .llseek = tracing_lseek, 5045 .release = ftrace_regex_release, 5046 }; 5047 5048 static const struct file_operations ftrace_notrace_fops = { 5049 .open = ftrace_notrace_open, 5050 .read = seq_read, 5051 .write = ftrace_notrace_write, 5052 .llseek = tracing_lseek, 5053 .release = ftrace_regex_release, 5054 }; 5055 5056 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5057 5058 static DEFINE_MUTEX(graph_lock); 5059 5060 struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH; 5061 struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH; 5062 5063 enum graph_filter_type { 5064 GRAPH_FILTER_NOTRACE = 0, 5065 GRAPH_FILTER_FUNCTION, 5066 }; 5067 5068 #define FTRACE_GRAPH_EMPTY ((void *)1) 5069 5070 struct ftrace_graph_data { 5071 struct ftrace_hash *hash; 5072 struct ftrace_func_entry *entry; 5073 int idx; /* for hash table iteration */ 5074 enum graph_filter_type type; 5075 struct ftrace_hash *new_hash; 5076 const struct seq_operations *seq_ops; 5077 struct trace_parser parser; 5078 }; 5079 5080 static void * 5081 __g_next(struct seq_file *m, loff_t *pos) 5082 { 5083 struct ftrace_graph_data *fgd = m->private; 5084 struct ftrace_func_entry *entry = fgd->entry; 5085 struct hlist_head *head; 5086 int i, idx = fgd->idx; 5087 5088 if (*pos >= fgd->hash->count) 5089 return NULL; 5090 5091 if (entry) { 5092 hlist_for_each_entry_continue(entry, hlist) { 5093 fgd->entry = entry; 5094 return entry; 5095 } 5096 5097 idx++; 5098 } 5099 5100 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { 5101 head = &fgd->hash->buckets[i]; 5102 hlist_for_each_entry(entry, head, hlist) { 5103 fgd->entry = entry; 5104 fgd->idx = i; 5105 return entry; 5106 } 5107 } 5108 return NULL; 5109 } 5110 5111 static void * 5112 g_next(struct seq_file *m, void *v, loff_t *pos) 5113 { 5114 (*pos)++; 5115 return __g_next(m, pos); 5116 } 5117 5118 static void *g_start(struct seq_file *m, loff_t *pos) 5119 { 5120 struct ftrace_graph_data *fgd = m->private; 5121 5122 
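/*
 * Note: the entire seq_file walk runs under graph_lock. It is taken
 * here in g_start() and only released in g_stop(), so the hash pointer
 * loaded below cannot change while the iteration is in progress.
 */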
mutex_lock(&graph_lock); 5123 5124 if (fgd->type == GRAPH_FILTER_FUNCTION) 5125 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 5126 lockdep_is_held(&graph_lock)); 5127 else 5128 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5129 lockdep_is_held(&graph_lock)); 5130 5131 /* Nothing, tell g_show to print all functions are enabled */ 5132 if (ftrace_hash_empty(fgd->hash) && !*pos) 5133 return FTRACE_GRAPH_EMPTY; 5134 5135 fgd->idx = 0; 5136 fgd->entry = NULL; 5137 return __g_next(m, pos); 5138 } 5139 5140 static void g_stop(struct seq_file *m, void *p) 5141 { 5142 mutex_unlock(&graph_lock); 5143 } 5144 5145 static int g_show(struct seq_file *m, void *v) 5146 { 5147 struct ftrace_func_entry *entry = v; 5148 5149 if (!entry) 5150 return 0; 5151 5152 if (entry == FTRACE_GRAPH_EMPTY) { 5153 struct ftrace_graph_data *fgd = m->private; 5154 5155 if (fgd->type == GRAPH_FILTER_FUNCTION) 5156 seq_puts(m, "#### all functions enabled ####\n"); 5157 else 5158 seq_puts(m, "#### no functions disabled ####\n"); 5159 return 0; 5160 } 5161 5162 seq_printf(m, "%ps\n", (void *)entry->ip); 5163 5164 return 0; 5165 } 5166 5167 static const struct seq_operations ftrace_graph_seq_ops = { 5168 .start = g_start, 5169 .next = g_next, 5170 .stop = g_stop, 5171 .show = g_show, 5172 }; 5173 5174 static int 5175 __ftrace_graph_open(struct inode *inode, struct file *file, 5176 struct ftrace_graph_data *fgd) 5177 { 5178 int ret = 0; 5179 struct ftrace_hash *new_hash = NULL; 5180 5181 if (file->f_mode & FMODE_WRITE) { 5182 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 5183 5184 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) 5185 return -ENOMEM; 5186 5187 if (file->f_flags & O_TRUNC) 5188 new_hash = alloc_ftrace_hash(size_bits); 5189 else 5190 new_hash = alloc_and_copy_ftrace_hash(size_bits, 5191 fgd->hash); 5192 if (!new_hash) { 5193 ret = -ENOMEM; 5194 goto out; 5195 } 5196 } 5197 5198 if (file->f_mode & FMODE_READ) { 5199 ret = seq_open(file, &ftrace_graph_seq_ops); 5200 if (!ret) { 5201 struct seq_file *m = file->private_data; 5202 m->private = fgd; 5203 } else { 5204 /* Failed */ 5205 free_ftrace_hash(new_hash); 5206 new_hash = NULL; 5207 } 5208 } else 5209 file->private_data = fgd; 5210 5211 out: 5212 if (ret < 0 && file->f_mode & FMODE_WRITE) 5213 trace_parser_put(&fgd->parser); 5214 5215 fgd->new_hash = new_hash; 5216 5217 /* 5218 * All uses of fgd->hash must be taken with the graph_lock 5219 * held. The graph_lock is going to be released, so force 5220 * fgd->hash to be reinitialized when it is taken again. 
5221 */ 5222 fgd->hash = NULL; 5223 5224 return ret; 5225 } 5226 5227 static int 5228 ftrace_graph_open(struct inode *inode, struct file *file) 5229 { 5230 struct ftrace_graph_data *fgd; 5231 int ret; 5232 5233 if (unlikely(ftrace_disabled)) 5234 return -ENODEV; 5235 5236 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 5237 if (fgd == NULL) 5238 return -ENOMEM; 5239 5240 mutex_lock(&graph_lock); 5241 5242 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 5243 lockdep_is_held(&graph_lock)); 5244 fgd->type = GRAPH_FILTER_FUNCTION; 5245 fgd->seq_ops = &ftrace_graph_seq_ops; 5246 5247 ret = __ftrace_graph_open(inode, file, fgd); 5248 if (ret < 0) 5249 kfree(fgd); 5250 5251 mutex_unlock(&graph_lock); 5252 return ret; 5253 } 5254 5255 static int 5256 ftrace_graph_notrace_open(struct inode *inode, struct file *file) 5257 { 5258 struct ftrace_graph_data *fgd; 5259 int ret; 5260 5261 if (unlikely(ftrace_disabled)) 5262 return -ENODEV; 5263 5264 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 5265 if (fgd == NULL) 5266 return -ENOMEM; 5267 5268 mutex_lock(&graph_lock); 5269 5270 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5271 lockdep_is_held(&graph_lock)); 5272 fgd->type = GRAPH_FILTER_NOTRACE; 5273 fgd->seq_ops = &ftrace_graph_seq_ops; 5274 5275 ret = __ftrace_graph_open(inode, file, fgd); 5276 if (ret < 0) 5277 kfree(fgd); 5278 5279 mutex_unlock(&graph_lock); 5280 return ret; 5281 } 5282 5283 static int 5284 ftrace_graph_release(struct inode *inode, struct file *file) 5285 { 5286 struct ftrace_graph_data *fgd; 5287 struct ftrace_hash *old_hash, *new_hash; 5288 struct trace_parser *parser; 5289 int ret = 0; 5290 5291 if (file->f_mode & FMODE_READ) { 5292 struct seq_file *m = file->private_data; 5293 5294 fgd = m->private; 5295 seq_release(inode, file); 5296 } else { 5297 fgd = file->private_data; 5298 } 5299 5300 5301 if (file->f_mode & FMODE_WRITE) { 5302 5303 parser = &fgd->parser; 5304 5305 if (trace_parser_loaded((parser))) { 5306 ret = ftrace_graph_set_hash(fgd->new_hash, 5307 parser->buffer); 5308 } 5309 5310 trace_parser_put(parser); 5311 5312 new_hash = __ftrace_hash_move(fgd->new_hash); 5313 if (!new_hash) { 5314 ret = -ENOMEM; 5315 goto out; 5316 } 5317 5318 mutex_lock(&graph_lock); 5319 5320 if (fgd->type == GRAPH_FILTER_FUNCTION) { 5321 old_hash = rcu_dereference_protected(ftrace_graph_hash, 5322 lockdep_is_held(&graph_lock)); 5323 rcu_assign_pointer(ftrace_graph_hash, new_hash); 5324 } else { 5325 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5326 lockdep_is_held(&graph_lock)); 5327 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); 5328 } 5329 5330 mutex_unlock(&graph_lock); 5331 5332 /* Wait till all users are no longer using the old hash */ 5333 synchronize_rcu(); 5334 5335 free_ftrace_hash(old_hash); 5336 } 5337 5338 out: 5339 free_ftrace_hash(fgd->new_hash); 5340 kfree(fgd); 5341 5342 return ret; 5343 } 5344 5345 static int 5346 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) 5347 { 5348 struct ftrace_glob func_g; 5349 struct dyn_ftrace *rec; 5350 struct ftrace_page *pg; 5351 struct ftrace_func_entry *entry; 5352 int fail = 1; 5353 int not; 5354 5355 /* decode regex */ 5356 func_g.type = filter_parse_regex(buffer, strlen(buffer), 5357 &func_g.search, ¬); 5358 5359 func_g.len = strlen(func_g.search); 5360 5361 mutex_lock(&ftrace_lock); 5362 5363 if (unlikely(ftrace_disabled)) { 5364 mutex_unlock(&ftrace_lock); 5365 return -ENODEV; 5366 } 5367 5368 do_for_each_ftrace_rec(pg, rec) { 5369 5370 if (rec->flags & FTRACE_FL_DISABLED) 
5371 continue; 5372 5373 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 5374 entry = ftrace_lookup_ip(hash, rec->ip); 5375 5376 if (!not) { 5377 fail = 0; 5378 5379 if (entry) 5380 continue; 5381 if (add_hash_entry(hash, rec->ip) < 0) 5382 goto out; 5383 } else { 5384 if (entry) { 5385 free_hash_entry(hash, entry); 5386 fail = 0; 5387 } 5388 } 5389 } 5390 } while_for_each_ftrace_rec(); 5391 out: 5392 mutex_unlock(&ftrace_lock); 5393 5394 if (fail) 5395 return -EINVAL; 5396 5397 return 0; 5398 } 5399 5400 static ssize_t 5401 ftrace_graph_write(struct file *file, const char __user *ubuf, 5402 size_t cnt, loff_t *ppos) 5403 { 5404 ssize_t read, ret = 0; 5405 struct ftrace_graph_data *fgd = file->private_data; 5406 struct trace_parser *parser; 5407 5408 if (!cnt) 5409 return 0; 5410 5411 /* Read mode uses seq functions */ 5412 if (file->f_mode & FMODE_READ) { 5413 struct seq_file *m = file->private_data; 5414 fgd = m->private; 5415 } 5416 5417 parser = &fgd->parser; 5418 5419 read = trace_get_user(parser, ubuf, cnt, ppos); 5420 5421 if (read >= 0 && trace_parser_loaded(parser) && 5422 !trace_parser_cont(parser)) { 5423 5424 ret = ftrace_graph_set_hash(fgd->new_hash, 5425 parser->buffer); 5426 trace_parser_clear(parser); 5427 } 5428 5429 if (!ret) 5430 ret = read; 5431 5432 return ret; 5433 } 5434 5435 static const struct file_operations ftrace_graph_fops = { 5436 .open = ftrace_graph_open, 5437 .read = seq_read, 5438 .write = ftrace_graph_write, 5439 .llseek = tracing_lseek, 5440 .release = ftrace_graph_release, 5441 }; 5442 5443 static const struct file_operations ftrace_graph_notrace_fops = { 5444 .open = ftrace_graph_notrace_open, 5445 .read = seq_read, 5446 .write = ftrace_graph_write, 5447 .llseek = tracing_lseek, 5448 .release = ftrace_graph_release, 5449 }; 5450 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 5451 5452 void ftrace_create_filter_files(struct ftrace_ops *ops, 5453 struct dentry *parent) 5454 { 5455 5456 trace_create_file("set_ftrace_filter", 0644, parent, 5457 ops, &ftrace_filter_fops); 5458 5459 trace_create_file("set_ftrace_notrace", 0644, parent, 5460 ops, &ftrace_notrace_fops); 5461 } 5462 5463 /* 5464 * The name "destroy_filter_files" is really a misnomer. Although it 5465 * may actually delete the files in the future, for now it is really 5466 * intended to make sure the ops passed in are disabled 5467 * and that, when this function returns, the caller is free to 5468 * free the ops. 5469 * 5470 * The "destroy" name is only to match the "create" name that this 5471 * should be paired with.
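 *
 * A typical pairing, as a hedged sketch (hypothetical caller, not from
 * this file):
 *
 *	ftrace_create_filter_files(&my_ops, parent_dentry);
 *	...
 *	ftrace_destroy_filter_files(&my_ops);
 *
 * after which the caller may safely free the structure embedding my_ops.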
5472 */ 5473 void ftrace_destroy_filter_files(struct ftrace_ops *ops) 5474 { 5475 mutex_lock(&ftrace_lock); 5476 if (ops->flags & FTRACE_OPS_FL_ENABLED) 5477 ftrace_shutdown(ops, 0); 5478 ops->flags |= FTRACE_OPS_FL_DELETED; 5479 ftrace_free_filter(ops); 5480 mutex_unlock(&ftrace_lock); 5481 } 5482 5483 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) 5484 { 5485 5486 trace_create_file("available_filter_functions", 0444, 5487 d_tracer, NULL, &ftrace_avail_fops); 5488 5489 trace_create_file("enabled_functions", 0444, 5490 d_tracer, NULL, &ftrace_enabled_fops); 5491 5492 ftrace_create_filter_files(&global_ops, d_tracer); 5493 5494 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5495 trace_create_file("set_graph_function", 0644, d_tracer, 5496 NULL, 5497 &ftrace_graph_fops); 5498 trace_create_file("set_graph_notrace", 0644, d_tracer, 5499 NULL, 5500 &ftrace_graph_notrace_fops); 5501 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 5502 5503 return 0; 5504 } 5505 5506 static int ftrace_cmp_ips(const void *a, const void *b) 5507 { 5508 const unsigned long *ipa = a; 5509 const unsigned long *ipb = b; 5510 5511 if (*ipa > *ipb) 5512 return 1; 5513 if (*ipa < *ipb) 5514 return -1; 5515 return 0; 5516 } 5517 5518 static int ftrace_process_locs(struct module *mod, 5519 unsigned long *start, 5520 unsigned long *end) 5521 { 5522 struct ftrace_page *start_pg; 5523 struct ftrace_page *pg; 5524 struct dyn_ftrace *rec; 5525 unsigned long count; 5526 unsigned long *p; 5527 unsigned long addr; 5528 unsigned long flags = 0; /* Shut up gcc */ 5529 int ret = -ENOMEM; 5530 5531 count = end - start; 5532 5533 if (!count) 5534 return 0; 5535 5536 sort(start, count, sizeof(*start), 5537 ftrace_cmp_ips, NULL); 5538 5539 start_pg = ftrace_allocate_pages(count); 5540 if (!start_pg) 5541 return -ENOMEM; 5542 5543 mutex_lock(&ftrace_lock); 5544 5545 /* 5546 * The core kernel and each module need their own pages, as 5547 * modules will free them when they are removed. 5548 * Force a new page to be allocated for modules. 5549 */ 5550 if (!mod) { 5551 WARN_ON(ftrace_pages || ftrace_pages_start); 5552 /* First initialization */ 5553 ftrace_pages = ftrace_pages_start = start_pg; 5554 } else { 5555 if (!ftrace_pages) 5556 goto out; 5557 5558 if (WARN_ON(ftrace_pages->next)) { 5559 /* Hmm, we have free pages? */ 5560 while (ftrace_pages->next) 5561 ftrace_pages = ftrace_pages->next; 5562 } 5563 5564 ftrace_pages->next = start_pg; 5565 } 5566 5567 p = start; 5568 pg = start_pg; 5569 while (p < end) { 5570 addr = ftrace_call_adjust(*p++); 5571 /* 5572 * Some architecture linkers will pad between 5573 * the different mcount_loc sections of different 5574 * object files to satisfy alignments. 5575 * Skip any NULL pointers. 5576 */ 5577 if (!addr) 5578 continue; 5579 5580 if (pg->index == pg->size) { 5581 /* We should have allocated enough */ 5582 if (WARN_ON(!pg->next)) 5583 break; 5584 pg = pg->next; 5585 } 5586 5587 rec = &pg->records[pg->index++]; 5588 rec->ip = addr; 5589 } 5590 5591 /* We should have used all pages */ 5592 WARN_ON(pg->next); 5593 5594 /* Assign the last page to ftrace_pages */ 5595 ftrace_pages = pg; 5596 5597 /* 5598 * We only need to disable interrupts during startup 5599 * because we are modifying code that an interrupt 5600 * may execute, and the modification is not atomic. 5601 * But for modules, nothing runs the code we modify 5602 * until we are finished with it, and there's no 5603 * reason to cause large interrupt latencies while we do it.
5604 */ 5605 if (!mod) 5606 local_irq_save(flags); 5607 ftrace_update_code(mod, start_pg); 5608 if (!mod) 5609 local_irq_restore(flags); 5610 ret = 0; 5611 out: 5612 mutex_unlock(&ftrace_lock); 5613 5614 return ret; 5615 } 5616 5617 struct ftrace_mod_func { 5618 struct list_head list; 5619 char *name; 5620 unsigned long ip; 5621 unsigned int size; 5622 }; 5623 5624 struct ftrace_mod_map { 5625 struct rcu_head rcu; 5626 struct list_head list; 5627 struct module *mod; 5628 unsigned long start_addr; 5629 unsigned long end_addr; 5630 struct list_head funcs; 5631 unsigned int num_funcs; 5632 }; 5633 5634 #ifdef CONFIG_MODULES 5635 5636 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 5637 5638 static LIST_HEAD(ftrace_mod_maps); 5639 5640 static int referenced_filters(struct dyn_ftrace *rec) 5641 { 5642 struct ftrace_ops *ops; 5643 int cnt = 0; 5644 5645 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { 5646 if (ops_references_rec(ops, rec)) 5647 cnt++; 5648 } 5649 5650 return cnt; 5651 } 5652 5653 static void 5654 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) 5655 { 5656 struct ftrace_func_entry *entry; 5657 struct dyn_ftrace *rec; 5658 int i; 5659 5660 if (ftrace_hash_empty(hash)) 5661 return; 5662 5663 for (i = 0; i < pg->index; i++) { 5664 rec = &pg->records[i]; 5665 entry = __ftrace_lookup_ip(hash, rec->ip); 5666 /* 5667 * Do not allow this rec to match again. 5668 * Yeah, it may waste some memory, but will be removed 5669 * if/when the hash is modified again. 5670 */ 5671 if (entry) 5672 entry->ip = 0; 5673 } 5674 } 5675 5676 /* Clear any records from hashes */ 5677 static void clear_mod_from_hashes(struct ftrace_page *pg) 5678 { 5679 struct trace_array *tr; 5680 5681 mutex_lock(&trace_types_lock); 5682 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 5683 if (!tr->ops || !tr->ops->func_hash) 5684 continue; 5685 mutex_lock(&tr->ops->func_hash->regex_lock); 5686 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); 5687 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); 5688 mutex_unlock(&tr->ops->func_hash->regex_lock); 5689 } 5690 mutex_unlock(&trace_types_lock); 5691 } 5692 5693 static void ftrace_free_mod_map(struct rcu_head *rcu) 5694 { 5695 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); 5696 struct ftrace_mod_func *mod_func; 5697 struct ftrace_mod_func *n; 5698 5699 /* All the contents of mod_map are no longer visible to readers */ 5700 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { 5701 kfree(mod_func->name); 5702 list_del(&mod_func->list); 5703 kfree(mod_func); 5704 } 5705 5706 kfree(mod_map); 5707 } 5708 5709 void ftrace_release_mod(struct module *mod) 5710 { 5711 struct ftrace_mod_map *mod_map; 5712 struct ftrace_mod_map *n; 5713 struct dyn_ftrace *rec; 5714 struct ftrace_page **last_pg; 5715 struct ftrace_page *tmp_page = NULL; 5716 struct ftrace_page *pg; 5717 int order; 5718 5719 mutex_lock(&ftrace_lock); 5720 5721 if (ftrace_disabled) 5722 goto out_unlock; 5723 5724 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { 5725 if (mod_map->mod == mod) { 5726 list_del_rcu(&mod_map->list); 5727 call_rcu(&mod_map->rcu, ftrace_free_mod_map); 5728 break; 5729 } 5730 } 5731 5732 /* 5733 * Each module has its own ftrace_pages, remove 5734 * them from the list.
5735 */ 5736 last_pg = &ftrace_pages_start; 5737 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 5738 rec = &pg->records[0]; 5739 if (within_module_core(rec->ip, mod) || 5740 within_module_init(rec->ip, mod)) { 5741 /* 5742 * As core pages are first, the first 5743 * page should never be a module page. 5744 */ 5745 if (WARN_ON(pg == ftrace_pages_start)) 5746 goto out_unlock; 5747 5748 /* Check if we are deleting the last page */ 5749 if (pg == ftrace_pages) 5750 ftrace_pages = next_to_ftrace_page(last_pg); 5751 5752 ftrace_update_tot_cnt -= pg->index; 5753 *last_pg = pg->next; 5754 5755 pg->next = tmp_page; 5756 tmp_page = pg; 5757 } else 5758 last_pg = &pg->next; 5759 } 5760 out_unlock: 5761 mutex_unlock(&ftrace_lock); 5762 5763 for (pg = tmp_page; pg; pg = tmp_page) { 5764 5765 /* Needs to be called outside of ftrace_lock */ 5766 clear_mod_from_hashes(pg); 5767 5768 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 5769 free_pages((unsigned long)pg->records, order); 5770 tmp_page = pg->next; 5771 kfree(pg); 5772 } 5773 } 5774 5775 void ftrace_module_enable(struct module *mod) 5776 { 5777 struct dyn_ftrace *rec; 5778 struct ftrace_page *pg; 5779 5780 mutex_lock(&ftrace_lock); 5781 5782 if (ftrace_disabled) 5783 goto out_unlock; 5784 5785 /* 5786 * If the tracing is enabled, go ahead and enable the record. 5787 * 5788 * The reason not to enable the record immediately is the 5789 * inherent check of ftrace_make_nop/ftrace_make_call for 5790 * correct previous instructions. Making first the NOP 5791 * conversion puts the module to the correct state, thus 5792 * passing the ftrace_make_call check. 5793 * 5794 * We also delay this to after the module code already set the 5795 * text to read-only, as we now need to set it back to read-write 5796 * so that we can modify the text. 5797 */ 5798 if (ftrace_start_up) 5799 ftrace_arch_code_modify_prepare(); 5800 5801 do_for_each_ftrace_rec(pg, rec) { 5802 int cnt; 5803 /* 5804 * do_for_each_ftrace_rec() is a double loop. 5805 * module text shares the pg. If a record is 5806 * not part of this module, then skip this pg, 5807 * which the "break" will do. 5808 */ 5809 if (!within_module_core(rec->ip, mod) && 5810 !within_module_init(rec->ip, mod)) 5811 break; 5812 5813 cnt = 0; 5814 5815 /* 5816 * When adding a module, we need to check if tracers are 5817 * currently enabled and if they are, and can trace this record, 5818 * we need to enable the module functions as well as update the 5819 * reference counts for those function records. 
5820 */ 5821 if (ftrace_start_up) 5822 cnt += referenced_filters(rec); 5823 5824 /* This clears FTRACE_FL_DISABLED */ 5825 rec->flags = cnt; 5826 5827 if (ftrace_start_up && cnt) { 5828 int failed = __ftrace_replace_code(rec, 1); 5829 if (failed) { 5830 ftrace_bug(failed, rec); 5831 goto out_loop; 5832 } 5833 } 5834 5835 } while_for_each_ftrace_rec(); 5836 5837 out_loop: 5838 if (ftrace_start_up) 5839 ftrace_arch_code_modify_post_process(); 5840 5841 out_unlock: 5842 mutex_unlock(&ftrace_lock); 5843 5844 process_cached_mods(mod->name); 5845 } 5846 5847 void ftrace_module_init(struct module *mod) 5848 { 5849 if (ftrace_disabled || !mod->num_ftrace_callsites) 5850 return; 5851 5852 ftrace_process_locs(mod, mod->ftrace_callsites, 5853 mod->ftrace_callsites + mod->num_ftrace_callsites); 5854 } 5855 5856 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 5857 struct dyn_ftrace *rec) 5858 { 5859 struct ftrace_mod_func *mod_func; 5860 unsigned long symsize; 5861 unsigned long offset; 5862 char str[KSYM_SYMBOL_LEN]; 5863 char *modname; 5864 const char *ret; 5865 5866 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); 5867 if (!ret) 5868 return; 5869 5870 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); 5871 if (!mod_func) 5872 return; 5873 5874 mod_func->name = kstrdup(str, GFP_KERNEL); 5875 if (!mod_func->name) { 5876 kfree(mod_func); 5877 return; 5878 } 5879 5880 mod_func->ip = rec->ip - offset; 5881 mod_func->size = symsize; 5882 5883 mod_map->num_funcs++; 5884 5885 list_add_rcu(&mod_func->list, &mod_map->funcs); 5886 } 5887 5888 static struct ftrace_mod_map * 5889 allocate_ftrace_mod_map(struct module *mod, 5890 unsigned long start, unsigned long end) 5891 { 5892 struct ftrace_mod_map *mod_map; 5893 5894 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); 5895 if (!mod_map) 5896 return NULL; 5897 5898 mod_map->mod = mod; 5899 mod_map->start_addr = start; 5900 mod_map->end_addr = end; 5901 mod_map->num_funcs = 0; 5902 5903 INIT_LIST_HEAD_RCU(&mod_map->funcs); 5904 5905 list_add_rcu(&mod_map->list, &ftrace_mod_maps); 5906 5907 return mod_map; 5908 } 5909 5910 static const char * 5911 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, 5912 unsigned long addr, unsigned long *size, 5913 unsigned long *off, char *sym) 5914 { 5915 struct ftrace_mod_func *found_func = NULL; 5916 struct ftrace_mod_func *mod_func; 5917 5918 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 5919 if (addr >= mod_func->ip && 5920 addr < mod_func->ip + mod_func->size) { 5921 found_func = mod_func; 5922 break; 5923 } 5924 } 5925 5926 if (found_func) { 5927 if (size) 5928 *size = found_func->size; 5929 if (off) 5930 *off = addr - found_func->ip; 5931 if (sym) 5932 strlcpy(sym, found_func->name, KSYM_NAME_LEN); 5933 5934 return found_func->name; 5935 } 5936 5937 return NULL; 5938 } 5939 5940 const char * 5941 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, 5942 unsigned long *off, char **modname, char *sym) 5943 { 5944 struct ftrace_mod_map *mod_map; 5945 const char *ret = NULL; 5946 5947 /* mod_map is freed via call_rcu() */ 5948 preempt_disable(); 5949 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 5950 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); 5951 if (ret) { 5952 if (modname) 5953 *modname = mod_map->mod->name; 5954 break; 5955 } 5956 } 5957 preempt_enable(); 5958 5959 return ret; 5960 } 5961 5962 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 5963 char *type, char *name, 5964 char *module_name, int 
*exported) 5965 { 5966 struct ftrace_mod_map *mod_map; 5967 struct ftrace_mod_func *mod_func; 5968 5969 preempt_disable(); 5970 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 5971 5972 if (symnum >= mod_map->num_funcs) { 5973 symnum -= mod_map->num_funcs; 5974 continue; 5975 } 5976 5977 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 5978 if (symnum > 1) { 5979 symnum--; 5980 continue; 5981 } 5982 5983 *value = mod_func->ip; 5984 *type = 'T'; 5985 strlcpy(name, mod_func->name, KSYM_NAME_LEN); 5986 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); 5987 *exported = 1; 5988 preempt_enable(); 5989 return 0; 5990 } 5991 WARN_ON(1); 5992 break; 5993 } 5994 preempt_enable(); 5995 return -ERANGE; 5996 } 5997 5998 #else 5999 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 6000 struct dyn_ftrace *rec) { } 6001 static inline struct ftrace_mod_map * 6002 allocate_ftrace_mod_map(struct module *mod, 6003 unsigned long start, unsigned long end) 6004 { 6005 return NULL; 6006 } 6007 #endif /* CONFIG_MODULES */ 6008 6009 struct ftrace_init_func { 6010 struct list_head list; 6011 unsigned long ip; 6012 }; 6013 6014 /* Clear any init ips from hashes */ 6015 static void 6016 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) 6017 { 6018 struct ftrace_func_entry *entry; 6019 6020 if (ftrace_hash_empty(hash)) 6021 return; 6022 6023 entry = __ftrace_lookup_ip(hash, func->ip); 6024 6025 /* 6026 * Do not allow this rec to match again. 6027 * Yeah, it may waste some memory, but will be removed 6028 * if/when the hash is modified again. 6029 */ 6030 if (entry) 6031 entry->ip = 0; 6032 } 6033 6034 static void 6035 clear_func_from_hashes(struct ftrace_init_func *func) 6036 { 6037 struct trace_array *tr; 6038 6039 mutex_lock(&trace_types_lock); 6040 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 6041 if (!tr->ops || !tr->ops->func_hash) 6042 continue; 6043 mutex_lock(&tr->ops->func_hash->regex_lock); 6044 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); 6045 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); 6046 mutex_unlock(&tr->ops->func_hash->regex_lock); 6047 } 6048 mutex_unlock(&trace_types_lock); 6049 } 6050 6051 static void add_to_clear_hash_list(struct list_head *clear_list, 6052 struct dyn_ftrace *rec) 6053 { 6054 struct ftrace_init_func *func; 6055 6056 func = kmalloc(sizeof(*func), GFP_KERNEL); 6057 if (!func) { 6058 WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n"); 6059 return; 6060 } 6061 6062 func->ip = rec->ip; 6063 list_add(&func->list, clear_list); 6064 } 6065 6066 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) 6067 { 6068 unsigned long start = (unsigned long)(start_ptr); 6069 unsigned long end = (unsigned long)(end_ptr); 6070 struct ftrace_page **last_pg = &ftrace_pages_start; 6071 struct ftrace_page *pg; 6072 struct dyn_ftrace *rec; 6073 struct dyn_ftrace key; 6074 struct ftrace_mod_map *mod_map = NULL; 6075 struct ftrace_init_func *func, *func_next; 6076 struct list_head clear_hash; 6077 int order; 6078 6079 INIT_LIST_HEAD(&clear_hash); 6080 6081 key.ip = start; 6082 key.flags = end; /* overload flags, as it is unsigned long */ 6083 6084 mutex_lock(&ftrace_lock); 6085 6086 /* 6087 * If we are freeing module init memory, then check if 6088 * any tracer is active. If so, we need to save a mapping of 6089 * the module functions being freed with the address. 
6090 */ 6091 if (mod && ftrace_ops_list != &ftrace_list_end) 6092 mod_map = allocate_ftrace_mod_map(mod, start, end); 6093 6094 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { 6095 if (end < pg->records[0].ip || 6096 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 6097 continue; 6098 again: 6099 rec = bsearch(&key, pg->records, pg->index, 6100 sizeof(struct dyn_ftrace), 6101 ftrace_cmp_recs); 6102 if (!rec) 6103 continue; 6104 6105 /* rec will be cleared from hashes after ftrace_lock unlock */ 6106 add_to_clear_hash_list(&clear_hash, rec); 6107 6108 if (mod_map) 6109 save_ftrace_mod_rec(mod_map, rec); 6110 6111 pg->index--; 6112 ftrace_update_tot_cnt--; 6113 if (!pg->index) { 6114 *last_pg = pg->next; 6115 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 6116 free_pages((unsigned long)pg->records, order); 6117 kfree(pg); 6118 pg = container_of(last_pg, struct ftrace_page, next); 6119 if (!(*last_pg)) 6120 ftrace_pages = pg; 6121 continue; 6122 } 6123 memmove(rec, rec + 1, 6124 (pg->index - (rec - pg->records)) * sizeof(*rec)); 6125 /* More than one function may be in this block */ 6126 goto again; 6127 } 6128 mutex_unlock(&ftrace_lock); 6129 6130 list_for_each_entry_safe(func, func_next, &clear_hash, list) { 6131 clear_func_from_hashes(func); 6132 kfree(func); 6133 } 6134 } 6135 6136 void __init ftrace_free_init_mem(void) 6137 { 6138 void *start = (void *)(&__init_begin); 6139 void *end = (void *)(&__init_end); 6140 6141 ftrace_free_mem(NULL, start, end); 6142 } 6143 6144 void __init ftrace_init(void) 6145 { 6146 extern unsigned long __start_mcount_loc[]; 6147 extern unsigned long __stop_mcount_loc[]; 6148 unsigned long count, flags; 6149 int ret; 6150 6151 local_irq_save(flags); 6152 ret = ftrace_dyn_arch_init(); 6153 local_irq_restore(flags); 6154 if (ret) 6155 goto failed; 6156 6157 count = __stop_mcount_loc - __start_mcount_loc; 6158 if (!count) { 6159 pr_info("ftrace: No functions to be traced?\n"); 6160 goto failed; 6161 } 6162 6163 pr_info("ftrace: allocating %ld entries in %ld pages\n", 6164 count, count / ENTRIES_PER_PAGE + 1); 6165 6166 last_ftrace_enabled = ftrace_enabled = 1; 6167 6168 ret = ftrace_process_locs(NULL, 6169 __start_mcount_loc, 6170 __stop_mcount_loc); 6171 6172 set_ftrace_early_filters(); 6173 6174 return; 6175 failed: 6176 ftrace_disabled = 1; 6177 } 6178 6179 /* Do nothing if arch does not support this */ 6180 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) 6181 { 6182 } 6183 6184 static void ftrace_update_trampoline(struct ftrace_ops *ops) 6185 { 6186 arch_ftrace_update_trampoline(ops); 6187 } 6188 6189 void ftrace_init_trace_array(struct trace_array *tr) 6190 { 6191 INIT_LIST_HEAD(&tr->func_probes); 6192 INIT_LIST_HEAD(&tr->mod_trace); 6193 INIT_LIST_HEAD(&tr->mod_notrace); 6194 } 6195 #else 6196 6197 struct ftrace_ops global_ops = { 6198 .func = ftrace_stub, 6199 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 6200 FTRACE_OPS_FL_INITIALIZED | 6201 FTRACE_OPS_FL_PID, 6202 }; 6203 6204 static int __init ftrace_nodyn_init(void) 6205 { 6206 ftrace_enabled = 1; 6207 return 0; 6208 } 6209 core_initcall(ftrace_nodyn_init); 6210 6211 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } 6212 static inline void ftrace_startup_enable(int command) { } 6213 static inline void ftrace_startup_all(int command) { } 6214 6215 # define ftrace_startup_sysctl() do { } while (0) 6216 # define ftrace_shutdown_sysctl() do { } while (0) 6217 6218 static void ftrace_update_trampoline(struct ftrace_ops 
*ops) 6219 { 6220 } 6221 6222 #endif /* CONFIG_DYNAMIC_FTRACE */ 6223 6224 __init void ftrace_init_global_array_ops(struct trace_array *tr) 6225 { 6226 tr->ops = &global_ops; 6227 tr->ops->private = tr; 6228 ftrace_init_trace_array(tr); 6229 } 6230 6231 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) 6232 { 6233 /* If we filter on pids, update to use the pid function */ 6234 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { 6235 if (WARN_ON(tr->ops->func != ftrace_stub)) 6236 printk("ftrace ops had %pS for function\n", 6237 tr->ops->func); 6238 } 6239 tr->ops->func = func; 6240 tr->ops->private = tr; 6241 } 6242 6243 void ftrace_reset_array_ops(struct trace_array *tr) 6244 { 6245 tr->ops->func = ftrace_stub; 6246 } 6247 6248 static nokprobe_inline void 6249 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 6250 struct ftrace_ops *ignored, struct pt_regs *regs) 6251 { 6252 struct ftrace_ops *op; 6253 int bit; 6254 6255 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 6256 if (bit < 0) 6257 return; 6258 6259 /* 6260 * Some of the ops may be dynamically allocated, 6261 * they must be freed after a synchronize_rcu(). 6262 */ 6263 preempt_disable_notrace(); 6264 6265 do_for_each_ftrace_op(op, ftrace_ops_list) { 6266 /* Stub functions don't need to be called nor tested */ 6267 if (op->flags & FTRACE_OPS_FL_STUB) 6268 continue; 6269 /* 6270 * Check the following for each ops before calling their func: 6271 * if the RCU flag is set, then rcu_is_watching() must be true, 6272 * and in either case the ip must match the ops filter as 6273 * tested by ftrace_ops_test() below. 6274 * 6275 * If any of the above fails then the op->func() is not 6276 * executed. 6277 */ 6278 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && 6279 ftrace_ops_test(op, ip, regs)) { 6280 if (FTRACE_WARN_ON(!op->func)) { 6281 pr_warn("op=%p %pS\n", op, op); 6282 goto out; 6283 } 6284 op->func(ip, parent_ip, op, regs); 6285 } 6286 } while_for_each_ftrace_op(op); 6287 out: 6288 preempt_enable_notrace(); 6289 trace_clear_recursion(bit); 6290 } 6291 6292 /* 6293 * Some archs only support passing ip and parent_ip. Even though 6294 * the list function ignores the op parameter, we do not want any 6295 * C side effects from calling a function without the caller 6296 * passing the third parameter. 6297 * Archs are expected to support both regs and ftrace_ops at the same time: 6298 * if they support ftrace_ops, it is assumed they support regs. 6299 * If callbacks want to use regs, they must either check for regs 6300 * being NULL, or check for CONFIG_DYNAMIC_FTRACE_WITH_REGS. 6301 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. 6302 * An architecture can pass partial regs with ftrace_ops and still 6303 * set ARCH_SUPPORTS_FTRACE_OPS.
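 *
 * A callback that wants to look at regs could therefore be shaped like
 * this hedged sketch (hypothetical function, not from this file):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		if (!regs)
 *			return;
 *		...
 *	}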
6304 */ 6305 #if ARCH_SUPPORTS_FTRACE_OPS 6306 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 6307 struct ftrace_ops *op, struct pt_regs *regs) 6308 { 6309 __ftrace_ops_list_func(ip, parent_ip, NULL, regs); 6310 } 6311 NOKPROBE_SYMBOL(ftrace_ops_list_func); 6312 #else 6313 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) 6314 { 6315 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); 6316 } 6317 NOKPROBE_SYMBOL(ftrace_ops_no_ops); 6318 #endif 6319 6320 /* 6321 * If there's only one function registered but it does not support 6322 * recursion or needs RCU protection, then 6323 * this function will be called by the mcount trampoline. 6324 */ 6325 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, 6326 struct ftrace_ops *op, struct pt_regs *regs) 6327 { 6328 int bit; 6329 6330 if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) 6331 return; 6332 6333 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 6334 if (bit < 0) 6335 return; 6336 6337 preempt_disable_notrace(); 6338 6339 op->func(ip, parent_ip, op, regs); 6340 6341 preempt_enable_notrace(); 6342 trace_clear_recursion(bit); 6343 } 6344 NOKPROBE_SYMBOL(ftrace_ops_assist_func); 6345 6346 /** 6347 * ftrace_ops_get_func - get the function a trampoline should call 6348 * @ops: the ops to get the function for 6349 * 6350 * Normally the mcount trampoline will call the ops->func, but there 6351 * are times that it should not. For example, if the ops does not 6352 * have its own recursion protection, then it should call the 6353 * ftrace_ops_assist_func() instead. 6354 * 6355 * Returns the function that the trampoline should call for @ops. 6356 */ 6357 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) 6358 { 6359 /* 6360 * If the function does not handle recursion or needs to be RCU safe, 6361 * then we need to call the assist handler.
6362 */ 6363 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) || 6364 ops->flags & FTRACE_OPS_FL_RCU) 6365 return ftrace_ops_assist_func; 6366 6367 return ops->func; 6368 } 6369 6370 static void 6371 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, 6372 struct task_struct *prev, struct task_struct *next) 6373 { 6374 struct trace_array *tr = data; 6375 struct trace_pid_list *pid_list; 6376 6377 pid_list = rcu_dereference_sched(tr->function_pids); 6378 6379 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, 6380 trace_ignore_this_task(pid_list, next)); 6381 } 6382 6383 static void 6384 ftrace_pid_follow_sched_process_fork(void *data, 6385 struct task_struct *self, 6386 struct task_struct *task) 6387 { 6388 struct trace_pid_list *pid_list; 6389 struct trace_array *tr = data; 6390 6391 pid_list = rcu_dereference_sched(tr->function_pids); 6392 trace_filter_add_remove_task(pid_list, self, task); 6393 } 6394 6395 static void 6396 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) 6397 { 6398 struct trace_pid_list *pid_list; 6399 struct trace_array *tr = data; 6400 6401 pid_list = rcu_dereference_sched(tr->function_pids); 6402 trace_filter_add_remove_task(pid_list, NULL, task); 6403 } 6404 6405 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) 6406 { 6407 if (enable) { 6408 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 6409 tr); 6410 register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, 6411 tr); 6412 } else { 6413 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 6414 tr); 6415 unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, 6416 tr); 6417 } 6418 } 6419 6420 static void clear_ftrace_pids(struct trace_array *tr) 6421 { 6422 struct trace_pid_list *pid_list; 6423 int cpu; 6424 6425 pid_list = rcu_dereference_protected(tr->function_pids, 6426 lockdep_is_held(&ftrace_lock)); 6427 if (!pid_list) 6428 return; 6429 6430 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 6431 6432 for_each_possible_cpu(cpu) 6433 per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false; 6434 6435 rcu_assign_pointer(tr->function_pids, NULL); 6436 6437 /* Wait till all users are no longer using pid filtering */ 6438 synchronize_rcu(); 6439 6440 trace_free_pid_list(pid_list); 6441 } 6442 6443 void ftrace_clear_pids(struct trace_array *tr) 6444 { 6445 mutex_lock(&ftrace_lock); 6446 6447 clear_ftrace_pids(tr); 6448 6449 mutex_unlock(&ftrace_lock); 6450 } 6451 6452 static void ftrace_pid_reset(struct trace_array *tr) 6453 { 6454 mutex_lock(&ftrace_lock); 6455 clear_ftrace_pids(tr); 6456 6457 ftrace_update_pid_func(); 6458 ftrace_startup_all(0); 6459 6460 mutex_unlock(&ftrace_lock); 6461 } 6462 6463 /* Greater than any max PID */ 6464 #define FTRACE_NO_PIDS (void *)(PID_MAX_LIMIT + 1) 6465 6466 static void *fpid_start(struct seq_file *m, loff_t *pos) 6467 __acquires(RCU) 6468 { 6469 struct trace_pid_list *pid_list; 6470 struct trace_array *tr = m->private; 6471 6472 mutex_lock(&ftrace_lock); 6473 rcu_read_lock_sched(); 6474 6475 pid_list = rcu_dereference_sched(tr->function_pids); 6476 6477 if (!pid_list) 6478 return !(*pos) ? 
/* Greater than any max PID */
#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)

static void *fpid_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	mutex_lock(&ftrace_lock);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_pids);

	if (!pid_list)
		return !(*pos) ? FTRACE_NO_PIDS : NULL;

	return trace_pid_start(pid_list, pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);

	if (v == FTRACE_NO_PIDS)
		return NULL;

	return trace_pid_next(pid_list, v, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
	if (v == FTRACE_NO_PIDS) {
		seq_puts(m, "no pid\n");
		return 0;
	}

	return trace_pid_show(m, v);
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset(tr);

	ret = seq_open(file, &ftrace_pid_sops);
	if (ret < 0) {
		trace_array_put(tr);
	} else {
		m = file->private_data;
		/* copy tr over to seq ops */
		m->private = tr;
	}

	return ret;
}

static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * ftrace_lock is held.
	 */
	pid_list = rcu_dereference_protected(tr->function_pids,
					     mutex_is_locked(&ftrace_lock));

	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}
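
/*
 * Illustrative usage (comment only): the FTRACE_NO_PIDS sentinel lets
 * the seq_file iterator above distinguish "no pid list installed" from
 * "end of the pid list", so reading an empty filter shows "no pid",
 * and truncating the file resets it. Paths assume tracefs is mounted
 * at /sys/kernel/tracing:
 *
 *	# cat /sys/kernel/tracing/set_ftrace_pid
 *	no pid
 *	# echo 123 > /sys/kernel/tracing/set_ftrace_pid
 *	# cat /sys/kernel/tracing/set_ftrace_pid
 *	123
 */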
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_lock);

	filtered_pids = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->function_pids, pid_list);

	if (filtered_pids) {
		synchronize_rcu();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/* Register a probe to set whether to ignore the tracing of a task */
		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

	ftrace_update_pid_func();
	ftrace_startup_all(0);
out:
	mutex_unlock(&ftrace_lock);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};

void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			  tr, &ftrace_pid_fops);
}

void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. To simply disable function tracing
 * from a non-atomic context, unregister the ftrace_ops instead.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	ftrace_trace_function = ftrace_stub;
}

/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 *
 * Returns 1 if ftrace_kill() was called, 0 otherwise.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a callback function to be called from every function
 * in the kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
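
/*
 * Illustrative sketch (not part of the kernel, never built): the minimal
 * register/unregister round trip described in the kerneldoc above. As
 * that kerneldoc requires, the callback is marked notrace so it cannot
 * recurse into itself. The names my_trace_func, my_ops, my_tracer_init
 * and my_tracer_exit are hypothetical.
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Called on entry to every traced function in the kernel. */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
};

static int __init my_tracer_init(void)
{
	return register_ftrace_function(&my_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
#endif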
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (rcu_dereference_protected(ftrace_ops_list,
			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
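
/*
 * Illustrative usage (comment only): the handler above backs the
 * kernel.ftrace_enabled sysctl, so function tracing as a whole can
 * be toggled from user space, e.g.:
 *
 *	# sysctl kernel.ftrace_enabled=0
 *	# sysctl kernel.ftrace_enabled=1
 *
 * or by writing to /proc/sys/kernel/ftrace_enabled directly.
 */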