1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Infrastructure for profiling code inserted by 'gcc -pg'. 4 * 5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> 6 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> 7 * 8 * Originally ported from the -rt patch by: 9 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> 10 * 11 * Based on code in the latency_tracer, that is: 12 * 13 * Copyright (C) 2004-2006 Ingo Molnar 14 * Copyright (C) 2004 Nadia Yvette Chambers 15 */ 16 17 #include <linux/stop_machine.h> 18 #include <linux/clocksource.h> 19 #include <linux/sched/task.h> 20 #include <linux/kallsyms.h> 21 #include <linux/seq_file.h> 22 #include <linux/tracefs.h> 23 #include <linux/hardirq.h> 24 #include <linux/kthread.h> 25 #include <linux/uaccess.h> 26 #include <linux/bsearch.h> 27 #include <linux/module.h> 28 #include <linux/ftrace.h> 29 #include <linux/sysctl.h> 30 #include <linux/slab.h> 31 #include <linux/ctype.h> 32 #include <linux/sort.h> 33 #include <linux/list.h> 34 #include <linux/hash.h> 35 #include <linux/rcupdate.h> 36 37 #include <trace/events/sched.h> 38 39 #include <asm/sections.h> 40 #include <asm/setup.h> 41 42 #include "ftrace_internal.h" 43 #include "trace_output.h" 44 #include "trace_stat.h" 45 46 #define FTRACE_WARN_ON(cond) \ 47 ({ \ 48 int ___r = cond; \ 49 if (WARN_ON(___r)) \ 50 ftrace_kill(); \ 51 ___r; \ 52 }) 53 54 #define FTRACE_WARN_ON_ONCE(cond) \ 55 ({ \ 56 int ___r = cond; \ 57 if (WARN_ON_ONCE(___r)) \ 58 ftrace_kill(); \ 59 ___r; \ 60 }) 61 62 /* hash bits for specific function selection */ 63 #define FTRACE_HASH_BITS 7 64 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) 65 #define FTRACE_HASH_DEFAULT_BITS 10 66 #define FTRACE_HASH_MAX_BITS 12 67 68 #ifdef CONFIG_DYNAMIC_FTRACE 69 #define INIT_OPS_HASH(opsname) \ 70 .func_hash = &opsname.local_hash, \ 71 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), 72 #define ASSIGN_OPS_HASH(opsname, val) \ 73 .func_hash = val, \ 74 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock), 75 #else 76 #define INIT_OPS_HASH(opsname) 77 #define ASSIGN_OPS_HASH(opsname, val) 78 #endif 79 80 enum { 81 FTRACE_MODIFY_ENABLE_FL = (1 << 0), 82 FTRACE_MODIFY_MAY_SLEEP_FL = (1 << 1), 83 }; 84 85 struct ftrace_ops ftrace_list_end __read_mostly = { 86 .func = ftrace_stub, 87 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, 88 INIT_OPS_HASH(ftrace_list_end) 89 }; 90 91 /* ftrace_enabled is a method to turn ftrace on or off */ 92 int ftrace_enabled __read_mostly; 93 static int last_ftrace_enabled; 94 95 /* Current function tracing op */ 96 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; 97 /* What to set function_trace_op to */ 98 static struct ftrace_ops *set_function_trace_op; 99 100 static bool ftrace_pids_enabled(struct ftrace_ops *ops) 101 { 102 struct trace_array *tr; 103 104 if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private) 105 return false; 106 107 tr = ops->private; 108 109 return tr->function_pids != NULL; 110 } 111 112 static void ftrace_update_trampoline(struct ftrace_ops *ops); 113 114 /* 115 * ftrace_disabled is set when an anomaly is discovered. 116 * ftrace_disabled is much stronger than ftrace_enabled. 
117 */ 118 static int ftrace_disabled __read_mostly; 119 120 DEFINE_MUTEX(ftrace_lock); 121 122 struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end; 123 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 124 struct ftrace_ops global_ops; 125 126 #if ARCH_SUPPORTS_FTRACE_OPS 127 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 128 struct ftrace_ops *op, struct pt_regs *regs); 129 #else 130 /* See comment below, where ftrace_ops_list_func is defined */ 131 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); 132 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) 133 #endif 134 135 static inline void ftrace_ops_init(struct ftrace_ops *ops) 136 { 137 #ifdef CONFIG_DYNAMIC_FTRACE 138 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { 139 mutex_init(&ops->local_hash.regex_lock); 140 ops->func_hash = &ops->local_hash; 141 ops->flags |= FTRACE_OPS_FL_INITIALIZED; 142 } 143 #endif 144 } 145 146 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, 147 struct ftrace_ops *op, struct pt_regs *regs) 148 { 149 struct trace_array *tr = op->private; 150 151 if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid)) 152 return; 153 154 op->saved_func(ip, parent_ip, op, regs); 155 } 156 157 static void ftrace_sync(struct work_struct *work) 158 { 159 /* 160 * This function is just a stub to implement a hard force 161 * of synchronize_rcu(). This requires synchronizing 162 * tasks even in userspace and idle. 163 * 164 * Yes, function tracing is rude. 165 */ 166 } 167 168 static void ftrace_sync_ipi(void *data) 169 { 170 /* Probably not needed, but do it anyway */ 171 smp_rmb(); 172 } 173 174 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops) 175 { 176 /* 177 * If this is a dynamic, RCU, or per CPU ops, or we force list func, 178 * then it needs to call the list anyway. 179 */ 180 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) || 181 FTRACE_FORCE_LIST_FUNC) 182 return ftrace_ops_list_func; 183 184 return ftrace_ops_get_func(ops); 185 } 186 187 static void update_ftrace_function(void) 188 { 189 ftrace_func_t func; 190 191 /* 192 * Prepare the ftrace_ops that the arch callback will use. 193 * If there's only one ftrace_ops registered, the ftrace_ops_list 194 * will point to the ops we want. 195 */ 196 set_function_trace_op = rcu_dereference_protected(ftrace_ops_list, 197 lockdep_is_held(&ftrace_lock)); 198 199 /* If there's no ftrace_ops registered, just call the stub function */ 200 if (set_function_trace_op == &ftrace_list_end) { 201 func = ftrace_stub; 202 203 /* 204 * If we are at the end of the list and this ops is 205 * recursion safe and not dynamic and the arch supports passing ops, 206 * then have the mcount trampoline call the function directly. 207 */ 208 } else if (rcu_dereference_protected(ftrace_ops_list->next, 209 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { 210 func = ftrace_ops_get_list_func(ftrace_ops_list); 211 212 } else { 213 /* Just use the default ftrace_ops */ 214 set_function_trace_op = &ftrace_list_end; 215 func = ftrace_ops_list_func; 216 } 217 218 update_function_graph_func(); 219 220 /* If there's no change, then do nothing more here */ 221 if (ftrace_trace_function == func) 222 return; 223 224 /* 225 * If we are using the list function, it doesn't care 226 * about the function_trace_ops. 
 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
323 */ 324 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && 325 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) 326 return -EINVAL; 327 328 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) 329 ops->flags |= FTRACE_OPS_FL_SAVE_REGS; 330 #endif 331 332 if (!core_kernel_data((unsigned long)ops)) 333 ops->flags |= FTRACE_OPS_FL_DYNAMIC; 334 335 add_ftrace_ops(&ftrace_ops_list, ops); 336 337 /* Always save the function, and reset at unregistering */ 338 ops->saved_func = ops->func; 339 340 if (ftrace_pids_enabled(ops)) 341 ops->func = ftrace_pid_func; 342 343 ftrace_update_trampoline(ops); 344 345 if (ftrace_enabled) 346 update_ftrace_function(); 347 348 return 0; 349 } 350 351 int __unregister_ftrace_function(struct ftrace_ops *ops) 352 { 353 int ret; 354 355 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) 356 return -EBUSY; 357 358 ret = remove_ftrace_ops(&ftrace_ops_list, ops); 359 360 if (ret < 0) 361 return ret; 362 363 if (ftrace_enabled) 364 update_ftrace_function(); 365 366 ops->func = ops->saved_func; 367 368 return 0; 369 } 370 371 static void ftrace_update_pid_func(void) 372 { 373 struct ftrace_ops *op; 374 375 /* Only do something if we are tracing something */ 376 if (ftrace_trace_function == ftrace_stub) 377 return; 378 379 do_for_each_ftrace_op(op, ftrace_ops_list) { 380 if (op->flags & FTRACE_OPS_FL_PID) { 381 op->func = ftrace_pids_enabled(op) ? 382 ftrace_pid_func : op->saved_func; 383 ftrace_update_trampoline(op); 384 } 385 } while_for_each_ftrace_op(op); 386 387 update_ftrace_function(); 388 } 389 390 #ifdef CONFIG_FUNCTION_PROFILER 391 struct ftrace_profile { 392 struct hlist_node node; 393 unsigned long ip; 394 unsigned long counter; 395 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 396 unsigned long long time; 397 unsigned long long time_squared; 398 #endif 399 }; 400 401 struct ftrace_profile_page { 402 struct ftrace_profile_page *next; 403 unsigned long index; 404 struct ftrace_profile records[]; 405 }; 406 407 struct ftrace_profile_stat { 408 atomic_t disabled; 409 struct hlist_head *hash; 410 struct ftrace_profile_page *pages; 411 struct ftrace_profile_page *start; 412 struct tracer_stat stat; 413 }; 414 415 #define PROFILE_RECORDS_SIZE \ 416 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) 417 418 #define PROFILES_PER_PAGE \ 419 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) 420 421 static int ftrace_profile_enabled __read_mostly; 422 423 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ 424 static DEFINE_MUTEX(ftrace_profile_lock); 425 426 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); 427 428 #define FTRACE_PROFILE_HASH_BITS 10 429 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS) 430 431 static void * 432 function_stat_next(void *v, int idx) 433 { 434 struct ftrace_profile *rec = v; 435 struct ftrace_profile_page *pg; 436 437 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); 438 439 again: 440 if (idx != 0) 441 rec++; 442 443 if ((void *)rec >= (void *)&pg->records[pg->index]) { 444 pg = pg->next; 445 if (!pg) 446 return NULL; 447 rec = &pg->records[0]; 448 if (!rec->counter) 449 goto again; 450 } 451 452 return rec; 453 } 454 455 static void *function_stat_start(struct tracer_stat *trace) 456 { 457 struct ftrace_profile_stat *stat = 458 container_of(trace, struct ftrace_profile_stat, stat); 459 460 if (!stat || !stat->start) 461 return NULL; 462 463 return function_stat_next(&stat->start->records[0], 0); 464 } 465 466 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 
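/*
 * Usage sketch (illustrative, not upstream documentation): the profiler
 * below is driven entirely through tracefs. Assuming tracefs is mounted
 * at /sys/kernel/tracing, something like
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0
 *
 * enables profiling and dumps the per-cpu statistics registered by
 * ftrace_profile_tracefs() further down. With CONFIG_FUNCTION_GRAPH_TRACER
 * the entries are sorted by total time, otherwise by hit count, as the two
 * function_stat_cmp() variants below show.
 */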
467 /* function graph compares on total time */ 468 static int function_stat_cmp(void *p1, void *p2) 469 { 470 struct ftrace_profile *a = p1; 471 struct ftrace_profile *b = p2; 472 473 if (a->time < b->time) 474 return -1; 475 if (a->time > b->time) 476 return 1; 477 else 478 return 0; 479 } 480 #else 481 /* not function graph compares against hits */ 482 static int function_stat_cmp(void *p1, void *p2) 483 { 484 struct ftrace_profile *a = p1; 485 struct ftrace_profile *b = p2; 486 487 if (a->counter < b->counter) 488 return -1; 489 if (a->counter > b->counter) 490 return 1; 491 else 492 return 0; 493 } 494 #endif 495 496 static int function_stat_headers(struct seq_file *m) 497 { 498 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 499 seq_puts(m, " Function " 500 "Hit Time Avg s^2\n" 501 " -------- " 502 "--- ---- --- ---\n"); 503 #else 504 seq_puts(m, " Function Hit\n" 505 " -------- ---\n"); 506 #endif 507 return 0; 508 } 509 510 static int function_stat_show(struct seq_file *m, void *v) 511 { 512 struct ftrace_profile *rec = v; 513 char str[KSYM_SYMBOL_LEN]; 514 int ret = 0; 515 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 516 static struct trace_seq s; 517 unsigned long long avg; 518 unsigned long long stddev; 519 #endif 520 mutex_lock(&ftrace_profile_lock); 521 522 /* we raced with function_profile_reset() */ 523 if (unlikely(rec->counter == 0)) { 524 ret = -EBUSY; 525 goto out; 526 } 527 528 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 529 avg = rec->time; 530 do_div(avg, rec->counter); 531 if (tracing_thresh && (avg < tracing_thresh)) 532 goto out; 533 #endif 534 535 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 536 seq_printf(m, " %-30.30s %10lu", str, rec->counter); 537 538 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 539 seq_puts(m, " "); 540 541 /* Sample standard deviation (s^2) */ 542 if (rec->counter <= 1) 543 stddev = 0; 544 else { 545 /* 546 * Apply Welford's method: 547 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) 548 */ 549 stddev = rec->counter * rec->time_squared - 550 rec->time * rec->time; 551 552 /* 553 * Divide only 1000 for ns^2 -> us^2 conversion. 554 * trace_print_graph_duration will divide 1000 again. 555 */ 556 do_div(stddev, rec->counter * (rec->counter - 1) * 1000); 557 } 558 559 trace_seq_init(&s); 560 trace_print_graph_duration(rec->time, &s); 561 trace_seq_puts(&s, " "); 562 trace_print_graph_duration(avg, &s); 563 trace_seq_puts(&s, " "); 564 trace_print_graph_duration(stddev, &s); 565 trace_print_seq(m, &s); 566 #endif 567 seq_putc(m, '\n'); 568 out: 569 mutex_unlock(&ftrace_profile_lock); 570 571 return ret; 572 } 573 574 static void ftrace_profile_reset(struct ftrace_profile_stat *stat) 575 { 576 struct ftrace_profile_page *pg; 577 578 pg = stat->pages = stat->start; 579 580 while (pg) { 581 memset(pg->records, 0, PROFILE_RECORDS_SIZE); 582 pg->index = 0; 583 pg = pg->next; 584 } 585 586 memset(stat->hash, 0, 587 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); 588 } 589 590 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) 591 { 592 struct ftrace_profile_page *pg; 593 int functions; 594 int pages; 595 int i; 596 597 /* If we already allocated, do nothing */ 598 if (stat->pages) 599 return 0; 600 601 stat->pages = (void *)get_zeroed_page(GFP_KERNEL); 602 if (!stat->pages) 603 return -ENOMEM; 604 605 #ifdef CONFIG_DYNAMIC_FTRACE 606 functions = ftrace_update_tot_cnt; 607 #else 608 /* 609 * We do not know the number of functions that exist because 610 * dynamic tracing is what counts them. With past experience 611 * we have around 20K functions. 
That should be more than enough. 612 * It is highly unlikely we will execute every function in 613 * the kernel. 614 */ 615 functions = 20000; 616 #endif 617 618 pg = stat->start = stat->pages; 619 620 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); 621 622 for (i = 1; i < pages; i++) { 623 pg->next = (void *)get_zeroed_page(GFP_KERNEL); 624 if (!pg->next) 625 goto out_free; 626 pg = pg->next; 627 } 628 629 return 0; 630 631 out_free: 632 pg = stat->start; 633 while (pg) { 634 unsigned long tmp = (unsigned long)pg; 635 636 pg = pg->next; 637 free_page(tmp); 638 } 639 640 stat->pages = NULL; 641 stat->start = NULL; 642 643 return -ENOMEM; 644 } 645 646 static int ftrace_profile_init_cpu(int cpu) 647 { 648 struct ftrace_profile_stat *stat; 649 int size; 650 651 stat = &per_cpu(ftrace_profile_stats, cpu); 652 653 if (stat->hash) { 654 /* If the profile is already created, simply reset it */ 655 ftrace_profile_reset(stat); 656 return 0; 657 } 658 659 /* 660 * We are profiling all functions, but usually only a few thousand 661 * functions are hit. We'll make a hash of 1024 items. 662 */ 663 size = FTRACE_PROFILE_HASH_SIZE; 664 665 stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL); 666 667 if (!stat->hash) 668 return -ENOMEM; 669 670 /* Preallocate the function profiling pages */ 671 if (ftrace_profile_pages_init(stat) < 0) { 672 kfree(stat->hash); 673 stat->hash = NULL; 674 return -ENOMEM; 675 } 676 677 return 0; 678 } 679 680 static int ftrace_profile_init(void) 681 { 682 int cpu; 683 int ret = 0; 684 685 for_each_possible_cpu(cpu) { 686 ret = ftrace_profile_init_cpu(cpu); 687 if (ret) 688 break; 689 } 690 691 return ret; 692 } 693 694 /* interrupts must be disabled */ 695 static struct ftrace_profile * 696 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) 697 { 698 struct ftrace_profile *rec; 699 struct hlist_head *hhd; 700 unsigned long key; 701 702 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS); 703 hhd = &stat->hash[key]; 704 705 if (hlist_empty(hhd)) 706 return NULL; 707 708 hlist_for_each_entry_rcu_notrace(rec, hhd, node) { 709 if (rec->ip == ip) 710 return rec; 711 } 712 713 return NULL; 714 } 715 716 static void ftrace_add_profile(struct ftrace_profile_stat *stat, 717 struct ftrace_profile *rec) 718 { 719 unsigned long key; 720 721 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); 722 hlist_add_head_rcu(&rec->node, &stat->hash[key]); 723 } 724 725 /* 726 * The memory is already allocated, this simply finds a new record to use. 
727 */ 728 static struct ftrace_profile * 729 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) 730 { 731 struct ftrace_profile *rec = NULL; 732 733 /* prevent recursion (from NMIs) */ 734 if (atomic_inc_return(&stat->disabled) != 1) 735 goto out; 736 737 /* 738 * Try to find the function again since an NMI 739 * could have added it 740 */ 741 rec = ftrace_find_profiled_func(stat, ip); 742 if (rec) 743 goto out; 744 745 if (stat->pages->index == PROFILES_PER_PAGE) { 746 if (!stat->pages->next) 747 goto out; 748 stat->pages = stat->pages->next; 749 } 750 751 rec = &stat->pages->records[stat->pages->index++]; 752 rec->ip = ip; 753 ftrace_add_profile(stat, rec); 754 755 out: 756 atomic_dec(&stat->disabled); 757 758 return rec; 759 } 760 761 static void 762 function_profile_call(unsigned long ip, unsigned long parent_ip, 763 struct ftrace_ops *ops, struct pt_regs *regs) 764 { 765 struct ftrace_profile_stat *stat; 766 struct ftrace_profile *rec; 767 unsigned long flags; 768 769 if (!ftrace_profile_enabled) 770 return; 771 772 local_irq_save(flags); 773 774 stat = this_cpu_ptr(&ftrace_profile_stats); 775 if (!stat->hash || !ftrace_profile_enabled) 776 goto out; 777 778 rec = ftrace_find_profiled_func(stat, ip); 779 if (!rec) { 780 rec = ftrace_profile_alloc(stat, ip); 781 if (!rec) 782 goto out; 783 } 784 785 rec->counter++; 786 out: 787 local_irq_restore(flags); 788 } 789 790 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 791 static bool fgraph_graph_time = true; 792 793 void ftrace_graph_graph_time_control(bool enable) 794 { 795 fgraph_graph_time = enable; 796 } 797 798 static int profile_graph_entry(struct ftrace_graph_ent *trace) 799 { 800 struct ftrace_ret_stack *ret_stack; 801 802 function_profile_call(trace->func, 0, NULL, NULL); 803 804 /* If function graph is shutting down, ret_stack can be NULL */ 805 if (!current->ret_stack) 806 return 0; 807 808 ret_stack = ftrace_graph_get_ret_stack(current, 0); 809 if (ret_stack) 810 ret_stack->subtime = 0; 811 812 return 1; 813 } 814 815 static void profile_graph_return(struct ftrace_graph_ret *trace) 816 { 817 struct ftrace_ret_stack *ret_stack; 818 struct ftrace_profile_stat *stat; 819 unsigned long long calltime; 820 struct ftrace_profile *rec; 821 unsigned long flags; 822 823 local_irq_save(flags); 824 stat = this_cpu_ptr(&ftrace_profile_stats); 825 if (!stat->hash || !ftrace_profile_enabled) 826 goto out; 827 828 /* If the calltime was zero'd ignore it */ 829 if (!trace->calltime) 830 goto out; 831 832 calltime = trace->rettime - trace->calltime; 833 834 if (!fgraph_graph_time) { 835 836 /* Append this call time to the parent time to subtract */ 837 ret_stack = ftrace_graph_get_ret_stack(current, 1); 838 if (ret_stack) 839 ret_stack->subtime += calltime; 840 841 ret_stack = ftrace_graph_get_ret_stack(current, 0); 842 if (ret_stack && ret_stack->subtime < calltime) 843 calltime -= ret_stack->subtime; 844 else 845 calltime = 0; 846 } 847 848 rec = ftrace_find_profiled_func(stat, trace->func); 849 if (rec) { 850 rec->time += calltime; 851 rec->time_squared += calltime * calltime; 852 } 853 854 out: 855 local_irq_restore(flags); 856 } 857 858 static struct fgraph_ops fprofiler_ops = { 859 .entryfunc = &profile_graph_entry, 860 .retfunc = &profile_graph_return, 861 }; 862 863 static int register_ftrace_profiler(void) 864 { 865 return register_ftrace_graph(&fprofiler_ops); 866 } 867 868 static void unregister_ftrace_profiler(void) 869 { 870 unregister_ftrace_graph(&fprofiler_ops); 871 } 872 #else 873 static struct ftrace_ops 
ftrace_profile_ops __read_mostly = { 874 .func = function_profile_call, 875 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 876 INIT_OPS_HASH(ftrace_profile_ops) 877 }; 878 879 static int register_ftrace_profiler(void) 880 { 881 return register_ftrace_function(&ftrace_profile_ops); 882 } 883 884 static void unregister_ftrace_profiler(void) 885 { 886 unregister_ftrace_function(&ftrace_profile_ops); 887 } 888 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 889 890 static ssize_t 891 ftrace_profile_write(struct file *filp, const char __user *ubuf, 892 size_t cnt, loff_t *ppos) 893 { 894 unsigned long val; 895 int ret; 896 897 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 898 if (ret) 899 return ret; 900 901 val = !!val; 902 903 mutex_lock(&ftrace_profile_lock); 904 if (ftrace_profile_enabled ^ val) { 905 if (val) { 906 ret = ftrace_profile_init(); 907 if (ret < 0) { 908 cnt = ret; 909 goto out; 910 } 911 912 ret = register_ftrace_profiler(); 913 if (ret < 0) { 914 cnt = ret; 915 goto out; 916 } 917 ftrace_profile_enabled = 1; 918 } else { 919 ftrace_profile_enabled = 0; 920 /* 921 * unregister_ftrace_profiler calls stop_machine 922 * so this acts like an synchronize_rcu. 923 */ 924 unregister_ftrace_profiler(); 925 } 926 } 927 out: 928 mutex_unlock(&ftrace_profile_lock); 929 930 *ppos += cnt; 931 932 return cnt; 933 } 934 935 static ssize_t 936 ftrace_profile_read(struct file *filp, char __user *ubuf, 937 size_t cnt, loff_t *ppos) 938 { 939 char buf[64]; /* big enough to hold a number */ 940 int r; 941 942 r = sprintf(buf, "%u\n", ftrace_profile_enabled); 943 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 944 } 945 946 static const struct file_operations ftrace_profile_fops = { 947 .open = tracing_open_generic, 948 .read = ftrace_profile_read, 949 .write = ftrace_profile_write, 950 .llseek = default_llseek, 951 }; 952 953 /* used to initialize the real stat files */ 954 static struct tracer_stat function_stats __initdata = { 955 .name = "functions", 956 .stat_start = function_stat_start, 957 .stat_next = function_stat_next, 958 .stat_cmp = function_stat_cmp, 959 .stat_headers = function_stat_headers, 960 .stat_show = function_stat_show 961 }; 962 963 static __init void ftrace_profile_tracefs(struct dentry *d_tracer) 964 { 965 struct ftrace_profile_stat *stat; 966 struct dentry *entry; 967 char *name; 968 int ret; 969 int cpu; 970 971 for_each_possible_cpu(cpu) { 972 stat = &per_cpu(ftrace_profile_stats, cpu); 973 974 name = kasprintf(GFP_KERNEL, "function%d", cpu); 975 if (!name) { 976 /* 977 * The files created are permanent, if something happens 978 * we still do not free memory. 979 */ 980 WARN(1, 981 "Could not allocate stat file for cpu %d\n", 982 cpu); 983 return; 984 } 985 stat->stat = function_stats; 986 stat->stat.name = name; 987 ret = register_stat_tracer(&stat->stat); 988 if (ret) { 989 WARN(1, 990 "Could not register function stat for cpu %d\n", 991 cpu); 992 kfree(name); 993 return; 994 } 995 } 996 997 entry = tracefs_create_file("function_profile_enabled", 0644, 998 d_tracer, NULL, &ftrace_profile_fops); 999 if (!entry) 1000 pr_warn("Could not create tracefs 'function_profile_enabled' entry\n"); 1001 } 1002 1003 #else /* CONFIG_FUNCTION_PROFILER */ 1004 static __init void ftrace_profile_tracefs(struct dentry *d_tracer) 1005 { 1006 } 1007 #endif /* CONFIG_FUNCTION_PROFILER */ 1008 1009 #ifdef CONFIG_DYNAMIC_FTRACE 1010 1011 static struct ftrace_ops *removed_ops; 1012 1013 /* 1014 * Set when doing a global update, like enabling all recs or disabling them. 
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
1095 */ 1096 bool is_ftrace_trampoline(unsigned long addr) 1097 { 1098 return ftrace_ops_trampoline(addr) != NULL; 1099 } 1100 1101 struct ftrace_page { 1102 struct ftrace_page *next; 1103 struct dyn_ftrace *records; 1104 int index; 1105 int size; 1106 }; 1107 1108 #define ENTRY_SIZE sizeof(struct dyn_ftrace) 1109 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) 1110 1111 /* estimate from running different kernels */ 1112 #define NR_TO_INIT 10000 1113 1114 static struct ftrace_page *ftrace_pages_start; 1115 static struct ftrace_page *ftrace_pages; 1116 1117 static __always_inline unsigned long 1118 ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip) 1119 { 1120 if (hash->size_bits > 0) 1121 return hash_long(ip, hash->size_bits); 1122 1123 return 0; 1124 } 1125 1126 /* Only use this function if ftrace_hash_empty() has already been tested */ 1127 static __always_inline struct ftrace_func_entry * 1128 __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) 1129 { 1130 unsigned long key; 1131 struct ftrace_func_entry *entry; 1132 struct hlist_head *hhd; 1133 1134 key = ftrace_hash_key(hash, ip); 1135 hhd = &hash->buckets[key]; 1136 1137 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) { 1138 if (entry->ip == ip) 1139 return entry; 1140 } 1141 return NULL; 1142 } 1143 1144 /** 1145 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash 1146 * @hash: The hash to look at 1147 * @ip: The instruction pointer to test 1148 * 1149 * Search a given @hash to see if a given instruction pointer (@ip) 1150 * exists in it. 1151 * 1152 * Returns the entry that holds the @ip if found. NULL otherwise. 1153 */ 1154 struct ftrace_func_entry * 1155 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) 1156 { 1157 if (ftrace_hash_empty(hash)) 1158 return NULL; 1159 1160 return __ftrace_lookup_ip(hash, ip); 1161 } 1162 1163 static void __add_hash_entry(struct ftrace_hash *hash, 1164 struct ftrace_func_entry *entry) 1165 { 1166 struct hlist_head *hhd; 1167 unsigned long key; 1168 1169 key = ftrace_hash_key(hash, entry->ip); 1170 hhd = &hash->buckets[key]; 1171 hlist_add_head(&entry->hlist, hhd); 1172 hash->count++; 1173 } 1174 1175 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) 1176 { 1177 struct ftrace_func_entry *entry; 1178 1179 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1180 if (!entry) 1181 return -ENOMEM; 1182 1183 entry->ip = ip; 1184 __add_hash_entry(hash, entry); 1185 1186 return 0; 1187 } 1188 1189 static void 1190 free_hash_entry(struct ftrace_hash *hash, 1191 struct ftrace_func_entry *entry) 1192 { 1193 hlist_del(&entry->hlist); 1194 kfree(entry); 1195 hash->count--; 1196 } 1197 1198 static void 1199 remove_hash_entry(struct ftrace_hash *hash, 1200 struct ftrace_func_entry *entry) 1201 { 1202 hlist_del_rcu(&entry->hlist); 1203 hash->count--; 1204 } 1205 1206 static void ftrace_hash_clear(struct ftrace_hash *hash) 1207 { 1208 struct hlist_head *hhd; 1209 struct hlist_node *tn; 1210 struct ftrace_func_entry *entry; 1211 int size = 1 << hash->size_bits; 1212 int i; 1213 1214 if (!hash->count) 1215 return; 1216 1217 for (i = 0; i < size; i++) { 1218 hhd = &hash->buckets[i]; 1219 hlist_for_each_entry_safe(entry, tn, hhd, hlist) 1220 free_hash_entry(hash, entry); 1221 } 1222 FTRACE_WARN_ON(hash->count); 1223 } 1224 1225 static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod) 1226 { 1227 list_del(&ftrace_mod->list); 1228 kfree(ftrace_mod->module); 1229 kfree(ftrace_mod->func); 1230 kfree(ftrace_mod); 1231 } 1232 1233 static void 
clear_ftrace_mod_list(struct list_head *head) 1234 { 1235 struct ftrace_mod_load *p, *n; 1236 1237 /* stack tracer isn't supported yet */ 1238 if (!head) 1239 return; 1240 1241 mutex_lock(&ftrace_lock); 1242 list_for_each_entry_safe(p, n, head, list) 1243 free_ftrace_mod(p); 1244 mutex_unlock(&ftrace_lock); 1245 } 1246 1247 static void free_ftrace_hash(struct ftrace_hash *hash) 1248 { 1249 if (!hash || hash == EMPTY_HASH) 1250 return; 1251 ftrace_hash_clear(hash); 1252 kfree(hash->buckets); 1253 kfree(hash); 1254 } 1255 1256 static void __free_ftrace_hash_rcu(struct rcu_head *rcu) 1257 { 1258 struct ftrace_hash *hash; 1259 1260 hash = container_of(rcu, struct ftrace_hash, rcu); 1261 free_ftrace_hash(hash); 1262 } 1263 1264 static void free_ftrace_hash_rcu(struct ftrace_hash *hash) 1265 { 1266 if (!hash || hash == EMPTY_HASH) 1267 return; 1268 call_rcu(&hash->rcu, __free_ftrace_hash_rcu); 1269 } 1270 1271 void ftrace_free_filter(struct ftrace_ops *ops) 1272 { 1273 ftrace_ops_init(ops); 1274 free_ftrace_hash(ops->func_hash->filter_hash); 1275 free_ftrace_hash(ops->func_hash->notrace_hash); 1276 } 1277 1278 static struct ftrace_hash *alloc_ftrace_hash(int size_bits) 1279 { 1280 struct ftrace_hash *hash; 1281 int size; 1282 1283 hash = kzalloc(sizeof(*hash), GFP_KERNEL); 1284 if (!hash) 1285 return NULL; 1286 1287 size = 1 << size_bits; 1288 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); 1289 1290 if (!hash->buckets) { 1291 kfree(hash); 1292 return NULL; 1293 } 1294 1295 hash->size_bits = size_bits; 1296 1297 return hash; 1298 } 1299 1300 1301 static int ftrace_add_mod(struct trace_array *tr, 1302 const char *func, const char *module, 1303 int enable) 1304 { 1305 struct ftrace_mod_load *ftrace_mod; 1306 struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace; 1307 1308 ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL); 1309 if (!ftrace_mod) 1310 return -ENOMEM; 1311 1312 ftrace_mod->func = kstrdup(func, GFP_KERNEL); 1313 ftrace_mod->module = kstrdup(module, GFP_KERNEL); 1314 ftrace_mod->enable = enable; 1315 1316 if (!ftrace_mod->func || !ftrace_mod->module) 1317 goto out_free; 1318 1319 list_add(&ftrace_mod->list, mod_head); 1320 1321 return 0; 1322 1323 out_free: 1324 free_ftrace_mod(ftrace_mod); 1325 1326 return -ENOMEM; 1327 } 1328 1329 static struct ftrace_hash * 1330 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) 1331 { 1332 struct ftrace_func_entry *entry; 1333 struct ftrace_hash *new_hash; 1334 int size; 1335 int ret; 1336 int i; 1337 1338 new_hash = alloc_ftrace_hash(size_bits); 1339 if (!new_hash) 1340 return NULL; 1341 1342 if (hash) 1343 new_hash->flags = hash->flags; 1344 1345 /* Empty hash? 
 */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int i;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

	return new_hash;
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}
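/*
 * Worked example (illustrative, not upstream documentation): with a
 * filter_hash containing only 'schedule' and an empty notrace_hash,
 * hash_contains_ip() above returns true only for schedule's ip. With
 * both hashes empty it returns true for every ip, which is what makes
 * an empty ops hash mean "trace everything".
 */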
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}


static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or call to the function tracer. It checks the ftrace
 * internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contain a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled if
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 0);
}

static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	return __ftrace_hash_rec_update(ops, filter_hash, 1);
}
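/*
 * Illustrative note (not upstream documentation): the low bits of
 * rec->flags act as a reference count, capped at FTRACE_REF_MAX. If two
 * registered ops both match a function, ftrace_rec_count(rec) is 2, and
 * the call site is only turned back into a nop once
 * __ftrace_hash_rec_update() has decremented the count to zero for the
 * last ops.
 */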
static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

/*
 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
 * or there is no need to update, -EBUSY if it detects a conflict of the
 * flag on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
 * Note that old_hash and new_hash have the following meanings:
 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
 * - If the hash is EMPTY_HASH, it hits nothing
 * - Anything else hits the recs which match the hash entries.
 */
static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
					 struct ftrace_hash *old_hash,
					 struct ftrace_hash *new_hash)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec, *end = NULL;
	int in_old, in_new;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return 0;

	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
		return 0;

	/*
	 * Since the IPMODIFY is a very address sensitive action, we do not
	 * allow ftrace_ops to set all functions to new hash.
	 */
	if (!new_hash || !old_hash)
		return -EINVAL;

	/* Update rec->flags */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		/* We need to update only differences of filter_hash */
		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new) {
			/* New entries must ensure no others are using it */
			if (rec->flags & FTRACE_FL_IPMODIFY)
				goto rollback;
			rec->flags |= FTRACE_FL_IPMODIFY;
		} else /* Removed entry */
			rec->flags &= ~FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

	return 0;

rollback:
	end = rec;

	/* Roll back what we did above */
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (rec == end)
			goto err_out;

		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
		if (in_old == in_new)
			continue;

		if (in_new)
			rec->flags &= ~FTRACE_FL_IPMODIFY;
		else
			rec->flags |= FTRACE_FL_IPMODIFY;
	} while_for_each_ftrace_rec();

err_out:
	return -EBUSY;
}

static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
}

/* Disabling always succeeds */
static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
{
	struct ftrace_hash *hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(hash))
		hash = NULL;

	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
}
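/*
 * Example of the transitions handled above (illustrative only; a live
 * patch is the typical IPMODIFY user): when an IPMODIFY ops is registered
 * for one function F, ftrace_hash_ipmodify_enable() passes
 * EMPTY_HASH -> {F} and sets FTRACE_FL_IPMODIFY on F's record. A second
 * IPMODIFY ops targeting F then hits the rollback path in
 * __ftrace_hash_update_ipmodify() and its registration fails with -EBUSY.
 */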
static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash)
{
	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;

	if (ftrace_hash_empty(old_hash))
		old_hash = NULL;

	if (ftrace_hash_empty(new_hash))
		new_hash = NULL;

	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}

static void print_ip_ins(const char *fmt, const unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;

static void print_bug_type(void)
{
	switch (ftrace_bug_type) {
	case FTRACE_BUG_UNKNOWN:
		break;
	case FTRACE_BUG_INIT:
		pr_info("Initializing ftrace call sites\n");
		break;
	case FTRACE_BUG_NOP:
		pr_info("Setting ftrace call site to NOP\n");
		break;
	case FTRACE_BUG_CALL:
		pr_info("Setting ftrace call site to call ftrace function\n");
		break;
	case FTRACE_BUG_UPDATE:
		pr_info("Updating ftrace call site to call a different ftrace function\n");
		break;
	}
}

/**
 * ftrace_bug - report and shutdown function tracer
 * @failed: The failed type (EFAULT, EINVAL, EPERM)
 * @rec: The record that failed
 *
 * The arch code that enables or disables the function tracing
 * can call ftrace_bug() when it has detected a problem in
 * modifying the code. @failed should be one of either:
 * EFAULT - if the problem happens on reading the @ip address
 * EINVAL - if what is read at @ip is not what was expected
 * EPERM - if the problem happens on writing to the @ip address
 */
void ftrace_bug(int failed, struct dyn_ftrace *rec)
{
	unsigned long ip = rec ? rec->ip : 0;

	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		pr_cont("\n");
		if (ftrace_expected) {
			print_ip_ins(" expected: ", ftrace_expected);
			pr_cont("\n");
		}
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
	print_bug_type();
	if (rec) {
		struct ftrace_ops *ops = NULL;

		pr_info("ftrace record flags: %lx\n", rec->flags);
		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
			rec->flags & FTRACE_FL_REGS ? " R" : " ");
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			ops = ftrace_find_tramp_ops_any(rec);
			if (ops) {
				do {
					pr_cont("\ttramp: %pS (%pS)",
						(void *)ops->trampoline,
						(void *)ops->func);
					ops = ftrace_find_tramp_ops_next(rec, ops);
				} while (ops);
			} else
				pr_cont("\ttramp: ERROR!");

		}
		ip = ftrace_get_addr_curr(rec);
		pr_cont("\n expected tramp: %lx\n", ip);
	}
}

static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
	unsigned long flag = 0UL;

	ftrace_bug_type = FTRACE_BUG_UNKNOWN;

	if (rec->flags & FTRACE_FL_DISABLED)
		return FTRACE_UPDATE_IGNORE;

	/*
	 * If we are updating calls:
	 *
	 *   If the record has a ref count, then we need to enable it
	 *   because someone is using it.
	 *
	 *   Otherwise we make sure it's disabled.
	 *
	 * If we are disabling calls, then disable all records that
	 * are enabled.
	 */
	if (enable && ftrace_rec_count(rec))
		flag = FTRACE_FL_ENABLED;

	/*
	 * If enabling and the REGS flag does not match the REGS_EN, or
	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
	 * this record. Set flags to fail the compare against ENABLED.
	 */
	if (flag) {
		if (!(rec->flags & FTRACE_FL_REGS) !=
		    !(rec->flags & FTRACE_FL_REGS_EN))
			flag |= FTRACE_FL_REGS;

		if (!(rec->flags & FTRACE_FL_TRAMP) !=
		    !(rec->flags & FTRACE_FL_TRAMP_EN))
			flag |= FTRACE_FL_TRAMP;
	}

	/* If the state of this record hasn't changed, then do nothing */
	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
		return FTRACE_UPDATE_IGNORE;

	if (flag) {
		/* Save off if rec is being enabled (for return value) */
		flag ^= rec->flags & FTRACE_FL_ENABLED;

		if (update) {
			rec->flags |= FTRACE_FL_ENABLED;
			if (flag & FTRACE_FL_REGS) {
				if (rec->flags & FTRACE_FL_REGS)
					rec->flags |= FTRACE_FL_REGS_EN;
				else
					rec->flags &= ~FTRACE_FL_REGS_EN;
			}
			if (flag & FTRACE_FL_TRAMP) {
				if (rec->flags & FTRACE_FL_TRAMP)
					rec->flags |= FTRACE_FL_TRAMP_EN;
				else
					rec->flags &= ~FTRACE_FL_TRAMP_EN;
			}
		}

		/*
		 * If this record is being updated from a nop, then
		 *   return UPDATE_MAKE_CALL.
		 * Otherwise,
		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
		 *   from the save regs, to a non-save regs function or
		 *   vice versa, or from a trampoline call.
		 */
		if (flag & FTRACE_FL_ENABLED) {
			ftrace_bug_type = FTRACE_BUG_CALL;
			return FTRACE_UPDATE_MAKE_CALL;
		}

		ftrace_bug_type = FTRACE_BUG_UPDATE;
		return FTRACE_UPDATE_MODIFY_CALL;
	}

	if (update) {
		/* If there are no more users, clear all flags */
		if (!ftrace_rec_count(rec))
			rec->flags = 0;
		else
			/*
			 * Just disable the record, but keep the ops TRAMP
			 * and REGS states. The _EN flags must be disabled though.
			 */
			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
					FTRACE_FL_REGS_EN);
	}

	ftrace_bug_type = FTRACE_BUG_NOP;
	return FTRACE_UPDATE_MAKE_NOP;
}

/**
 * ftrace_update_record, set a record that now is tracing or not
 * @rec: the record to update
 * @enable: set to 1 if the record is tracing, zero to force disable
 *
 * The records that represent all functions that can be traced need
 * to be updated when tracing has been enabled.
2156 */ 2157 int ftrace_update_record(struct dyn_ftrace *rec, int enable) 2158 { 2159 return ftrace_check_record(rec, enable, 1); 2160 } 2161 2162 /** 2163 * ftrace_test_record, check if the record has been enabled or not 2164 * @rec: the record to test 2165 * @enable: set to 1 to check if enabled, 0 if it is disabled 2166 * 2167 * The arch code may need to test if a record is already set to 2168 * tracing to determine how to modify the function code that it 2169 * represents. 2170 */ 2171 int ftrace_test_record(struct dyn_ftrace *rec, int enable) 2172 { 2173 return ftrace_check_record(rec, enable, 0); 2174 } 2175 2176 static struct ftrace_ops * 2177 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec) 2178 { 2179 struct ftrace_ops *op; 2180 unsigned long ip = rec->ip; 2181 2182 do_for_each_ftrace_op(op, ftrace_ops_list) { 2183 2184 if (!op->trampoline) 2185 continue; 2186 2187 if (hash_contains_ip(ip, op->func_hash)) 2188 return op; 2189 } while_for_each_ftrace_op(op); 2190 2191 return NULL; 2192 } 2193 2194 static struct ftrace_ops * 2195 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, 2196 struct ftrace_ops *op) 2197 { 2198 unsigned long ip = rec->ip; 2199 2200 while_for_each_ftrace_op(op) { 2201 2202 if (!op->trampoline) 2203 continue; 2204 2205 if (hash_contains_ip(ip, op->func_hash)) 2206 return op; 2207 } 2208 2209 return NULL; 2210 } 2211 2212 static struct ftrace_ops * 2213 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec) 2214 { 2215 struct ftrace_ops *op; 2216 unsigned long ip = rec->ip; 2217 2218 /* 2219 * Need to check removed ops first. 2220 * If they are being removed, and this rec has a tramp, 2221 * and this rec is in the ops list, then it would be the 2222 * one with the tramp. 2223 */ 2224 if (removed_ops) { 2225 if (hash_contains_ip(ip, &removed_ops->old_hash)) 2226 return removed_ops; 2227 } 2228 2229 /* 2230 * Need to find the current trampoline for a rec. 2231 * Now, a trampoline is only attached to a rec if there 2232 * was a single 'ops' attached to it. But this can be called 2233 * when we are adding another op to the rec or removing the 2234 * current one. Thus, if the op is being added, we can 2235 * ignore it because it hasn't attached itself to the rec 2236 * yet. 2237 * 2238 * If an ops is being modified (hooking to different functions) 2239 * then we don't care about the new functions that are being 2240 * added, just the old ones (that are probably being removed). 2241 * 2242 * If we are adding an ops to a function that already is using 2243 * a trampoline, it needs to be removed (trampolines are only 2244 * for single ops connected), then an ops that is not being 2245 * modified also needs to be checked. 2246 */ 2247 do_for_each_ftrace_op(op, ftrace_ops_list) { 2248 2249 if (!op->trampoline) 2250 continue; 2251 2252 /* 2253 * If the ops is being added, it hasn't gotten to 2254 * the point to be removed from this tree yet. 2255 */ 2256 if (op->flags & FTRACE_OPS_FL_ADDING) 2257 continue; 2258 2259 2260 /* 2261 * If the ops is being modified and is in the old 2262 * hash, then it is probably being removed from this 2263 * function. 2264 */ 2265 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && 2266 hash_contains_ip(ip, &op->old_hash)) 2267 return op; 2268 /* 2269 * If the ops is not being added or modified, and it's 2270 * in its normal filter hash, then this must be the one 2271 * we want! 
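 *
 * In short: a just-removed ops is checked first; after that we walk the
 * trampoline-owning ops that are not currently being added, matching
 * this ip against the old hash of an ops that is being modified, or
 * against its normal filter hash otherwise.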
2272 */ 2273 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) && 2274 hash_contains_ip(ip, op->func_hash)) 2275 return op; 2276 2277 } while_for_each_ftrace_op(op); 2278 2279 return NULL; 2280 } 2281 2282 static struct ftrace_ops * 2283 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec) 2284 { 2285 struct ftrace_ops *op; 2286 unsigned long ip = rec->ip; 2287 2288 do_for_each_ftrace_op(op, ftrace_ops_list) { 2289 /* pass rec in as regs to have non-NULL val */ 2290 if (hash_contains_ip(ip, op->func_hash)) 2291 return op; 2292 } while_for_each_ftrace_op(op); 2293 2294 return NULL; 2295 } 2296 2297 /** 2298 * ftrace_get_addr_new - Get the call address to set to 2299 * @rec: The ftrace record descriptor 2300 * 2301 * If the record has the FTRACE_FL_REGS flag set, that means that it 2302 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS 2303 * is not set, then it wants to convert to the normal callback. 2304 * 2305 * Returns the address of the trampoline to set to 2306 */ 2307 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) 2308 { 2309 struct ftrace_ops *ops; 2310 2311 /* Trampolines take precedence over regs */ 2312 if (rec->flags & FTRACE_FL_TRAMP) { 2313 ops = ftrace_find_tramp_ops_new(rec); 2314 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) { 2315 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n", 2316 (void *)rec->ip, (void *)rec->ip, rec->flags); 2317 /* Ftrace is shutting down, return anything */ 2318 return (unsigned long)FTRACE_ADDR; 2319 } 2320 return ops->trampoline; 2321 } 2322 2323 if (rec->flags & FTRACE_FL_REGS) 2324 return (unsigned long)FTRACE_REGS_ADDR; 2325 else 2326 return (unsigned long)FTRACE_ADDR; 2327 } 2328 2329 /** 2330 * ftrace_get_addr_curr - Get the call address that is already there 2331 * @rec: The ftrace record descriptor 2332 * 2333 * The FTRACE_FL_REGS_EN is set when the record already points to 2334 * a function that saves all the regs. Basically the '_EN' version 2335 * represents the current state of the function.
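 *
 * Loosely speaking, ftrace_get_addr_curr() and ftrace_get_addr_new()
 * form a pair: _curr reports what the call site is expected to contain
 * right now (used as the "old" address when validating or removing a
 * call), while _new reports what the site should be changed to.
 * __ftrace_replace_code() below uses the two together.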
2336 * 2337 * Returns the address of the trampoline that is currently being called 2338 */ 2339 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) 2340 { 2341 struct ftrace_ops *ops; 2342 2343 /* Trampolines take precedence over regs */ 2344 if (rec->flags & FTRACE_FL_TRAMP_EN) { 2345 ops = ftrace_find_tramp_ops_curr(rec); 2346 if (FTRACE_WARN_ON(!ops)) { 2347 pr_warn("Bad trampoline accounting at: %p (%pS)\n", 2348 (void *)rec->ip, (void *)rec->ip); 2349 /* Ftrace is shutting down, return anything */ 2350 return (unsigned long)FTRACE_ADDR; 2351 } 2352 return ops->trampoline; 2353 } 2354 2355 if (rec->flags & FTRACE_FL_REGS_EN) 2356 return (unsigned long)FTRACE_REGS_ADDR; 2357 else 2358 return (unsigned long)FTRACE_ADDR; 2359 } 2360 2361 static int 2362 __ftrace_replace_code(struct dyn_ftrace *rec, int enable) 2363 { 2364 unsigned long ftrace_old_addr; 2365 unsigned long ftrace_addr; 2366 int ret; 2367 2368 ftrace_addr = ftrace_get_addr_new(rec); 2369 2370 /* This needs to be done before we call ftrace_update_record */ 2371 ftrace_old_addr = ftrace_get_addr_curr(rec); 2372 2373 ret = ftrace_update_record(rec, enable); 2374 2375 ftrace_bug_type = FTRACE_BUG_UNKNOWN; 2376 2377 switch (ret) { 2378 case FTRACE_UPDATE_IGNORE: 2379 return 0; 2380 2381 case FTRACE_UPDATE_MAKE_CALL: 2382 ftrace_bug_type = FTRACE_BUG_CALL; 2383 return ftrace_make_call(rec, ftrace_addr); 2384 2385 case FTRACE_UPDATE_MAKE_NOP: 2386 ftrace_bug_type = FTRACE_BUG_NOP; 2387 return ftrace_make_nop(NULL, rec, ftrace_old_addr); 2388 2389 case FTRACE_UPDATE_MODIFY_CALL: 2390 ftrace_bug_type = FTRACE_BUG_UPDATE; 2391 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 2392 } 2393 2394 return -1; /* unknow ftrace bug */ 2395 } 2396 2397 void __weak ftrace_replace_code(int mod_flags) 2398 { 2399 struct dyn_ftrace *rec; 2400 struct ftrace_page *pg; 2401 int enable = mod_flags & FTRACE_MODIFY_ENABLE_FL; 2402 int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL; 2403 int failed; 2404 2405 if (unlikely(ftrace_disabled)) 2406 return; 2407 2408 do_for_each_ftrace_rec(pg, rec) { 2409 2410 if (rec->flags & FTRACE_FL_DISABLED) 2411 continue; 2412 2413 failed = __ftrace_replace_code(rec, enable); 2414 if (failed) { 2415 ftrace_bug(failed, rec); 2416 /* Stop processing */ 2417 return; 2418 } 2419 if (schedulable) 2420 cond_resched(); 2421 } while_for_each_ftrace_rec(); 2422 } 2423 2424 struct ftrace_rec_iter { 2425 struct ftrace_page *pg; 2426 int index; 2427 }; 2428 2429 /** 2430 * ftrace_rec_iter_start, start up iterating over traced functions 2431 * 2432 * Returns an iterator handle that is used to iterate over all 2433 * the records that represent address locations where functions 2434 * are traced. 2435 * 2436 * May return NULL if no records are available. 2437 */ 2438 struct ftrace_rec_iter *ftrace_rec_iter_start(void) 2439 { 2440 /* 2441 * We only use a single iterator. 2442 * Protected by the ftrace_lock mutex. 2443 */ 2444 static struct ftrace_rec_iter ftrace_rec_iter; 2445 struct ftrace_rec_iter *iter = &ftrace_rec_iter; 2446 2447 iter->pg = ftrace_pages_start; 2448 iter->index = 0; 2449 2450 /* Could have empty pages */ 2451 while (iter->pg && !iter->pg->index) 2452 iter->pg = iter->pg->next; 2453 2454 if (!iter->pg) 2455 return NULL; 2456 2457 return iter; 2458 } 2459 2460 /** 2461 * ftrace_rec_iter_next, get the next record to process. 2462 * @iter: The handle to the iterator. 2463 * 2464 * Returns the next iterator after the given iterator @iter. 
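 *
 * A minimal usage sketch for arch code that needs to walk every record
 * (the for_ftrace_rec_iter() helper in linux/ftrace.h wraps this same
 * loop); as noted in ftrace_rec_iter_start(), the caller is expected to
 * hold ftrace_lock:
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... use rec->ip ...
 *	}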
2465 */ 2466 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) 2467 { 2468 iter->index++; 2469 2470 if (iter->index >= iter->pg->index) { 2471 iter->pg = iter->pg->next; 2472 iter->index = 0; 2473 2474 /* Could have empty pages */ 2475 while (iter->pg && !iter->pg->index) 2476 iter->pg = iter->pg->next; 2477 } 2478 2479 if (!iter->pg) 2480 return NULL; 2481 2482 return iter; 2483 } 2484 2485 /** 2486 * ftrace_rec_iter_record, get the record at the iterator location 2487 * @iter: The current iterator location 2488 * 2489 * Returns the record that the current @iter is at. 2490 */ 2491 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) 2492 { 2493 return &iter->pg->records[iter->index]; 2494 } 2495 2496 static int 2497 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) 2498 { 2499 int ret; 2500 2501 if (unlikely(ftrace_disabled)) 2502 return 0; 2503 2504 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); 2505 if (ret) { 2506 ftrace_bug_type = FTRACE_BUG_INIT; 2507 ftrace_bug(ret, rec); 2508 return 0; 2509 } 2510 return 1; 2511 } 2512 2513 /* 2514 * archs can override this function if they must do something 2515 * before the modifying code is performed. 2516 */ 2517 int __weak ftrace_arch_code_modify_prepare(void) 2518 { 2519 return 0; 2520 } 2521 2522 /* 2523 * archs can override this function if they must do something 2524 * after the modifying code is performed. 2525 */ 2526 int __weak ftrace_arch_code_modify_post_process(void) 2527 { 2528 return 0; 2529 } 2530 2531 void ftrace_modify_all_code(int command) 2532 { 2533 int update = command & FTRACE_UPDATE_TRACE_FUNC; 2534 int mod_flags = 0; 2535 int err = 0; 2536 2537 if (command & FTRACE_MAY_SLEEP) 2538 mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL; 2539 2540 /* 2541 * If the ftrace_caller calls a ftrace_ops func directly, 2542 * we need to make sure that it only traces functions it 2543 * expects to trace. When doing the switch of functions, 2544 * we need to update to the ftrace_ops_list_func first 2545 * before the transition between old and new calls are set, 2546 * as the ftrace_ops_list_func will check the ops hashes 2547 * to make sure the ops are having the right functions 2548 * traced. 
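 *
 * The resulting order is roughly: 1) have the ftrace caller use
 * ftrace_ops_list_func, 2) update the individual call sites, and
 * 3) only then install the final ftrace_trace_function, once every
 * call site agrees with the new hashes.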
2549 */ 2550 if (update) { 2551 err = ftrace_update_ftrace_func(ftrace_ops_list_func); 2552 if (FTRACE_WARN_ON(err)) 2553 return; 2554 } 2555 2556 if (command & FTRACE_UPDATE_CALLS) 2557 ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL); 2558 else if (command & FTRACE_DISABLE_CALLS) 2559 ftrace_replace_code(mod_flags); 2560 2561 if (update && ftrace_trace_function != ftrace_ops_list_func) { 2562 function_trace_op = set_function_trace_op; 2563 smp_wmb(); 2564 /* If irqs are disabled, we are in stop machine */ 2565 if (!irqs_disabled()) 2566 smp_call_function(ftrace_sync_ipi, NULL, 1); 2567 err = ftrace_update_ftrace_func(ftrace_trace_function); 2568 if (FTRACE_WARN_ON(err)) 2569 return; 2570 } 2571 2572 if (command & FTRACE_START_FUNC_RET) 2573 err = ftrace_enable_ftrace_graph_caller(); 2574 else if (command & FTRACE_STOP_FUNC_RET) 2575 err = ftrace_disable_ftrace_graph_caller(); 2576 FTRACE_WARN_ON(err); 2577 } 2578 2579 static int __ftrace_modify_code(void *data) 2580 { 2581 int *command = data; 2582 2583 ftrace_modify_all_code(*command); 2584 2585 return 0; 2586 } 2587 2588 /** 2589 * ftrace_run_stop_machine, go back to the stop machine method 2590 * @command: The command to tell ftrace what to do 2591 * 2592 * If an arch needs to fall back to the stop machine method, then 2593 * it can call this function. 2594 */ 2595 void ftrace_run_stop_machine(int command) 2596 { 2597 stop_machine(__ftrace_modify_code, &command, NULL); 2598 } 2599 2600 /** 2601 * arch_ftrace_update_code, modify the code to trace or not trace 2602 * @command: The command that needs to be done 2603 * 2604 * Archs can override this function if they do not need to 2605 * run stop_machine() to modify code. 2606 */ 2607 void __weak arch_ftrace_update_code(int command) 2608 { 2609 ftrace_run_stop_machine(command); 2610 } 2611 2612 static void ftrace_run_update_code(int command) 2613 { 2614 int ret; 2615 2616 ret = ftrace_arch_code_modify_prepare(); 2617 FTRACE_WARN_ON(ret); 2618 if (ret) 2619 return; 2620 2621 /* 2622 * By default we use stop_machine() to modify the code. 2623 * But archs can do whatever they want as long as it 2624 * is safe. The stop_machine() is the safest, but also 2625 * produces the most overhead.
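 *
 * (Archs that can patch text safely at runtime override
 * arch_ftrace_update_code() below; x86, for example, avoids
 * stop_machine() with a breakpoint-based live patching scheme.)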
2626 */ 2627 arch_ftrace_update_code(command); 2628 2629 ret = ftrace_arch_code_modify_post_process(); 2630 FTRACE_WARN_ON(ret); 2631 } 2632 2633 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, 2634 struct ftrace_ops_hash *old_hash) 2635 { 2636 ops->flags |= FTRACE_OPS_FL_MODIFYING; 2637 ops->old_hash.filter_hash = old_hash->filter_hash; 2638 ops->old_hash.notrace_hash = old_hash->notrace_hash; 2639 ftrace_run_update_code(command); 2640 ops->old_hash.filter_hash = NULL; 2641 ops->old_hash.notrace_hash = NULL; 2642 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; 2643 } 2644 2645 static ftrace_func_t saved_ftrace_func; 2646 static int ftrace_start_up; 2647 2648 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops) 2649 { 2650 } 2651 2652 static void ftrace_startup_enable(int command) 2653 { 2654 if (saved_ftrace_func != ftrace_trace_function) { 2655 saved_ftrace_func = ftrace_trace_function; 2656 command |= FTRACE_UPDATE_TRACE_FUNC; 2657 } 2658 2659 if (!command || !ftrace_enabled) 2660 return; 2661 2662 ftrace_run_update_code(command); 2663 } 2664 2665 static void ftrace_startup_all(int command) 2666 { 2667 update_all_ops = true; 2668 ftrace_startup_enable(command); 2669 update_all_ops = false; 2670 } 2671 2672 int ftrace_startup(struct ftrace_ops *ops, int command) 2673 { 2674 int ret; 2675 2676 if (unlikely(ftrace_disabled)) 2677 return -ENODEV; 2678 2679 ret = __register_ftrace_function(ops); 2680 if (ret) 2681 return ret; 2682 2683 ftrace_start_up++; 2684 2685 /* 2686 * Note that ftrace probes uses this to start up 2687 * and modify functions it will probe. But we still 2688 * set the ADDING flag for modification, as probes 2689 * do not have trampolines. If they add them in the 2690 * future, then the probes will need to distinguish 2691 * between adding and updating probes. 2692 */ 2693 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING; 2694 2695 ret = ftrace_hash_ipmodify_enable(ops); 2696 if (ret < 0) { 2697 /* Rollback registration process */ 2698 __unregister_ftrace_function(ops); 2699 ftrace_start_up--; 2700 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2701 return ret; 2702 } 2703 2704 if (ftrace_hash_rec_enable(ops, 1)) 2705 command |= FTRACE_UPDATE_CALLS; 2706 2707 ftrace_startup_enable(command); 2708 2709 ops->flags &= ~FTRACE_OPS_FL_ADDING; 2710 2711 return 0; 2712 } 2713 2714 int ftrace_shutdown(struct ftrace_ops *ops, int command) 2715 { 2716 int ret; 2717 2718 if (unlikely(ftrace_disabled)) 2719 return -ENODEV; 2720 2721 ret = __unregister_ftrace_function(ops); 2722 if (ret) 2723 return ret; 2724 2725 ftrace_start_up--; 2726 /* 2727 * Just warn in case of unbalance, no need to kill ftrace, it's not 2728 * critical but the ftrace_call callers may be never nopped again after 2729 * further ftrace uses. 2730 */ 2731 WARN_ON_ONCE(ftrace_start_up < 0); 2732 2733 /* Disabling ipmodify never fails */ 2734 ftrace_hash_ipmodify_disable(ops); 2735 2736 if (ftrace_hash_rec_disable(ops, 1)) 2737 command |= FTRACE_UPDATE_CALLS; 2738 2739 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2740 2741 if (saved_ftrace_func != ftrace_trace_function) { 2742 saved_ftrace_func = ftrace_trace_function; 2743 command |= FTRACE_UPDATE_TRACE_FUNC; 2744 } 2745 2746 if (!command || !ftrace_enabled) { 2747 /* 2748 * If these are dynamic or per_cpu ops, they still 2749 * need their data freed. Since, function tracing is 2750 * not currently active, we can just free them 2751 * without synchronizing all CPUs. 
2752 */ 2753 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) 2754 goto free_ops; 2755 2756 return 0; 2757 } 2758 2759 /* 2760 * If the ops uses a trampoline, then it needs to be 2761 * tested first on update. 2762 */ 2763 ops->flags |= FTRACE_OPS_FL_REMOVING; 2764 removed_ops = ops; 2765 2766 /* The trampoline logic checks the old hashes */ 2767 ops->old_hash.filter_hash = ops->func_hash->filter_hash; 2768 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; 2769 2770 ftrace_run_update_code(command); 2771 2772 /* 2773 * If there's no more ops registered with ftrace, run a 2774 * sanity check to make sure all rec flags are cleared. 2775 */ 2776 if (rcu_dereference_protected(ftrace_ops_list, 2777 lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) { 2778 struct ftrace_page *pg; 2779 struct dyn_ftrace *rec; 2780 2781 do_for_each_ftrace_rec(pg, rec) { 2782 if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) 2783 pr_warn(" %pS flags:%lx\n", 2784 (void *)rec->ip, rec->flags); 2785 } while_for_each_ftrace_rec(); 2786 } 2787 2788 ops->old_hash.filter_hash = NULL; 2789 ops->old_hash.notrace_hash = NULL; 2790 2791 removed_ops = NULL; 2792 ops->flags &= ~FTRACE_OPS_FL_REMOVING; 2793 2794 /* 2795 * Dynamic ops may be freed; we must make sure that all 2796 * callers are done before leaving this function. 2797 * The same goes for freeing the per_cpu data of the per_cpu 2798 * ops. 2799 */ 2800 if (ops->flags & FTRACE_OPS_FL_DYNAMIC) { 2801 /* 2802 * We need to do a hard force of sched synchronization. 2803 * This is because we use preempt_disable() to do RCU, but 2804 * the function tracers can be called where RCU is not watching 2805 * (like before user_exit()). We can not rely on the RCU 2806 * infrastructure to do the synchronization, thus we must do it 2807 * ourselves. 2808 */ 2809 schedule_on_each_cpu(ftrace_sync); 2810 2811 /* 2812 * When the kernel is preemptible, tasks can be preempted 2813 * while on a ftrace trampoline. Just scheduling a task on 2814 * a CPU is not good enough to flush them. Calling 2815 * synchronize_rcu_tasks() will wait for those tasks to 2816 * execute and either schedule voluntarily or enter user space. 2817 */ 2818 if (IS_ENABLED(CONFIG_PREEMPT)) 2819 synchronize_rcu_tasks(); 2820 2821 free_ops: 2822 arch_ftrace_trampoline_free(ops); 2823 } 2824 2825 return 0; 2826 } 2827 2828 static void ftrace_startup_sysctl(void) 2829 { 2830 int command; 2831 2832 if (unlikely(ftrace_disabled)) 2833 return; 2834 2835 /* Force update next time */ 2836 saved_ftrace_func = NULL; 2837 /* ftrace_start_up is true if we want ftrace running */ 2838 if (ftrace_start_up) { 2839 command = FTRACE_UPDATE_CALLS; 2840 if (ftrace_graph_active) 2841 command |= FTRACE_START_FUNC_RET; 2842 ftrace_startup_enable(command); 2843 } 2844 } 2845 2846 static void ftrace_shutdown_sysctl(void) 2847 { 2848 int command; 2849 2850 if (unlikely(ftrace_disabled)) 2851 return; 2852 2853 /* ftrace_start_up is true if ftrace is running */ 2854 if (ftrace_start_up) { 2855 command = FTRACE_DISABLE_CALLS; 2856 if (ftrace_graph_active) 2857 command |= FTRACE_STOP_FUNC_RET; 2858 ftrace_run_update_code(command); 2859 } 2860 } 2861 2862 static u64 ftrace_update_time; 2863 unsigned long ftrace_update_tot_cnt; 2864 2865 static inline int ops_traces_mod(struct ftrace_ops *ops) 2866 { 2867 /* 2868 * An empty filter_hash defaults to tracing the module. 2869 * But notrace hash requires a test of individual module functions.
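 *
 * For example, an ops with both hashes empty traces every function,
 * so everything a new module adds is covered and can be accounted up
 * front. An ops with entries in its notrace hash may still skip some
 * of the module's functions, so each record has to be checked
 * individually (see ops_references_rec() below).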
2870 */ 2871 return ftrace_hash_empty(ops->func_hash->filter_hash) && 2872 ftrace_hash_empty(ops->func_hash->notrace_hash); 2873 } 2874 2875 /* 2876 * Check if the current ops references the record. 2877 * 2878 * If the ops traces all functions, then it was already accounted for. 2879 * If the ops does not trace the current record function, skip it. 2880 * If the ops ignores the function via notrace filter, skip it. 2881 */ 2882 static inline bool 2883 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) 2884 { 2885 /* If ops isn't enabled, ignore it */ 2886 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 2887 return false; 2888 2889 /* If ops traces all then it includes this function */ 2890 if (ops_traces_mod(ops)) 2891 return true; 2892 2893 /* The function must be in the filter */ 2894 if (!ftrace_hash_empty(ops->func_hash->filter_hash) && 2895 !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) 2896 return false; 2897 2898 /* If in notrace hash, we ignore it too */ 2899 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) 2900 return false; 2901 2902 return true; 2903 } 2904 2905 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) 2906 { 2907 struct ftrace_page *pg; 2908 struct dyn_ftrace *p; 2909 u64 start, stop; 2910 unsigned long update_cnt = 0; 2911 unsigned long rec_flags = 0; 2912 int i; 2913 2914 start = ftrace_now(raw_smp_processor_id()); 2915 2916 /* 2917 * When a module is loaded, this function is called to convert 2918 * the calls to mcount in its text to nops, and also to create 2919 * an entry in the ftrace data. Now, if ftrace is activated 2920 * after this call, but before the module sets its text to 2921 * read-only, the modification of enabling ftrace can fail if 2922 * the read-only is done while ftrace is converting the calls. 2923 * To prevent this, the module's records are set as disabled 2924 * and will be enabled after the call to set the module's text 2925 * to read-only. 2926 */ 2927 if (mod) 2928 rec_flags |= FTRACE_FL_DISABLED; 2929 2930 for (pg = new_pgs; pg; pg = pg->next) { 2931 2932 for (i = 0; i < pg->index; i++) { 2933 2934 /* If something went wrong, bail without enabling anything */ 2935 if (unlikely(ftrace_disabled)) 2936 return -1; 2937 2938 p = &pg->records[i]; 2939 p->flags = rec_flags; 2940 2941 #ifndef CC_USING_NOP_MCOUNT 2942 /* 2943 * Do the initial record conversion from mcount jump 2944 * to the NOP instructions. 2945 */ 2946 if (!ftrace_code_disable(mod, p)) 2947 break; 2948 #endif 2949 2950 update_cnt++; 2951 } 2952 } 2953 2954 stop = ftrace_now(raw_smp_processor_id()); 2955 ftrace_update_time = stop - start; 2956 ftrace_update_tot_cnt += update_cnt; 2957 2958 return 0; 2959 } 2960 2961 static int ftrace_allocate_records(struct ftrace_page *pg, int count) 2962 { 2963 int order; 2964 int cnt; 2965 2966 if (WARN_ON(!count)) 2967 return -EINVAL; 2968 2969 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); 2970 2971 /* 2972 * We want to fill as much as possible. No more than a page 2973 * may be empty. 
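 *
 * A worked example, assuming 4K pages and a 16 byte struct dyn_ftrace
 * (so ENTRIES_PER_PAGE is 256): a count of 300 rounds up to two pages,
 * i.e. an initial order of 1. Two pages hold 512 records, which is
 * less than 300 + 256, so the loop below keeps order 1 rather than
 * shrinking the allocation.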
2974 */ 2975 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) 2976 order--; 2977 2978 again: 2979 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 2980 2981 if (!pg->records) { 2982 /* if we can't allocate this size, try something smaller */ 2983 if (!order) 2984 return -ENOMEM; 2985 order >>= 1; 2986 goto again; 2987 } 2988 2989 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; 2990 pg->size = cnt; 2991 2992 if (cnt > count) 2993 cnt = count; 2994 2995 return cnt; 2996 } 2997 2998 static struct ftrace_page * 2999 ftrace_allocate_pages(unsigned long num_to_init) 3000 { 3001 struct ftrace_page *start_pg; 3002 struct ftrace_page *pg; 3003 int order; 3004 int cnt; 3005 3006 if (!num_to_init) 3007 return 0; 3008 3009 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); 3010 if (!pg) 3011 return NULL; 3012 3013 /* 3014 * Try to allocate as much as possible in one continues 3015 * location that fills in all of the space. We want to 3016 * waste as little space as possible. 3017 */ 3018 for (;;) { 3019 cnt = ftrace_allocate_records(pg, num_to_init); 3020 if (cnt < 0) 3021 goto free_pages; 3022 3023 num_to_init -= cnt; 3024 if (!num_to_init) 3025 break; 3026 3027 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); 3028 if (!pg->next) 3029 goto free_pages; 3030 3031 pg = pg->next; 3032 } 3033 3034 return start_pg; 3035 3036 free_pages: 3037 pg = start_pg; 3038 while (pg) { 3039 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 3040 free_pages((unsigned long)pg->records, order); 3041 start_pg = pg->next; 3042 kfree(pg); 3043 pg = start_pg; 3044 } 3045 pr_info("ftrace: FAILED to allocate memory for functions\n"); 3046 return NULL; 3047 } 3048 3049 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 3050 3051 struct ftrace_iterator { 3052 loff_t pos; 3053 loff_t func_pos; 3054 loff_t mod_pos; 3055 struct ftrace_page *pg; 3056 struct dyn_ftrace *func; 3057 struct ftrace_func_probe *probe; 3058 struct ftrace_func_entry *probe_entry; 3059 struct trace_parser parser; 3060 struct ftrace_hash *hash; 3061 struct ftrace_ops *ops; 3062 struct trace_array *tr; 3063 struct list_head *mod_list; 3064 int pidx; 3065 int idx; 3066 unsigned flags; 3067 }; 3068 3069 static void * 3070 t_probe_next(struct seq_file *m, loff_t *pos) 3071 { 3072 struct ftrace_iterator *iter = m->private; 3073 struct trace_array *tr = iter->ops->private; 3074 struct list_head *func_probes; 3075 struct ftrace_hash *hash; 3076 struct list_head *next; 3077 struct hlist_node *hnd = NULL; 3078 struct hlist_head *hhd; 3079 int size; 3080 3081 (*pos)++; 3082 iter->pos = *pos; 3083 3084 if (!tr) 3085 return NULL; 3086 3087 func_probes = &tr->func_probes; 3088 if (list_empty(func_probes)) 3089 return NULL; 3090 3091 if (!iter->probe) { 3092 next = func_probes->next; 3093 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3094 } 3095 3096 if (iter->probe_entry) 3097 hnd = &iter->probe_entry->hlist; 3098 3099 hash = iter->probe->ops.func_hash->filter_hash; 3100 size = 1 << hash->size_bits; 3101 3102 retry: 3103 if (iter->pidx >= size) { 3104 if (iter->probe->list.next == func_probes) 3105 return NULL; 3106 next = iter->probe->list.next; 3107 iter->probe = list_entry(next, struct ftrace_func_probe, list); 3108 hash = iter->probe->ops.func_hash->filter_hash; 3109 size = 1 << hash->size_bits; 3110 iter->pidx = 0; 3111 } 3112 3113 hhd = &hash->buckets[iter->pidx]; 3114 3115 if (hlist_empty(hhd)) { 3116 iter->pidx++; 3117 hnd = NULL; 3118 goto retry; 3119 } 3120 3121 if (!hnd) 3122 hnd = 
hhd->first; 3123 else { 3124 hnd = hnd->next; 3125 if (!hnd) { 3126 iter->pidx++; 3127 goto retry; 3128 } 3129 } 3130 3131 if (WARN_ON_ONCE(!hnd)) 3132 return NULL; 3133 3134 iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist); 3135 3136 return iter; 3137 } 3138 3139 static void *t_probe_start(struct seq_file *m, loff_t *pos) 3140 { 3141 struct ftrace_iterator *iter = m->private; 3142 void *p = NULL; 3143 loff_t l; 3144 3145 if (!(iter->flags & FTRACE_ITER_DO_PROBES)) 3146 return NULL; 3147 3148 if (iter->mod_pos > *pos) 3149 return NULL; 3150 3151 iter->probe = NULL; 3152 iter->probe_entry = NULL; 3153 iter->pidx = 0; 3154 for (l = 0; l <= (*pos - iter->mod_pos); ) { 3155 p = t_probe_next(m, &l); 3156 if (!p) 3157 break; 3158 } 3159 if (!p) 3160 return NULL; 3161 3162 /* Only set this if we have an item */ 3163 iter->flags |= FTRACE_ITER_PROBE; 3164 3165 return iter; 3166 } 3167 3168 static int 3169 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter) 3170 { 3171 struct ftrace_func_entry *probe_entry; 3172 struct ftrace_probe_ops *probe_ops; 3173 struct ftrace_func_probe *probe; 3174 3175 probe = iter->probe; 3176 probe_entry = iter->probe_entry; 3177 3178 if (WARN_ON_ONCE(!probe || !probe_entry)) 3179 return -EIO; 3180 3181 probe_ops = probe->probe_ops; 3182 3183 if (probe_ops->print) 3184 return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data); 3185 3186 seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip, 3187 (void *)probe_ops->func); 3188 3189 return 0; 3190 } 3191 3192 static void * 3193 t_mod_next(struct seq_file *m, loff_t *pos) 3194 { 3195 struct ftrace_iterator *iter = m->private; 3196 struct trace_array *tr = iter->tr; 3197 3198 (*pos)++; 3199 iter->pos = *pos; 3200 3201 iter->mod_list = iter->mod_list->next; 3202 3203 if (iter->mod_list == &tr->mod_trace || 3204 iter->mod_list == &tr->mod_notrace) { 3205 iter->flags &= ~FTRACE_ITER_MOD; 3206 return NULL; 3207 } 3208 3209 iter->mod_pos = *pos; 3210 3211 return iter; 3212 } 3213 3214 static void *t_mod_start(struct seq_file *m, loff_t *pos) 3215 { 3216 struct ftrace_iterator *iter = m->private; 3217 void *p = NULL; 3218 loff_t l; 3219 3220 if (iter->func_pos > *pos) 3221 return NULL; 3222 3223 iter->mod_pos = iter->func_pos; 3224 3225 /* probes are only available if tr is set */ 3226 if (!iter->tr) 3227 return NULL; 3228 3229 for (l = 0; l <= (*pos - iter->func_pos); ) { 3230 p = t_mod_next(m, &l); 3231 if (!p) 3232 break; 3233 } 3234 if (!p) { 3235 iter->flags &= ~FTRACE_ITER_MOD; 3236 return t_probe_start(m, pos); 3237 } 3238 3239 /* Only set this if we have an item */ 3240 iter->flags |= FTRACE_ITER_MOD; 3241 3242 return iter; 3243 } 3244 3245 static int 3246 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter) 3247 { 3248 struct ftrace_mod_load *ftrace_mod; 3249 struct trace_array *tr = iter->tr; 3250 3251 if (WARN_ON_ONCE(!iter->mod_list) || 3252 iter->mod_list == &tr->mod_trace || 3253 iter->mod_list == &tr->mod_notrace) 3254 return -EIO; 3255 3256 ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list); 3257 3258 if (ftrace_mod->func) 3259 seq_printf(m, "%s", ftrace_mod->func); 3260 else 3261 seq_putc(m, '*'); 3262 3263 seq_printf(m, ":mod:%s\n", ftrace_mod->module); 3264 3265 return 0; 3266 } 3267 3268 static void * 3269 t_func_next(struct seq_file *m, loff_t *pos) 3270 { 3271 struct ftrace_iterator *iter = m->private; 3272 struct dyn_ftrace *rec = NULL; 3273 3274 (*pos)++; 3275 3276 retry: 3277 if (iter->idx >= iter->pg->index) { 3278 if (iter->pg->next) { 
3279 iter->pg = iter->pg->next; 3280 iter->idx = 0; 3281 goto retry; 3282 } 3283 } else { 3284 rec = &iter->pg->records[iter->idx++]; 3285 if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 3286 !ftrace_lookup_ip(iter->hash, rec->ip)) || 3287 3288 ((iter->flags & FTRACE_ITER_ENABLED) && 3289 !(rec->flags & FTRACE_FL_ENABLED))) { 3290 3291 rec = NULL; 3292 goto retry; 3293 } 3294 } 3295 3296 if (!rec) 3297 return NULL; 3298 3299 iter->pos = iter->func_pos = *pos; 3300 iter->func = rec; 3301 3302 return iter; 3303 } 3304 3305 static void * 3306 t_next(struct seq_file *m, void *v, loff_t *pos) 3307 { 3308 struct ftrace_iterator *iter = m->private; 3309 loff_t l = *pos; /* t_probe_start() must use original pos */ 3310 void *ret; 3311 3312 if (unlikely(ftrace_disabled)) 3313 return NULL; 3314 3315 if (iter->flags & FTRACE_ITER_PROBE) 3316 return t_probe_next(m, pos); 3317 3318 if (iter->flags & FTRACE_ITER_MOD) 3319 return t_mod_next(m, pos); 3320 3321 if (iter->flags & FTRACE_ITER_PRINTALL) { 3322 /* next must increment pos, and t_probe_start does not */ 3323 (*pos)++; 3324 return t_mod_start(m, &l); 3325 } 3326 3327 ret = t_func_next(m, pos); 3328 3329 if (!ret) 3330 return t_mod_start(m, &l); 3331 3332 return ret; 3333 } 3334 3335 static void reset_iter_read(struct ftrace_iterator *iter) 3336 { 3337 iter->pos = 0; 3338 iter->func_pos = 0; 3339 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD); 3340 } 3341 3342 static void *t_start(struct seq_file *m, loff_t *pos) 3343 { 3344 struct ftrace_iterator *iter = m->private; 3345 void *p = NULL; 3346 loff_t l; 3347 3348 mutex_lock(&ftrace_lock); 3349 3350 if (unlikely(ftrace_disabled)) 3351 return NULL; 3352 3353 /* 3354 * If an lseek was done, then reset and start from beginning. 3355 */ 3356 if (*pos < iter->pos) 3357 reset_iter_read(iter); 3358 3359 /* 3360 * For set_ftrace_filter reading, if we have the filter 3361 * off, we can short cut and just print out that all 3362 * functions are enabled. 3363 */ 3364 if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) && 3365 ftrace_hash_empty(iter->hash)) { 3366 iter->func_pos = 1; /* Account for the message */ 3367 if (*pos > 0) 3368 return t_mod_start(m, pos); 3369 iter->flags |= FTRACE_ITER_PRINTALL; 3370 /* reset in case of seek/pread */ 3371 iter->flags &= ~FTRACE_ITER_PROBE; 3372 return iter; 3373 } 3374 3375 if (iter->flags & FTRACE_ITER_MOD) 3376 return t_mod_start(m, pos); 3377 3378 /* 3379 * Unfortunately, we need to restart at ftrace_pages_start 3380 * every time we let go of the ftrace_mutex. This is because 3381 * those pointers can change without the lock. 
3382 */ 3383 iter->pg = ftrace_pages_start; 3384 iter->idx = 0; 3385 for (l = 0; l <= *pos; ) { 3386 p = t_func_next(m, &l); 3387 if (!p) 3388 break; 3389 } 3390 3391 if (!p) 3392 return t_mod_start(m, pos); 3393 3394 return iter; 3395 } 3396 3397 static void t_stop(struct seq_file *m, void *p) 3398 { 3399 mutex_unlock(&ftrace_lock); 3400 } 3401 3402 void * __weak 3403 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) 3404 { 3405 return NULL; 3406 } 3407 3408 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, 3409 struct dyn_ftrace *rec) 3410 { 3411 void *ptr; 3412 3413 ptr = arch_ftrace_trampoline_func(ops, rec); 3414 if (ptr) 3415 seq_printf(m, " ->%pS", ptr); 3416 } 3417 3418 static int t_show(struct seq_file *m, void *v) 3419 { 3420 struct ftrace_iterator *iter = m->private; 3421 struct dyn_ftrace *rec; 3422 3423 if (iter->flags & FTRACE_ITER_PROBE) 3424 return t_probe_show(m, iter); 3425 3426 if (iter->flags & FTRACE_ITER_MOD) 3427 return t_mod_show(m, iter); 3428 3429 if (iter->flags & FTRACE_ITER_PRINTALL) { 3430 if (iter->flags & FTRACE_ITER_NOTRACE) 3431 seq_puts(m, "#### no functions disabled ####\n"); 3432 else 3433 seq_puts(m, "#### all functions enabled ####\n"); 3434 return 0; 3435 } 3436 3437 rec = iter->func; 3438 3439 if (!rec) 3440 return 0; 3441 3442 seq_printf(m, "%ps", (void *)rec->ip); 3443 if (iter->flags & FTRACE_ITER_ENABLED) { 3444 struct ftrace_ops *ops; 3445 3446 seq_printf(m, " (%ld)%s%s", 3447 ftrace_rec_count(rec), 3448 rec->flags & FTRACE_FL_REGS ? " R" : " ", 3449 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); 3450 if (rec->flags & FTRACE_FL_TRAMP_EN) { 3451 ops = ftrace_find_tramp_ops_any(rec); 3452 if (ops) { 3453 do { 3454 seq_printf(m, "\ttramp: %pS (%pS)", 3455 (void *)ops->trampoline, 3456 (void *)ops->func); 3457 add_trampoline_func(m, ops, rec); 3458 ops = ftrace_find_tramp_ops_next(rec, ops); 3459 } while (ops); 3460 } else 3461 seq_puts(m, "\ttramp: ERROR!"); 3462 } else { 3463 add_trampoline_func(m, NULL, rec); 3464 } 3465 } 3466 3467 seq_putc(m, '\n'); 3468 3469 return 0; 3470 } 3471 3472 static const struct seq_operations show_ftrace_seq_ops = { 3473 .start = t_start, 3474 .next = t_next, 3475 .stop = t_stop, 3476 .show = t_show, 3477 }; 3478 3479 static int 3480 ftrace_avail_open(struct inode *inode, struct file *file) 3481 { 3482 struct ftrace_iterator *iter; 3483 3484 if (unlikely(ftrace_disabled)) 3485 return -ENODEV; 3486 3487 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3488 if (!iter) 3489 return -ENOMEM; 3490 3491 iter->pg = ftrace_pages_start; 3492 iter->ops = &global_ops; 3493 3494 return 0; 3495 } 3496 3497 static int 3498 ftrace_enabled_open(struct inode *inode, struct file *file) 3499 { 3500 struct ftrace_iterator *iter; 3501 3502 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 3503 if (!iter) 3504 return -ENOMEM; 3505 3506 iter->pg = ftrace_pages_start; 3507 iter->flags = FTRACE_ITER_ENABLED; 3508 iter->ops = &global_ops; 3509 3510 return 0; 3511 } 3512 3513 /** 3514 * ftrace_regex_open - initialize function tracer filter files 3515 * @ops: The ftrace_ops that hold the hash filters 3516 * @flag: The type of filter to process 3517 * @inode: The inode, usually passed in to your open routine 3518 * @file: The file, usually passed in to your open routine 3519 * 3520 * ftrace_regex_open() initializes the filter files for the 3521 * @ops. Depending on @flag it may process the filter hash or 3522 * the notrace hash of @ops. 
With this called from the open 3523 * routine, you can use ftrace_filter_write() for the write 3524 * routine if @flag has FTRACE_ITER_FILTER set, or 3525 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 3526 * tracing_lseek() should be used as the lseek routine, and 3527 * release must call ftrace_regex_release(). 3528 */ 3529 int 3530 ftrace_regex_open(struct ftrace_ops *ops, int flag, 3531 struct inode *inode, struct file *file) 3532 { 3533 struct ftrace_iterator *iter; 3534 struct ftrace_hash *hash; 3535 struct list_head *mod_head; 3536 struct trace_array *tr = ops->private; 3537 int ret = 0; 3538 3539 ftrace_ops_init(ops); 3540 3541 if (unlikely(ftrace_disabled)) 3542 return -ENODEV; 3543 3544 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 3545 if (!iter) 3546 return -ENOMEM; 3547 3548 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) { 3549 kfree(iter); 3550 return -ENOMEM; 3551 } 3552 3553 iter->ops = ops; 3554 iter->flags = flag; 3555 iter->tr = tr; 3556 3557 mutex_lock(&ops->func_hash->regex_lock); 3558 3559 if (flag & FTRACE_ITER_NOTRACE) { 3560 hash = ops->func_hash->notrace_hash; 3561 mod_head = tr ? &tr->mod_notrace : NULL; 3562 } else { 3563 hash = ops->func_hash->filter_hash; 3564 mod_head = tr ? &tr->mod_trace : NULL; 3565 } 3566 3567 iter->mod_list = mod_head; 3568 3569 if (file->f_mode & FMODE_WRITE) { 3570 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 3571 3572 if (file->f_flags & O_TRUNC) { 3573 iter->hash = alloc_ftrace_hash(size_bits); 3574 clear_ftrace_mod_list(mod_head); 3575 } else { 3576 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash); 3577 } 3578 3579 if (!iter->hash) { 3580 trace_parser_put(&iter->parser); 3581 kfree(iter); 3582 ret = -ENOMEM; 3583 goto out_unlock; 3584 } 3585 } else 3586 iter->hash = hash; 3587 3588 if (file->f_mode & FMODE_READ) { 3589 iter->pg = ftrace_pages_start; 3590 3591 ret = seq_open(file, &show_ftrace_seq_ops); 3592 if (!ret) { 3593 struct seq_file *m = file->private_data; 3594 m->private = iter; 3595 } else { 3596 /* Failed */ 3597 free_ftrace_hash(iter->hash); 3598 trace_parser_put(&iter->parser); 3599 kfree(iter); 3600 } 3601 } else 3602 file->private_data = iter; 3603 3604 out_unlock: 3605 mutex_unlock(&ops->func_hash->regex_lock); 3606 3607 return ret; 3608 } 3609 3610 static int 3611 ftrace_filter_open(struct inode *inode, struct file *file) 3612 { 3613 struct ftrace_ops *ops = inode->i_private; 3614 3615 return ftrace_regex_open(ops, 3616 FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES, 3617 inode, file); 3618 } 3619 3620 static int 3621 ftrace_notrace_open(struct inode *inode, struct file *file) 3622 { 3623 struct ftrace_ops *ops = inode->i_private; 3624 3625 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 3626 inode, file); 3627 } 3628 3629 /* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */ 3630 struct ftrace_glob { 3631 char *search; 3632 unsigned len; 3633 int type; 3634 }; 3635 3636 /* 3637 * If symbols in an architecture don't correspond exactly to the user-visible 3638 * name of what they represent, it is possible to define this function to 3639 * perform the necessary adjustments. 
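 *
 * (powerpc64 with the ELFv1 ABI is the usual example: the traced entry
 * points are dot-prefixed symbols such as ".schedule", and the arch
 * hook strips the leading dot so that a plain "schedule" filter still
 * matches.)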
3640 */ 3641 char * __weak arch_ftrace_match_adjust(char *str, const char *search) 3642 { 3643 return str; 3644 } 3645 3646 static int ftrace_match(char *str, struct ftrace_glob *g) 3647 { 3648 int matched = 0; 3649 int slen; 3650 3651 str = arch_ftrace_match_adjust(str, g->search); 3652 3653 switch (g->type) { 3654 case MATCH_FULL: 3655 if (strcmp(str, g->search) == 0) 3656 matched = 1; 3657 break; 3658 case MATCH_FRONT_ONLY: 3659 if (strncmp(str, g->search, g->len) == 0) 3660 matched = 1; 3661 break; 3662 case MATCH_MIDDLE_ONLY: 3663 if (strstr(str, g->search)) 3664 matched = 1; 3665 break; 3666 case MATCH_END_ONLY: 3667 slen = strlen(str); 3668 if (slen >= g->len && 3669 memcmp(str + slen - g->len, g->search, g->len) == 0) 3670 matched = 1; 3671 break; 3672 case MATCH_GLOB: 3673 if (glob_match(g->search, str)) 3674 matched = 1; 3675 break; 3676 } 3677 3678 return matched; 3679 } 3680 3681 static int 3682 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter) 3683 { 3684 struct ftrace_func_entry *entry; 3685 int ret = 0; 3686 3687 entry = ftrace_lookup_ip(hash, rec->ip); 3688 if (clear_filter) { 3689 /* Do nothing if it doesn't exist */ 3690 if (!entry) 3691 return 0; 3692 3693 free_hash_entry(hash, entry); 3694 } else { 3695 /* Do nothing if it exists */ 3696 if (entry) 3697 return 0; 3698 3699 ret = add_hash_entry(hash, rec->ip); 3700 } 3701 return ret; 3702 } 3703 3704 static int 3705 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g, 3706 struct ftrace_glob *mod_g, int exclude_mod) 3707 { 3708 char str[KSYM_SYMBOL_LEN]; 3709 char *modname; 3710 3711 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); 3712 3713 if (mod_g) { 3714 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0; 3715 3716 /* blank module name to match all modules */ 3717 if (!mod_g->len) { 3718 /* blank module globbing: modname xor exclude_mod */ 3719 if (!exclude_mod != !modname) 3720 goto func_match; 3721 return 0; 3722 } 3723 3724 /* 3725 * exclude_mod is set to trace everything but the given 3726 * module. If it is set and the module matches, then 3727 * return 0. If it is not set, and the module doesn't match 3728 * also return 0. Otherwise, check the function to see if 3729 * that matches. 3730 */ 3731 if (!mod_matches == !exclude_mod) 3732 return 0; 3733 func_match: 3734 /* blank search means to match all funcs in the mod */ 3735 if (!func_g->len) 3736 return 1; 3737 } 3738 3739 return ftrace_match(str, func_g); 3740 } 3741 3742 static int 3743 match_records(struct ftrace_hash *hash, char *func, int len, char *mod) 3744 { 3745 struct ftrace_page *pg; 3746 struct dyn_ftrace *rec; 3747 struct ftrace_glob func_g = { .type = MATCH_FULL }; 3748 struct ftrace_glob mod_g = { .type = MATCH_FULL }; 3749 struct ftrace_glob *mod_match = (mod) ? 
&mod_g : NULL; 3750 int exclude_mod = 0; 3751 int found = 0; 3752 int ret; 3753 int clear_filter = 0; 3754 3755 if (func) { 3756 func_g.type = filter_parse_regex(func, len, &func_g.search, 3757 &clear_filter); 3758 func_g.len = strlen(func_g.search); 3759 } 3760 3761 if (mod) { 3762 mod_g.type = filter_parse_regex(mod, strlen(mod), 3763 &mod_g.search, &exclude_mod); 3764 mod_g.len = strlen(mod_g.search); 3765 } 3766 3767 mutex_lock(&ftrace_lock); 3768 3769 if (unlikely(ftrace_disabled)) 3770 goto out_unlock; 3771 3772 do_for_each_ftrace_rec(pg, rec) { 3773 3774 if (rec->flags & FTRACE_FL_DISABLED) 3775 continue; 3776 3777 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { 3778 ret = enter_record(hash, rec, clear_filter); 3779 if (ret < 0) { 3780 found = ret; 3781 goto out_unlock; 3782 } 3783 found = 1; 3784 } 3785 } while_for_each_ftrace_rec(); 3786 out_unlock: 3787 mutex_unlock(&ftrace_lock); 3788 3789 return found; 3790 } 3791 3792 static int 3793 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) 3794 { 3795 return match_records(hash, buff, len, NULL); 3796 } 3797 3798 static void ftrace_ops_update_code(struct ftrace_ops *ops, 3799 struct ftrace_ops_hash *old_hash) 3800 { 3801 struct ftrace_ops *op; 3802 3803 if (!ftrace_enabled) 3804 return; 3805 3806 if (ops->flags & FTRACE_OPS_FL_ENABLED) { 3807 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); 3808 return; 3809 } 3810 3811 /* 3812 * If this is the shared global_ops filter, then we need to 3813 * check if there is another ops that shares it, is enabled. 3814 * If so, we still need to run the modify code. 3815 */ 3816 if (ops->func_hash != &global_ops.local_hash) 3817 return; 3818 3819 do_for_each_ftrace_op(op, ftrace_ops_list) { 3820 if (op->func_hash == &global_ops.local_hash && 3821 op->flags & FTRACE_OPS_FL_ENABLED) { 3822 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); 3823 /* Only need to do this once */ 3824 return; 3825 } 3826 } while_for_each_ftrace_op(op); 3827 } 3828 3829 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops, 3830 struct ftrace_hash **orig_hash, 3831 struct ftrace_hash *hash, 3832 int enable) 3833 { 3834 struct ftrace_ops_hash old_hash_ops; 3835 struct ftrace_hash *old_hash; 3836 int ret; 3837 3838 old_hash = *orig_hash; 3839 old_hash_ops.filter_hash = ops->func_hash->filter_hash; 3840 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; 3841 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 3842 if (!ret) { 3843 ftrace_ops_update_code(ops, &old_hash_ops); 3844 free_ftrace_hash_rcu(old_hash); 3845 } 3846 return ret; 3847 } 3848 3849 static bool module_exists(const char *module) 3850 { 3851 /* All modules have the symbol __this_module */ 3852 const char this_mod[] = "__this_module"; 3853 char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2]; 3854 unsigned long val; 3855 int n; 3856 3857 n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod); 3858 3859 if (n > sizeof(modname) - 1) 3860 return false; 3861 3862 val = module_kallsyms_lookup_name(modname); 3863 return val != 0; 3864 } 3865 3866 static int cache_mod(struct trace_array *tr, 3867 const char *func, char *module, int enable) 3868 { 3869 struct ftrace_mod_load *ftrace_mod, *n; 3870 struct list_head *head = enable ? 
&tr->mod_trace : &tr->mod_notrace; 3871 int ret; 3872 3873 mutex_lock(&ftrace_lock); 3874 3875 /* We do not cache inverse filters */ 3876 if (func[0] == '!') { 3877 func++; 3878 ret = -EINVAL; 3879 3880 /* Look to remove this hash */ 3881 list_for_each_entry_safe(ftrace_mod, n, head, list) { 3882 if (strcmp(ftrace_mod->module, module) != 0) 3883 continue; 3884 3885 /* no func matches all */ 3886 if (strcmp(func, "*") == 0 || 3887 (ftrace_mod->func && 3888 strcmp(ftrace_mod->func, func) == 0)) { 3889 ret = 0; 3890 free_ftrace_mod(ftrace_mod); 3891 continue; 3892 } 3893 } 3894 goto out; 3895 } 3896 3897 ret = -EINVAL; 3898 /* We only care about modules that have not been loaded yet */ 3899 if (module_exists(module)) 3900 goto out; 3901 3902 /* Save this string off, and execute it when the module is loaded */ 3903 ret = ftrace_add_mod(tr, func, module, enable); 3904 out: 3905 mutex_unlock(&ftrace_lock); 3906 3907 return ret; 3908 } 3909 3910 static int 3911 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 3912 int reset, int enable); 3913 3914 #ifdef CONFIG_MODULES 3915 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops, 3916 char *mod, bool enable) 3917 { 3918 struct ftrace_mod_load *ftrace_mod, *n; 3919 struct ftrace_hash **orig_hash, *new_hash; 3920 LIST_HEAD(process_mods); 3921 char *func; 3922 int ret; 3923 3924 mutex_lock(&ops->func_hash->regex_lock); 3925 3926 if (enable) 3927 orig_hash = &ops->func_hash->filter_hash; 3928 else 3929 orig_hash = &ops->func_hash->notrace_hash; 3930 3931 new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, 3932 *orig_hash); 3933 if (!new_hash) 3934 goto out; /* warn? */ 3935 3936 mutex_lock(&ftrace_lock); 3937 3938 list_for_each_entry_safe(ftrace_mod, n, head, list) { 3939 3940 if (strcmp(ftrace_mod->module, mod) != 0) 3941 continue; 3942 3943 if (ftrace_mod->func) 3944 func = kstrdup(ftrace_mod->func, GFP_KERNEL); 3945 else 3946 func = kstrdup("*", GFP_KERNEL); 3947 3948 if (!func) /* warn? 
*/ 3949 continue; 3950 3951 list_del(&ftrace_mod->list); 3952 list_add(&ftrace_mod->list, &process_mods); 3953 3954 /* Use the newly allocated func, as it may be "*" */ 3955 kfree(ftrace_mod->func); 3956 ftrace_mod->func = func; 3957 } 3958 3959 mutex_unlock(&ftrace_lock); 3960 3961 list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) { 3962 3963 func = ftrace_mod->func; 3964 3965 /* Grabs ftrace_lock, which is why we have this extra step */ 3966 match_records(new_hash, func, strlen(func), mod); 3967 free_ftrace_mod(ftrace_mod); 3968 } 3969 3970 if (enable && list_empty(head)) 3971 new_hash->flags &= ~FTRACE_HASH_FL_MOD; 3972 3973 mutex_lock(&ftrace_lock); 3974 3975 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, 3976 new_hash, enable); 3977 mutex_unlock(&ftrace_lock); 3978 3979 out: 3980 mutex_unlock(&ops->func_hash->regex_lock); 3981 3982 free_ftrace_hash(new_hash); 3983 } 3984 3985 static void process_cached_mods(const char *mod_name) 3986 { 3987 struct trace_array *tr; 3988 char *mod; 3989 3990 mod = kstrdup(mod_name, GFP_KERNEL); 3991 if (!mod) 3992 return; 3993 3994 mutex_lock(&trace_types_lock); 3995 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 3996 if (!list_empty(&tr->mod_trace)) 3997 process_mod_list(&tr->mod_trace, tr->ops, mod, true); 3998 if (!list_empty(&tr->mod_notrace)) 3999 process_mod_list(&tr->mod_notrace, tr->ops, mod, false); 4000 } 4001 mutex_unlock(&trace_types_lock); 4002 4003 kfree(mod); 4004 } 4005 #endif 4006 4007 /* 4008 * We register the module command as a template to show others how 4009 * to register the a command as well. 4010 */ 4011 4012 static int 4013 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, 4014 char *func_orig, char *cmd, char *module, int enable) 4015 { 4016 char *func; 4017 int ret; 4018 4019 /* match_records() modifies func, and we need the original */ 4020 func = kstrdup(func_orig, GFP_KERNEL); 4021 if (!func) 4022 return -ENOMEM; 4023 4024 /* 4025 * cmd == 'mod' because we only registered this func 4026 * for the 'mod' ftrace_func_command. 4027 * But if you register one func with multiple commands, 4028 * you can tell which command was used by the cmd 4029 * parameter. 4030 */ 4031 ret = match_records(hash, func, strlen(func), module); 4032 kfree(func); 4033 4034 if (!ret) 4035 return cache_mod(tr, func_orig, module, enable); 4036 if (ret < 0) 4037 return ret; 4038 return 0; 4039 } 4040 4041 static struct ftrace_func_command ftrace_mod_cmd = { 4042 .name = "mod", 4043 .func = ftrace_mod_callback, 4044 }; 4045 4046 static int __init ftrace_mod_cmd_init(void) 4047 { 4048 return register_ftrace_command(&ftrace_mod_cmd); 4049 } 4050 core_initcall(ftrace_mod_cmd_init); 4051 4052 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, 4053 struct ftrace_ops *op, struct pt_regs *pt_regs) 4054 { 4055 struct ftrace_probe_ops *probe_ops; 4056 struct ftrace_func_probe *probe; 4057 4058 probe = container_of(op, struct ftrace_func_probe, ops); 4059 probe_ops = probe->probe_ops; 4060 4061 /* 4062 * Disable preemption for these calls to prevent a RCU grace 4063 * period. This syncs the hash iteration and freeing of items 4064 * on the hash. rcu_read_lock is too dangerous here. 
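 *
 * (The hash in question is the ops filter hash: stale copies of it are
 * freed with free_ftrace_hash_rcu(), so holding off an RCU grace
 * period here is what keeps the iteration safe.)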
4065 */ 4066 preempt_disable_notrace(); 4067 probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data); 4068 preempt_enable_notrace(); 4069 } 4070 4071 struct ftrace_func_map { 4072 struct ftrace_func_entry entry; 4073 void *data; 4074 }; 4075 4076 struct ftrace_func_mapper { 4077 struct ftrace_hash hash; 4078 }; 4079 4080 /** 4081 * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper 4082 * 4083 * Returns a ftrace_func_mapper descriptor that can be used to map ips to data. 4084 */ 4085 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void) 4086 { 4087 struct ftrace_hash *hash; 4088 4089 /* 4090 * The mapper is simply a ftrace_hash, but since the entries 4091 * in the hash are not ftrace_func_entry type, we define it 4092 * as a separate structure. 4093 */ 4094 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4095 return (struct ftrace_func_mapper *)hash; 4096 } 4097 4098 /** 4099 * ftrace_func_mapper_find_ip - Find some data mapped to an ip 4100 * @mapper: The mapper that has the ip maps 4101 * @ip: the instruction pointer to find the data for 4102 * 4103 * Returns the data mapped to @ip if found otherwise NULL. The return 4104 * is actually the address of the mapper data pointer. The address is 4105 * returned for use cases where the data is no bigger than a long, and 4106 * the user can use the data pointer as its data instead of having to 4107 * allocate more memory for the reference. 4108 */ 4109 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, 4110 unsigned long ip) 4111 { 4112 struct ftrace_func_entry *entry; 4113 struct ftrace_func_map *map; 4114 4115 entry = ftrace_lookup_ip(&mapper->hash, ip); 4116 if (!entry) 4117 return NULL; 4118 4119 map = (struct ftrace_func_map *)entry; 4120 return &map->data; 4121 } 4122 4123 /** 4124 * ftrace_func_mapper_add_ip - Map some data to an ip 4125 * @mapper: The mapper that has the ip maps 4126 * @ip: The instruction pointer address to map @data to 4127 * @data: The data to map to @ip 4128 * 4129 * Returns 0 on succes otherwise an error. 4130 */ 4131 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, 4132 unsigned long ip, void *data) 4133 { 4134 struct ftrace_func_entry *entry; 4135 struct ftrace_func_map *map; 4136 4137 entry = ftrace_lookup_ip(&mapper->hash, ip); 4138 if (entry) 4139 return -EBUSY; 4140 4141 map = kmalloc(sizeof(*map), GFP_KERNEL); 4142 if (!map) 4143 return -ENOMEM; 4144 4145 map->entry.ip = ip; 4146 map->data = data; 4147 4148 __add_hash_entry(&mapper->hash, &map->entry); 4149 4150 return 0; 4151 } 4152 4153 /** 4154 * ftrace_func_mapper_remove_ip - Remove an ip from the mapping 4155 * @mapper: The mapper that has the ip maps 4156 * @ip: The instruction pointer address to remove the data from 4157 * 4158 * Returns the data if it is found, otherwise NULL. 4159 * Note, if the data pointer is used as the data itself, (see 4160 * ftrace_func_mapper_find_ip(), then the return value may be meaningless, 4161 * if the data pointer was set to zero. 
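 *
 * (For example, a probe that only keeps a per-function hit count may
 * store the count directly in the pointer slot rather than allocating
 * separate storage; a stored count of zero is then indistinguishable
 * from a missing entry here.)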
4162 */ 4163 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, 4164 unsigned long ip) 4165 { 4166 struct ftrace_func_entry *entry; 4167 struct ftrace_func_map *map; 4168 void *data; 4169 4170 entry = ftrace_lookup_ip(&mapper->hash, ip); 4171 if (!entry) 4172 return NULL; 4173 4174 map = (struct ftrace_func_map *)entry; 4175 data = map->data; 4176 4177 remove_hash_entry(&mapper->hash, entry); 4178 kfree(entry); 4179 4180 return data; 4181 } 4182 4183 /** 4184 * free_ftrace_func_mapper - free a mapping of ips and data 4185 * @mapper: The mapper that has the ip maps 4186 * @free_func: A function to be called on each data item. 4187 * 4188 * This is used to free the function mapper. The @free_func is optional 4189 * and can be used if the data needs to be freed as well. 4190 */ 4191 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, 4192 ftrace_mapper_func free_func) 4193 { 4194 struct ftrace_func_entry *entry; 4195 struct ftrace_func_map *map; 4196 struct hlist_head *hhd; 4197 int size = 1 << mapper->hash.size_bits; 4198 int i; 4199 4200 if (free_func && mapper->hash.count) { 4201 for (i = 0; i < size; i++) { 4202 hhd = &mapper->hash.buckets[i]; 4203 hlist_for_each_entry(entry, hhd, hlist) { 4204 map = (struct ftrace_func_map *)entry; 4205 free_func(map); 4206 } 4207 } 4208 } 4209 free_ftrace_hash(&mapper->hash); 4210 } 4211 4212 static void release_probe(struct ftrace_func_probe *probe) 4213 { 4214 struct ftrace_probe_ops *probe_ops; 4215 4216 mutex_lock(&ftrace_lock); 4217 4218 WARN_ON(probe->ref <= 0); 4219 4220 /* Subtract the ref that was used to protect this instance */ 4221 probe->ref--; 4222 4223 if (!probe->ref) { 4224 probe_ops = probe->probe_ops; 4225 /* 4226 * Sending zero as ip tells probe_ops to free 4227 * the probe->data itself 4228 */ 4229 if (probe_ops->free) 4230 probe_ops->free(probe_ops, probe->tr, 0, probe->data); 4231 list_del(&probe->list); 4232 kfree(probe); 4233 } 4234 mutex_unlock(&ftrace_lock); 4235 } 4236 4237 static void acquire_probe_locked(struct ftrace_func_probe *probe) 4238 { 4239 /* 4240 * Add one ref to keep it from being freed when releasing the 4241 * ftrace_lock mutex. 4242 */ 4243 probe->ref++; 4244 } 4245 4246 int 4247 register_ftrace_function_probe(char *glob, struct trace_array *tr, 4248 struct ftrace_probe_ops *probe_ops, 4249 void *data) 4250 { 4251 struct ftrace_func_entry *entry; 4252 struct ftrace_func_probe *probe; 4253 struct ftrace_hash **orig_hash; 4254 struct ftrace_hash *old_hash; 4255 struct ftrace_hash *hash; 4256 int count = 0; 4257 int size; 4258 int ret; 4259 int i; 4260 4261 if (WARN_ON(!tr)) 4262 return -EINVAL; 4263 4264 /* We do not support '!' 
for function probes */ 4265 if (WARN_ON(glob[0] == '!')) 4266 return -EINVAL; 4267 4268 4269 mutex_lock(&ftrace_lock); 4270 /* Check if the probe_ops is already registered */ 4271 list_for_each_entry(probe, &tr->func_probes, list) { 4272 if (probe->probe_ops == probe_ops) 4273 break; 4274 } 4275 if (&probe->list == &tr->func_probes) { 4276 probe = kzalloc(sizeof(*probe), GFP_KERNEL); 4277 if (!probe) { 4278 mutex_unlock(&ftrace_lock); 4279 return -ENOMEM; 4280 } 4281 probe->probe_ops = probe_ops; 4282 probe->ops.func = function_trace_probe_call; 4283 probe->tr = tr; 4284 ftrace_ops_init(&probe->ops); 4285 list_add(&probe->list, &tr->func_probes); 4286 } 4287 4288 acquire_probe_locked(probe); 4289 4290 mutex_unlock(&ftrace_lock); 4291 4292 mutex_lock(&probe->ops.func_hash->regex_lock); 4293 4294 orig_hash = &probe->ops.func_hash->filter_hash; 4295 old_hash = *orig_hash; 4296 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4297 4298 ret = ftrace_match_records(hash, glob, strlen(glob)); 4299 4300 /* Nothing found? */ 4301 if (!ret) 4302 ret = -EINVAL; 4303 4304 if (ret < 0) 4305 goto out; 4306 4307 size = 1 << hash->size_bits; 4308 for (i = 0; i < size; i++) { 4309 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4310 if (ftrace_lookup_ip(old_hash, entry->ip)) 4311 continue; 4312 /* 4313 * The caller might want to do something special 4314 * for each function we find. We call the callback 4315 * to give the caller an opportunity to do so. 4316 */ 4317 if (probe_ops->init) { 4318 ret = probe_ops->init(probe_ops, tr, 4319 entry->ip, data, 4320 &probe->data); 4321 if (ret < 0) { 4322 if (probe_ops->free && count) 4323 probe_ops->free(probe_ops, tr, 4324 0, probe->data); 4325 probe->data = NULL; 4326 goto out; 4327 } 4328 } 4329 count++; 4330 } 4331 } 4332 4333 mutex_lock(&ftrace_lock); 4334 4335 if (!count) { 4336 /* Nothing was added? 
*/ 4337 ret = -EINVAL; 4338 goto out_unlock; 4339 } 4340 4341 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4342 hash, 1); 4343 if (ret < 0) 4344 goto err_unlock; 4345 4346 /* One ref for each new function traced */ 4347 probe->ref += count; 4348 4349 if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED)) 4350 ret = ftrace_startup(&probe->ops, 0); 4351 4352 out_unlock: 4353 mutex_unlock(&ftrace_lock); 4354 4355 if (!ret) 4356 ret = count; 4357 out: 4358 mutex_unlock(&probe->ops.func_hash->regex_lock); 4359 free_ftrace_hash(hash); 4360 4361 release_probe(probe); 4362 4363 return ret; 4364 4365 err_unlock: 4366 if (!probe_ops->free || !count) 4367 goto out_unlock; 4368 4369 /* Failed to do the move, need to call the free functions */ 4370 for (i = 0; i < size; i++) { 4371 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 4372 if (ftrace_lookup_ip(old_hash, entry->ip)) 4373 continue; 4374 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4375 } 4376 } 4377 goto out_unlock; 4378 } 4379 4380 int 4381 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 4382 struct ftrace_probe_ops *probe_ops) 4383 { 4384 struct ftrace_ops_hash old_hash_ops; 4385 struct ftrace_func_entry *entry; 4386 struct ftrace_func_probe *probe; 4387 struct ftrace_glob func_g; 4388 struct ftrace_hash **orig_hash; 4389 struct ftrace_hash *old_hash; 4390 struct ftrace_hash *hash = NULL; 4391 struct hlist_node *tmp; 4392 struct hlist_head hhd; 4393 char str[KSYM_SYMBOL_LEN]; 4394 int count = 0; 4395 int i, ret = -ENODEV; 4396 int size; 4397 4398 if (!glob || !strlen(glob) || !strcmp(glob, "*")) 4399 func_g.search = NULL; 4400 else { 4401 int not; 4402 4403 func_g.type = filter_parse_regex(glob, strlen(glob), 4404 &func_g.search, ¬); 4405 func_g.len = strlen(func_g.search); 4406 4407 /* we do not support '!' for function probes */ 4408 if (WARN_ON(not)) 4409 return -EINVAL; 4410 } 4411 4412 mutex_lock(&ftrace_lock); 4413 /* Check if the probe_ops is already registered */ 4414 list_for_each_entry(probe, &tr->func_probes, list) { 4415 if (probe->probe_ops == probe_ops) 4416 break; 4417 } 4418 if (&probe->list == &tr->func_probes) 4419 goto err_unlock_ftrace; 4420 4421 ret = -EINVAL; 4422 if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED)) 4423 goto err_unlock_ftrace; 4424 4425 acquire_probe_locked(probe); 4426 4427 mutex_unlock(&ftrace_lock); 4428 4429 mutex_lock(&probe->ops.func_hash->regex_lock); 4430 4431 orig_hash = &probe->ops.func_hash->filter_hash; 4432 old_hash = *orig_hash; 4433 4434 if (ftrace_hash_empty(old_hash)) 4435 goto out_unlock; 4436 4437 old_hash_ops.filter_hash = old_hash; 4438 /* Probes only have filters */ 4439 old_hash_ops.notrace_hash = NULL; 4440 4441 ret = -ENOMEM; 4442 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); 4443 if (!hash) 4444 goto out_unlock; 4445 4446 INIT_HLIST_HEAD(&hhd); 4447 4448 size = 1 << hash->size_bits; 4449 for (i = 0; i < size; i++) { 4450 hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) { 4451 4452 if (func_g.search) { 4453 kallsyms_lookup(entry->ip, NULL, NULL, 4454 NULL, str); 4455 if (!ftrace_match(str, &func_g)) 4456 continue; 4457 } 4458 count++; 4459 remove_hash_entry(hash, entry); 4460 hlist_add_head(&entry->hlist, &hhd); 4461 } 4462 } 4463 4464 /* Nothing found? 
*/ 4465 if (!count) { 4466 ret = -EINVAL; 4467 goto out_unlock; 4468 } 4469 4470 mutex_lock(&ftrace_lock); 4471 4472 WARN_ON(probe->ref < count); 4473 4474 probe->ref -= count; 4475 4476 if (ftrace_hash_empty(hash)) 4477 ftrace_shutdown(&probe->ops, 0); 4478 4479 ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash, 4480 hash, 1); 4481 4482 /* still need to update the function call sites */ 4483 if (ftrace_enabled && !ftrace_hash_empty(hash)) 4484 ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS, 4485 &old_hash_ops); 4486 synchronize_rcu(); 4487 4488 hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) { 4489 hlist_del(&entry->hlist); 4490 if (probe_ops->free) 4491 probe_ops->free(probe_ops, tr, entry->ip, probe->data); 4492 kfree(entry); 4493 } 4494 mutex_unlock(&ftrace_lock); 4495 4496 out_unlock: 4497 mutex_unlock(&probe->ops.func_hash->regex_lock); 4498 free_ftrace_hash(hash); 4499 4500 release_probe(probe); 4501 4502 return ret; 4503 4504 err_unlock_ftrace: 4505 mutex_unlock(&ftrace_lock); 4506 return ret; 4507 } 4508 4509 void clear_ftrace_function_probes(struct trace_array *tr) 4510 { 4511 struct ftrace_func_probe *probe, *n; 4512 4513 list_for_each_entry_safe(probe, n, &tr->func_probes, list) 4514 unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops); 4515 } 4516 4517 static LIST_HEAD(ftrace_commands); 4518 static DEFINE_MUTEX(ftrace_cmd_mutex); 4519 4520 /* 4521 * Currently we only register ftrace commands from __init, so mark this 4522 * __init too. 4523 */ 4524 __init int register_ftrace_command(struct ftrace_func_command *cmd) 4525 { 4526 struct ftrace_func_command *p; 4527 int ret = 0; 4528 4529 mutex_lock(&ftrace_cmd_mutex); 4530 list_for_each_entry(p, &ftrace_commands, list) { 4531 if (strcmp(cmd->name, p->name) == 0) { 4532 ret = -EBUSY; 4533 goto out_unlock; 4534 } 4535 } 4536 list_add(&cmd->list, &ftrace_commands); 4537 out_unlock: 4538 mutex_unlock(&ftrace_cmd_mutex); 4539 4540 return ret; 4541 } 4542 4543 /* 4544 * Currently we only unregister ftrace commands from __init, so mark 4545 * this __init too. 
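 *
 * As with register_ftrace_command() above, the only expected callers are
 * built-in command definitions. A command looks roughly like the sketch
 * below (names are illustrative; the callback signature matches what
 * ftrace_process_regex() below invokes):
 *
 *	static int my_cmd_func(struct trace_array *tr, struct ftrace_hash *hash,
 *			       char *func, char *cmd, char *param, int enable);
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	register_ftrace_command(&my_cmd);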
4546 */ 4547 __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 4548 { 4549 struct ftrace_func_command *p, *n; 4550 int ret = -ENODEV; 4551 4552 mutex_lock(&ftrace_cmd_mutex); 4553 list_for_each_entry_safe(p, n, &ftrace_commands, list) { 4554 if (strcmp(cmd->name, p->name) == 0) { 4555 ret = 0; 4556 list_del_init(&p->list); 4557 goto out_unlock; 4558 } 4559 } 4560 out_unlock: 4561 mutex_unlock(&ftrace_cmd_mutex); 4562 4563 return ret; 4564 } 4565 4566 static int ftrace_process_regex(struct ftrace_iterator *iter, 4567 char *buff, int len, int enable) 4568 { 4569 struct ftrace_hash *hash = iter->hash; 4570 struct trace_array *tr = iter->ops->private; 4571 char *func, *command, *next = buff; 4572 struct ftrace_func_command *p; 4573 int ret = -EINVAL; 4574 4575 func = strsep(&next, ":"); 4576 4577 if (!next) { 4578 ret = ftrace_match_records(hash, func, len); 4579 if (!ret) 4580 ret = -EINVAL; 4581 if (ret < 0) 4582 return ret; 4583 return 0; 4584 } 4585 4586 /* command found */ 4587 4588 command = strsep(&next, ":"); 4589 4590 mutex_lock(&ftrace_cmd_mutex); 4591 list_for_each_entry(p, &ftrace_commands, list) { 4592 if (strcmp(p->name, command) == 0) { 4593 ret = p->func(tr, hash, func, command, next, enable); 4594 goto out_unlock; 4595 } 4596 } 4597 out_unlock: 4598 mutex_unlock(&ftrace_cmd_mutex); 4599 4600 return ret; 4601 } 4602 4603 static ssize_t 4604 ftrace_regex_write(struct file *file, const char __user *ubuf, 4605 size_t cnt, loff_t *ppos, int enable) 4606 { 4607 struct ftrace_iterator *iter; 4608 struct trace_parser *parser; 4609 ssize_t ret, read; 4610 4611 if (!cnt) 4612 return 0; 4613 4614 if (file->f_mode & FMODE_READ) { 4615 struct seq_file *m = file->private_data; 4616 iter = m->private; 4617 } else 4618 iter = file->private_data; 4619 4620 if (unlikely(ftrace_disabled)) 4621 return -ENODEV; 4622 4623 /* iter->hash is a local copy, so we don't need regex_lock */ 4624 4625 parser = &iter->parser; 4626 read = trace_get_user(parser, ubuf, cnt, ppos); 4627 4628 if (read >= 0 && trace_parser_loaded(parser) && 4629 !trace_parser_cont(parser)) { 4630 ret = ftrace_process_regex(iter, parser->buffer, 4631 parser->idx, enable); 4632 trace_parser_clear(parser); 4633 if (ret < 0) 4634 goto out; 4635 } 4636 4637 ret = read; 4638 out: 4639 return ret; 4640 } 4641 4642 ssize_t 4643 ftrace_filter_write(struct file *file, const char __user *ubuf, 4644 size_t cnt, loff_t *ppos) 4645 { 4646 return ftrace_regex_write(file, ubuf, cnt, ppos, 1); 4647 } 4648 4649 ssize_t 4650 ftrace_notrace_write(struct file *file, const char __user *ubuf, 4651 size_t cnt, loff_t *ppos) 4652 { 4653 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 4654 } 4655 4656 static int 4657 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) 4658 { 4659 struct ftrace_func_entry *entry; 4660 4661 if (!ftrace_location(ip)) 4662 return -EINVAL; 4663 4664 if (remove) { 4665 entry = ftrace_lookup_ip(hash, ip); 4666 if (!entry) 4667 return -ENOENT; 4668 free_hash_entry(hash, entry); 4669 return 0; 4670 } 4671 4672 return add_hash_entry(hash, ip); 4673 } 4674 4675 static int 4676 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 4677 unsigned long ip, int remove, int reset, int enable) 4678 { 4679 struct ftrace_hash **orig_hash; 4680 struct ftrace_hash *hash; 4681 int ret; 4682 4683 if (unlikely(ftrace_disabled)) 4684 return -ENODEV; 4685 4686 mutex_lock(&ops->func_hash->regex_lock); 4687 4688 if (enable) 4689 orig_hash = &ops->func_hash->filter_hash; 4690 else 4691 
orig_hash = &ops->func_hash->notrace_hash; 4692 4693 if (reset) 4694 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4695 else 4696 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 4697 4698 if (!hash) { 4699 ret = -ENOMEM; 4700 goto out_regex_unlock; 4701 } 4702 4703 if (buf && !ftrace_match_records(hash, buf, len)) { 4704 ret = -EINVAL; 4705 goto out_regex_unlock; 4706 } 4707 if (ip) { 4708 ret = ftrace_match_addr(hash, ip, remove); 4709 if (ret < 0) 4710 goto out_regex_unlock; 4711 } 4712 4713 mutex_lock(&ftrace_lock); 4714 ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable); 4715 mutex_unlock(&ftrace_lock); 4716 4717 out_regex_unlock: 4718 mutex_unlock(&ops->func_hash->regex_lock); 4719 4720 free_ftrace_hash(hash); 4721 return ret; 4722 } 4723 4724 static int 4725 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, 4726 int reset, int enable) 4727 { 4728 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); 4729 } 4730 4731 /** 4732 * ftrace_set_filter_ip - set a function to filter on in ftrace by address 4733 * @ops - the ops to set the filter with 4734 * @ip - the address to add to or remove from the filter. 4735 * @remove - non zero to remove the ip from the filter 4736 * @reset - non zero to reset all filters before applying this filter. 4737 * 4738 * Filters denote which functions should be enabled when tracing is enabled 4739 * If @ip is NULL, it failes to update filter. 4740 */ 4741 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, 4742 int remove, int reset) 4743 { 4744 ftrace_ops_init(ops); 4745 return ftrace_set_addr(ops, ip, remove, reset, 1); 4746 } 4747 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); 4748 4749 /** 4750 * ftrace_ops_set_global_filter - setup ops to use global filters 4751 * @ops - the ops which will use the global filters 4752 * 4753 * ftrace users who need global function trace filtering should call this. 4754 * It can set the global filter only if ops were not initialized before. 4755 */ 4756 void ftrace_ops_set_global_filter(struct ftrace_ops *ops) 4757 { 4758 if (ops->flags & FTRACE_OPS_FL_INITIALIZED) 4759 return; 4760 4761 ftrace_ops_init(ops); 4762 ops->func_hash = &global_ops.local_hash; 4763 } 4764 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter); 4765 4766 static int 4767 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 4768 int reset, int enable) 4769 { 4770 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); 4771 } 4772 4773 /** 4774 * ftrace_set_filter - set a function to filter on in ftrace 4775 * @ops - the ops to set the filter with 4776 * @buf - the string that holds the function filter text. 4777 * @len - the length of the string. 4778 * @reset - non zero to reset all filters before applying this filter. 4779 * 4780 * Filters denote which functions should be enabled when tracing is enabled. 4781 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 4782 */ 4783 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 4784 int len, int reset) 4785 { 4786 ftrace_ops_init(ops); 4787 return ftrace_set_regex(ops, buf, len, reset, 1); 4788 } 4789 EXPORT_SYMBOL_GPL(ftrace_set_filter); 4790 4791 /** 4792 * ftrace_set_notrace - set a function to not trace in ftrace 4793 * @ops - the ops to set the notrace filter with 4794 * @buf - the string that holds the function notrace text. 4795 * @len - the length of the string. 4796 * @reset - non zero to reset all filters before applying this filter. 
4797 * 4798 * Notrace Filters denote which functions should not be enabled when tracing 4799 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 4800 * for tracing. 4801 */ 4802 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 4803 int len, int reset) 4804 { 4805 ftrace_ops_init(ops); 4806 return ftrace_set_regex(ops, buf, len, reset, 0); 4807 } 4808 EXPORT_SYMBOL_GPL(ftrace_set_notrace); 4809 /** 4810 * ftrace_set_global_filter - set a function to filter on with global tracers 4811 * @buf - the string that holds the function filter text. 4812 * @len - the length of the string. 4813 * @reset - non zero to reset all filters before applying this filter. 4814 * 4815 * Filters denote which functions should be enabled when tracing is enabled. 4816 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 4817 */ 4818 void ftrace_set_global_filter(unsigned char *buf, int len, int reset) 4819 { 4820 ftrace_set_regex(&global_ops, buf, len, reset, 1); 4821 } 4822 EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 4823 4824 /** 4825 * ftrace_set_global_notrace - set a function to not trace with global tracers 4826 * @buf - the string that holds the function notrace text. 4827 * @len - the length of the string. 4828 * @reset - non zero to reset all filters before applying this filter. 4829 * 4830 * Notrace Filters denote which functions should not be enabled when tracing 4831 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 4832 * for tracing. 4833 */ 4834 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) 4835 { 4836 ftrace_set_regex(&global_ops, buf, len, reset, 0); 4837 } 4838 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); 4839 4840 /* 4841 * command line interface to allow users to set filters on boot up. 
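 *
 * For example, on the kernel command line (the accepted parameter names
 * are the __setup() strings below):
 *
 *	ftrace_filter=kmem_cache_alloc,kfree
 *	ftrace_notrace=preempt_count_add,preempt_count_sub
 *	ftrace_graph_filter=do_sys_open
 *	ftrace_graph_max_depth=5
 *
 * Entries are comma separated and may use the same glob patterns
 * (e.g. "sched_*") as the set_ftrace_filter file at run time.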
4842 */ 4843 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 4844 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 4845 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 4846 4847 /* Used by function selftest to not test if filter is set */ 4848 bool ftrace_filter_param __initdata; 4849 4850 static int __init set_ftrace_notrace(char *str) 4851 { 4852 ftrace_filter_param = true; 4853 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 4854 return 1; 4855 } 4856 __setup("ftrace_notrace=", set_ftrace_notrace); 4857 4858 static int __init set_ftrace_filter(char *str) 4859 { 4860 ftrace_filter_param = true; 4861 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 4862 return 1; 4863 } 4864 __setup("ftrace_filter=", set_ftrace_filter); 4865 4866 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4867 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 4868 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 4869 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); 4870 4871 static int __init set_graph_function(char *str) 4872 { 4873 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 4874 return 1; 4875 } 4876 __setup("ftrace_graph_filter=", set_graph_function); 4877 4878 static int __init set_graph_notrace_function(char *str) 4879 { 4880 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE); 4881 return 1; 4882 } 4883 __setup("ftrace_graph_notrace=", set_graph_notrace_function); 4884 4885 static int __init set_graph_max_depth_function(char *str) 4886 { 4887 if (!str) 4888 return 0; 4889 fgraph_max_depth = simple_strtoul(str, NULL, 0); 4890 return 1; 4891 } 4892 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function); 4893 4894 static void __init set_ftrace_early_graph(char *buf, int enable) 4895 { 4896 int ret; 4897 char *func; 4898 struct ftrace_hash *hash; 4899 4900 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); 4901 if (WARN_ON(!hash)) 4902 return; 4903 4904 while (buf) { 4905 func = strsep(&buf, ","); 4906 /* we allow only one expression at a time */ 4907 ret = ftrace_graph_set_hash(hash, func); 4908 if (ret) 4909 printk(KERN_DEBUG "ftrace: function %s not " 4910 "traceable\n", func); 4911 } 4912 4913 if (enable) 4914 ftrace_graph_hash = hash; 4915 else 4916 ftrace_graph_notrace_hash = hash; 4917 } 4918 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4919 4920 void __init 4921 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) 4922 { 4923 char *func; 4924 4925 ftrace_ops_init(ops); 4926 4927 while (buf) { 4928 func = strsep(&buf, ","); 4929 ftrace_set_regex(ops, func, strlen(func), 0, enable); 4930 } 4931 } 4932 4933 static void __init set_ftrace_early_filters(void) 4934 { 4935 if (ftrace_filter_buf[0]) 4936 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); 4937 if (ftrace_notrace_buf[0]) 4938 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); 4939 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4940 if (ftrace_graph_buf[0]) 4941 set_ftrace_early_graph(ftrace_graph_buf, 1); 4942 if (ftrace_graph_notrace_buf[0]) 4943 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0); 4944 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4945 } 4946 4947 int ftrace_regex_release(struct inode *inode, struct file *file) 4948 { 4949 struct seq_file *m = (struct seq_file *)file->private_data; 4950 struct ftrace_iterator *iter; 4951 struct ftrace_hash **orig_hash; 4952 struct trace_parser *parser; 4953 int filter_hash; 4954 int ret; 4955 4956 if (file->f_mode & FMODE_READ) { 4957 iter = m->private; 4958 seq_release(inode, 
file); 4959 } else 4960 iter = file->private_data; 4961 4962 parser = &iter->parser; 4963 if (trace_parser_loaded(parser)) { 4964 ftrace_match_records(iter->hash, parser->buffer, parser->idx); 4965 } 4966 4967 trace_parser_put(parser); 4968 4969 mutex_lock(&iter->ops->func_hash->regex_lock); 4970 4971 if (file->f_mode & FMODE_WRITE) { 4972 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 4973 4974 if (filter_hash) { 4975 orig_hash = &iter->ops->func_hash->filter_hash; 4976 if (iter->tr && !list_empty(&iter->tr->mod_trace)) 4977 iter->hash->flags |= FTRACE_HASH_FL_MOD; 4978 } else 4979 orig_hash = &iter->ops->func_hash->notrace_hash; 4980 4981 mutex_lock(&ftrace_lock); 4982 ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash, 4983 iter->hash, filter_hash); 4984 mutex_unlock(&ftrace_lock); 4985 } else { 4986 /* For read only, the hash is the ops hash */ 4987 iter->hash = NULL; 4988 } 4989 4990 mutex_unlock(&iter->ops->func_hash->regex_lock); 4991 free_ftrace_hash(iter->hash); 4992 kfree(iter); 4993 4994 return 0; 4995 } 4996 4997 static const struct file_operations ftrace_avail_fops = { 4998 .open = ftrace_avail_open, 4999 .read = seq_read, 5000 .llseek = seq_lseek, 5001 .release = seq_release_private, 5002 }; 5003 5004 static const struct file_operations ftrace_enabled_fops = { 5005 .open = ftrace_enabled_open, 5006 .read = seq_read, 5007 .llseek = seq_lseek, 5008 .release = seq_release_private, 5009 }; 5010 5011 static const struct file_operations ftrace_filter_fops = { 5012 .open = ftrace_filter_open, 5013 .read = seq_read, 5014 .write = ftrace_filter_write, 5015 .llseek = tracing_lseek, 5016 .release = ftrace_regex_release, 5017 }; 5018 5019 static const struct file_operations ftrace_notrace_fops = { 5020 .open = ftrace_notrace_open, 5021 .read = seq_read, 5022 .write = ftrace_notrace_write, 5023 .llseek = tracing_lseek, 5024 .release = ftrace_regex_release, 5025 }; 5026 5027 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5028 5029 static DEFINE_MUTEX(graph_lock); 5030 5031 struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH; 5032 struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH; 5033 5034 enum graph_filter_type { 5035 GRAPH_FILTER_NOTRACE = 0, 5036 GRAPH_FILTER_FUNCTION, 5037 }; 5038 5039 #define FTRACE_GRAPH_EMPTY ((void *)1) 5040 5041 struct ftrace_graph_data { 5042 struct ftrace_hash *hash; 5043 struct ftrace_func_entry *entry; 5044 int idx; /* for hash table iteration */ 5045 enum graph_filter_type type; 5046 struct ftrace_hash *new_hash; 5047 const struct seq_operations *seq_ops; 5048 struct trace_parser parser; 5049 }; 5050 5051 static void * 5052 __g_next(struct seq_file *m, loff_t *pos) 5053 { 5054 struct ftrace_graph_data *fgd = m->private; 5055 struct ftrace_func_entry *entry = fgd->entry; 5056 struct hlist_head *head; 5057 int i, idx = fgd->idx; 5058 5059 if (*pos >= fgd->hash->count) 5060 return NULL; 5061 5062 if (entry) { 5063 hlist_for_each_entry_continue(entry, hlist) { 5064 fgd->entry = entry; 5065 return entry; 5066 } 5067 5068 idx++; 5069 } 5070 5071 for (i = idx; i < 1 << fgd->hash->size_bits; i++) { 5072 head = &fgd->hash->buckets[i]; 5073 hlist_for_each_entry(entry, head, hlist) { 5074 fgd->entry = entry; 5075 fgd->idx = i; 5076 return entry; 5077 } 5078 } 5079 return NULL; 5080 } 5081 5082 static void * 5083 g_next(struct seq_file *m, void *v, loff_t *pos) 5084 { 5085 (*pos)++; 5086 return __g_next(m, pos); 5087 } 5088 5089 static void *g_start(struct seq_file *m, loff_t *pos) 5090 { 5091 struct ftrace_graph_data *fgd = m->private; 5092 5093 
mutex_lock(&graph_lock); 5094 5095 if (fgd->type == GRAPH_FILTER_FUNCTION) 5096 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 5097 lockdep_is_held(&graph_lock)); 5098 else 5099 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5100 lockdep_is_held(&graph_lock)); 5101 5102 /* Nothing, tell g_show to print all functions are enabled */ 5103 if (ftrace_hash_empty(fgd->hash) && !*pos) 5104 return FTRACE_GRAPH_EMPTY; 5105 5106 fgd->idx = 0; 5107 fgd->entry = NULL; 5108 return __g_next(m, pos); 5109 } 5110 5111 static void g_stop(struct seq_file *m, void *p) 5112 { 5113 mutex_unlock(&graph_lock); 5114 } 5115 5116 static int g_show(struct seq_file *m, void *v) 5117 { 5118 struct ftrace_func_entry *entry = v; 5119 5120 if (!entry) 5121 return 0; 5122 5123 if (entry == FTRACE_GRAPH_EMPTY) { 5124 struct ftrace_graph_data *fgd = m->private; 5125 5126 if (fgd->type == GRAPH_FILTER_FUNCTION) 5127 seq_puts(m, "#### all functions enabled ####\n"); 5128 else 5129 seq_puts(m, "#### no functions disabled ####\n"); 5130 return 0; 5131 } 5132 5133 seq_printf(m, "%ps\n", (void *)entry->ip); 5134 5135 return 0; 5136 } 5137 5138 static const struct seq_operations ftrace_graph_seq_ops = { 5139 .start = g_start, 5140 .next = g_next, 5141 .stop = g_stop, 5142 .show = g_show, 5143 }; 5144 5145 static int 5146 __ftrace_graph_open(struct inode *inode, struct file *file, 5147 struct ftrace_graph_data *fgd) 5148 { 5149 int ret = 0; 5150 struct ftrace_hash *new_hash = NULL; 5151 5152 if (file->f_mode & FMODE_WRITE) { 5153 const int size_bits = FTRACE_HASH_DEFAULT_BITS; 5154 5155 if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX)) 5156 return -ENOMEM; 5157 5158 if (file->f_flags & O_TRUNC) 5159 new_hash = alloc_ftrace_hash(size_bits); 5160 else 5161 new_hash = alloc_and_copy_ftrace_hash(size_bits, 5162 fgd->hash); 5163 if (!new_hash) { 5164 ret = -ENOMEM; 5165 goto out; 5166 } 5167 } 5168 5169 if (file->f_mode & FMODE_READ) { 5170 ret = seq_open(file, &ftrace_graph_seq_ops); 5171 if (!ret) { 5172 struct seq_file *m = file->private_data; 5173 m->private = fgd; 5174 } else { 5175 /* Failed */ 5176 free_ftrace_hash(new_hash); 5177 new_hash = NULL; 5178 } 5179 } else 5180 file->private_data = fgd; 5181 5182 out: 5183 if (ret < 0 && file->f_mode & FMODE_WRITE) 5184 trace_parser_put(&fgd->parser); 5185 5186 fgd->new_hash = new_hash; 5187 5188 /* 5189 * All uses of fgd->hash must be taken with the graph_lock 5190 * held. The graph_lock is going to be released, so force 5191 * fgd->hash to be reinitialized when it is taken again. 
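 * (g_start() re-reads the pointer under graph_lock each time the
 * seq_file iteration starts, so a stale fgd->hash is never used.)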
5192 */ 5193 fgd->hash = NULL; 5194 5195 return ret; 5196 } 5197 5198 static int 5199 ftrace_graph_open(struct inode *inode, struct file *file) 5200 { 5201 struct ftrace_graph_data *fgd; 5202 int ret; 5203 5204 if (unlikely(ftrace_disabled)) 5205 return -ENODEV; 5206 5207 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 5208 if (fgd == NULL) 5209 return -ENOMEM; 5210 5211 mutex_lock(&graph_lock); 5212 5213 fgd->hash = rcu_dereference_protected(ftrace_graph_hash, 5214 lockdep_is_held(&graph_lock)); 5215 fgd->type = GRAPH_FILTER_FUNCTION; 5216 fgd->seq_ops = &ftrace_graph_seq_ops; 5217 5218 ret = __ftrace_graph_open(inode, file, fgd); 5219 if (ret < 0) 5220 kfree(fgd); 5221 5222 mutex_unlock(&graph_lock); 5223 return ret; 5224 } 5225 5226 static int 5227 ftrace_graph_notrace_open(struct inode *inode, struct file *file) 5228 { 5229 struct ftrace_graph_data *fgd; 5230 int ret; 5231 5232 if (unlikely(ftrace_disabled)) 5233 return -ENODEV; 5234 5235 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 5236 if (fgd == NULL) 5237 return -ENOMEM; 5238 5239 mutex_lock(&graph_lock); 5240 5241 fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5242 lockdep_is_held(&graph_lock)); 5243 fgd->type = GRAPH_FILTER_NOTRACE; 5244 fgd->seq_ops = &ftrace_graph_seq_ops; 5245 5246 ret = __ftrace_graph_open(inode, file, fgd); 5247 if (ret < 0) 5248 kfree(fgd); 5249 5250 mutex_unlock(&graph_lock); 5251 return ret; 5252 } 5253 5254 static int 5255 ftrace_graph_release(struct inode *inode, struct file *file) 5256 { 5257 struct ftrace_graph_data *fgd; 5258 struct ftrace_hash *old_hash, *new_hash; 5259 struct trace_parser *parser; 5260 int ret = 0; 5261 5262 if (file->f_mode & FMODE_READ) { 5263 struct seq_file *m = file->private_data; 5264 5265 fgd = m->private; 5266 seq_release(inode, file); 5267 } else { 5268 fgd = file->private_data; 5269 } 5270 5271 5272 if (file->f_mode & FMODE_WRITE) { 5273 5274 parser = &fgd->parser; 5275 5276 if (trace_parser_loaded((parser))) { 5277 ret = ftrace_graph_set_hash(fgd->new_hash, 5278 parser->buffer); 5279 } 5280 5281 trace_parser_put(parser); 5282 5283 new_hash = __ftrace_hash_move(fgd->new_hash); 5284 if (!new_hash) { 5285 ret = -ENOMEM; 5286 goto out; 5287 } 5288 5289 mutex_lock(&graph_lock); 5290 5291 if (fgd->type == GRAPH_FILTER_FUNCTION) { 5292 old_hash = rcu_dereference_protected(ftrace_graph_hash, 5293 lockdep_is_held(&graph_lock)); 5294 rcu_assign_pointer(ftrace_graph_hash, new_hash); 5295 } else { 5296 old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 5297 lockdep_is_held(&graph_lock)); 5298 rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash); 5299 } 5300 5301 mutex_unlock(&graph_lock); 5302 5303 /* Wait till all users are no longer using the old hash */ 5304 synchronize_rcu(); 5305 5306 free_ftrace_hash(old_hash); 5307 } 5308 5309 out: 5310 free_ftrace_hash(fgd->new_hash); 5311 kfree(fgd); 5312 5313 return ret; 5314 } 5315 5316 static int 5317 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) 5318 { 5319 struct ftrace_glob func_g; 5320 struct dyn_ftrace *rec; 5321 struct ftrace_page *pg; 5322 struct ftrace_func_entry *entry; 5323 int fail = 1; 5324 int not; 5325 5326 /* decode regex */ 5327 func_g.type = filter_parse_regex(buffer, strlen(buffer), 5328 &func_g.search, ¬); 5329 5330 func_g.len = strlen(func_g.search); 5331 5332 mutex_lock(&ftrace_lock); 5333 5334 if (unlikely(ftrace_disabled)) { 5335 mutex_unlock(&ftrace_lock); 5336 return -ENODEV; 5337 } 5338 5339 do_for_each_ftrace_rec(pg, rec) { 5340 5341 if (rec->flags & FTRACE_FL_DISABLED) 
5342 continue; 5343 5344 if (ftrace_match_record(rec, &func_g, NULL, 0)) { 5345 entry = ftrace_lookup_ip(hash, rec->ip); 5346 5347 if (!not) { 5348 fail = 0; 5349 5350 if (entry) 5351 continue; 5352 if (add_hash_entry(hash, rec->ip) < 0) 5353 goto out; 5354 } else { 5355 if (entry) { 5356 free_hash_entry(hash, entry); 5357 fail = 0; 5358 } 5359 } 5360 } 5361 } while_for_each_ftrace_rec(); 5362 out: 5363 mutex_unlock(&ftrace_lock); 5364 5365 if (fail) 5366 return -EINVAL; 5367 5368 return 0; 5369 } 5370 5371 static ssize_t 5372 ftrace_graph_write(struct file *file, const char __user *ubuf, 5373 size_t cnt, loff_t *ppos) 5374 { 5375 ssize_t read, ret = 0; 5376 struct ftrace_graph_data *fgd = file->private_data; 5377 struct trace_parser *parser; 5378 5379 if (!cnt) 5380 return 0; 5381 5382 /* Read mode uses seq functions */ 5383 if (file->f_mode & FMODE_READ) { 5384 struct seq_file *m = file->private_data; 5385 fgd = m->private; 5386 } 5387 5388 parser = &fgd->parser; 5389 5390 read = trace_get_user(parser, ubuf, cnt, ppos); 5391 5392 if (read >= 0 && trace_parser_loaded(parser) && 5393 !trace_parser_cont(parser)) { 5394 5395 ret = ftrace_graph_set_hash(fgd->new_hash, 5396 parser->buffer); 5397 trace_parser_clear(parser); 5398 } 5399 5400 if (!ret) 5401 ret = read; 5402 5403 return ret; 5404 } 5405 5406 static const struct file_operations ftrace_graph_fops = { 5407 .open = ftrace_graph_open, 5408 .read = seq_read, 5409 .write = ftrace_graph_write, 5410 .llseek = tracing_lseek, 5411 .release = ftrace_graph_release, 5412 }; 5413 5414 static const struct file_operations ftrace_graph_notrace_fops = { 5415 .open = ftrace_graph_notrace_open, 5416 .read = seq_read, 5417 .write = ftrace_graph_write, 5418 .llseek = tracing_lseek, 5419 .release = ftrace_graph_release, 5420 }; 5421 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 5422 5423 void ftrace_create_filter_files(struct ftrace_ops *ops, 5424 struct dentry *parent) 5425 { 5426 5427 trace_create_file("set_ftrace_filter", 0644, parent, 5428 ops, &ftrace_filter_fops); 5429 5430 trace_create_file("set_ftrace_notrace", 0644, parent, 5431 ops, &ftrace_notrace_fops); 5432 } 5433 5434 /* 5435 * The name "destroy_filter_files" is really a misnomer. Although 5436 * in the future, it may actualy delete the files, but this is 5437 * really intended to make sure the ops passed in are disabled 5438 * and that when this function returns, the caller is free to 5439 * free the ops. 5440 * 5441 * The "destroy" name is only to match the "create" name that this 5442 * should be paired with. 
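 *
 * A typical pairing is simply (sketch only, "my_ops" is illustrative):
 *
 *	ftrace_create_filter_files(&my_ops, parent);
 *	...
 *	ftrace_destroy_filter_files(&my_ops);
 *
 * after which my_ops may safely be freed by the caller.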
5443 */ 5444 void ftrace_destroy_filter_files(struct ftrace_ops *ops) 5445 { 5446 mutex_lock(&ftrace_lock); 5447 if (ops->flags & FTRACE_OPS_FL_ENABLED) 5448 ftrace_shutdown(ops, 0); 5449 ops->flags |= FTRACE_OPS_FL_DELETED; 5450 ftrace_free_filter(ops); 5451 mutex_unlock(&ftrace_lock); 5452 } 5453 5454 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer) 5455 { 5456 5457 trace_create_file("available_filter_functions", 0444, 5458 d_tracer, NULL, &ftrace_avail_fops); 5459 5460 trace_create_file("enabled_functions", 0444, 5461 d_tracer, NULL, &ftrace_enabled_fops); 5462 5463 ftrace_create_filter_files(&global_ops, d_tracer); 5464 5465 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 5466 trace_create_file("set_graph_function", 0644, d_tracer, 5467 NULL, 5468 &ftrace_graph_fops); 5469 trace_create_file("set_graph_notrace", 0644, d_tracer, 5470 NULL, 5471 &ftrace_graph_notrace_fops); 5472 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 5473 5474 return 0; 5475 } 5476 5477 static int ftrace_cmp_ips(const void *a, const void *b) 5478 { 5479 const unsigned long *ipa = a; 5480 const unsigned long *ipb = b; 5481 5482 if (*ipa > *ipb) 5483 return 1; 5484 if (*ipa < *ipb) 5485 return -1; 5486 return 0; 5487 } 5488 5489 static int ftrace_process_locs(struct module *mod, 5490 unsigned long *start, 5491 unsigned long *end) 5492 { 5493 struct ftrace_page *start_pg; 5494 struct ftrace_page *pg; 5495 struct dyn_ftrace *rec; 5496 unsigned long count; 5497 unsigned long *p; 5498 unsigned long addr; 5499 unsigned long flags = 0; /* Shut up gcc */ 5500 int ret = -ENOMEM; 5501 5502 count = end - start; 5503 5504 if (!count) 5505 return 0; 5506 5507 sort(start, count, sizeof(*start), 5508 ftrace_cmp_ips, NULL); 5509 5510 start_pg = ftrace_allocate_pages(count); 5511 if (!start_pg) 5512 return -ENOMEM; 5513 5514 mutex_lock(&ftrace_lock); 5515 5516 /* 5517 * Core and each module needs their own pages, as 5518 * modules will free them when they are removed. 5519 * Force a new page to be allocated for modules. 5520 */ 5521 if (!mod) { 5522 WARN_ON(ftrace_pages || ftrace_pages_start); 5523 /* First initialization */ 5524 ftrace_pages = ftrace_pages_start = start_pg; 5525 } else { 5526 if (!ftrace_pages) 5527 goto out; 5528 5529 if (WARN_ON(ftrace_pages->next)) { 5530 /* Hmm, we have free pages? */ 5531 while (ftrace_pages->next) 5532 ftrace_pages = ftrace_pages->next; 5533 } 5534 5535 ftrace_pages->next = start_pg; 5536 } 5537 5538 p = start; 5539 pg = start_pg; 5540 while (p < end) { 5541 addr = ftrace_call_adjust(*p++); 5542 /* 5543 * Some architecture linkers will pad between 5544 * the different mcount_loc sections of different 5545 * object files to satisfy alignments. 5546 * Skip any NULL pointers. 5547 */ 5548 if (!addr) 5549 continue; 5550 5551 if (pg->index == pg->size) { 5552 /* We should have allocated enough */ 5553 if (WARN_ON(!pg->next)) 5554 break; 5555 pg = pg->next; 5556 } 5557 5558 rec = &pg->records[pg->index++]; 5559 rec->ip = addr; 5560 } 5561 5562 /* We should have used all pages */ 5563 WARN_ON(pg->next); 5564 5565 /* Assign the last page to ftrace_pages */ 5566 ftrace_pages = pg; 5567 5568 /* 5569 * We only need to disable interrupts on start up 5570 * because we are modifying code that an interrupt 5571 * may execute, and the modification is not atomic. 5572 * But for modules, nothing runs the code we modify 5573 * until we are finished with it, and there's no 5574 * reason to cause large interrupt latencies while we do it. 
5575 */ 5576 if (!mod) 5577 local_irq_save(flags); 5578 ftrace_update_code(mod, start_pg); 5579 if (!mod) 5580 local_irq_restore(flags); 5581 ret = 0; 5582 out: 5583 mutex_unlock(&ftrace_lock); 5584 5585 return ret; 5586 } 5587 5588 struct ftrace_mod_func { 5589 struct list_head list; 5590 char *name; 5591 unsigned long ip; 5592 unsigned int size; 5593 }; 5594 5595 struct ftrace_mod_map { 5596 struct rcu_head rcu; 5597 struct list_head list; 5598 struct module *mod; 5599 unsigned long start_addr; 5600 unsigned long end_addr; 5601 struct list_head funcs; 5602 unsigned int num_funcs; 5603 }; 5604 5605 #ifdef CONFIG_MODULES 5606 5607 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 5608 5609 static LIST_HEAD(ftrace_mod_maps); 5610 5611 static int referenced_filters(struct dyn_ftrace *rec) 5612 { 5613 struct ftrace_ops *ops; 5614 int cnt = 0; 5615 5616 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { 5617 if (ops_references_rec(ops, rec)) 5618 cnt++; 5619 } 5620 5621 return cnt; 5622 } 5623 5624 static void 5625 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash) 5626 { 5627 struct ftrace_func_entry *entry; 5628 struct dyn_ftrace *rec; 5629 int i; 5630 5631 if (ftrace_hash_empty(hash)) 5632 return; 5633 5634 for (i = 0; i < pg->index; i++) { 5635 rec = &pg->records[i]; 5636 entry = __ftrace_lookup_ip(hash, rec->ip); 5637 /* 5638 * Do not allow this rec to match again. 5639 * Yeah, it may waste some memory, but will be removed 5640 * if/when the hash is modified again. 5641 */ 5642 if (entry) 5643 entry->ip = 0; 5644 } 5645 } 5646 5647 /* Clear any records from hashs */ 5648 static void clear_mod_from_hashes(struct ftrace_page *pg) 5649 { 5650 struct trace_array *tr; 5651 5652 mutex_lock(&trace_types_lock); 5653 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 5654 if (!tr->ops || !tr->ops->func_hash) 5655 continue; 5656 mutex_lock(&tr->ops->func_hash->regex_lock); 5657 clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash); 5658 clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash); 5659 mutex_unlock(&tr->ops->func_hash->regex_lock); 5660 } 5661 mutex_unlock(&trace_types_lock); 5662 } 5663 5664 static void ftrace_free_mod_map(struct rcu_head *rcu) 5665 { 5666 struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu); 5667 struct ftrace_mod_func *mod_func; 5668 struct ftrace_mod_func *n; 5669 5670 /* All the contents of mod_map are now not visible to readers */ 5671 list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) { 5672 kfree(mod_func->name); 5673 list_del(&mod_func->list); 5674 kfree(mod_func); 5675 } 5676 5677 kfree(mod_map); 5678 } 5679 5680 void ftrace_release_mod(struct module *mod) 5681 { 5682 struct ftrace_mod_map *mod_map; 5683 struct ftrace_mod_map *n; 5684 struct dyn_ftrace *rec; 5685 struct ftrace_page **last_pg; 5686 struct ftrace_page *tmp_page = NULL; 5687 struct ftrace_page *pg; 5688 int order; 5689 5690 mutex_lock(&ftrace_lock); 5691 5692 if (ftrace_disabled) 5693 goto out_unlock; 5694 5695 list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) { 5696 if (mod_map->mod == mod) { 5697 list_del_rcu(&mod_map->list); 5698 call_rcu(&mod_map->rcu, ftrace_free_mod_map); 5699 break; 5700 } 5701 } 5702 5703 /* 5704 * Each module has its own ftrace_pages, remove 5705 * them from the list. 
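	 * The pages are collected on a temporary list here and only freed
	 * after ftrace_lock is dropped, because clear_mod_from_hashes()
	 * needs to take other locks.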
5706 */ 5707 last_pg = &ftrace_pages_start; 5708 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 5709 rec = &pg->records[0]; 5710 if (within_module_core(rec->ip, mod) || 5711 within_module_init(rec->ip, mod)) { 5712 /* 5713 * As core pages are first, the first 5714 * page should never be a module page. 5715 */ 5716 if (WARN_ON(pg == ftrace_pages_start)) 5717 goto out_unlock; 5718 5719 /* Check if we are deleting the last page */ 5720 if (pg == ftrace_pages) 5721 ftrace_pages = next_to_ftrace_page(last_pg); 5722 5723 ftrace_update_tot_cnt -= pg->index; 5724 *last_pg = pg->next; 5725 5726 pg->next = tmp_page; 5727 tmp_page = pg; 5728 } else 5729 last_pg = &pg->next; 5730 } 5731 out_unlock: 5732 mutex_unlock(&ftrace_lock); 5733 5734 for (pg = tmp_page; pg; pg = tmp_page) { 5735 5736 /* Needs to be called outside of ftrace_lock */ 5737 clear_mod_from_hashes(pg); 5738 5739 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 5740 free_pages((unsigned long)pg->records, order); 5741 tmp_page = pg->next; 5742 kfree(pg); 5743 } 5744 } 5745 5746 void ftrace_module_enable(struct module *mod) 5747 { 5748 struct dyn_ftrace *rec; 5749 struct ftrace_page *pg; 5750 5751 mutex_lock(&ftrace_lock); 5752 5753 if (ftrace_disabled) 5754 goto out_unlock; 5755 5756 /* 5757 * If the tracing is enabled, go ahead and enable the record. 5758 * 5759 * The reason not to enable the record immediatelly is the 5760 * inherent check of ftrace_make_nop/ftrace_make_call for 5761 * correct previous instructions. Making first the NOP 5762 * conversion puts the module to the correct state, thus 5763 * passing the ftrace_make_call check. 5764 * 5765 * We also delay this to after the module code already set the 5766 * text to read-only, as we now need to set it back to read-write 5767 * so that we can modify the text. 5768 */ 5769 if (ftrace_start_up) 5770 ftrace_arch_code_modify_prepare(); 5771 5772 do_for_each_ftrace_rec(pg, rec) { 5773 int cnt; 5774 /* 5775 * do_for_each_ftrace_rec() is a double loop. 5776 * module text shares the pg. If a record is 5777 * not part of this module, then skip this pg, 5778 * which the "break" will do. 5779 */ 5780 if (!within_module_core(rec->ip, mod) && 5781 !within_module_init(rec->ip, mod)) 5782 break; 5783 5784 cnt = 0; 5785 5786 /* 5787 * When adding a module, we need to check if tracers are 5788 * currently enabled and if they are, and can trace this record, 5789 * we need to enable the module functions as well as update the 5790 * reference counts for those function records. 
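	 * referenced_filters() below reports how many currently registered
	 * ftrace_ops reference this record, and that count seeds rec->flags.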
5791 */ 5792 if (ftrace_start_up) 5793 cnt += referenced_filters(rec); 5794 5795 /* This clears FTRACE_FL_DISABLED */ 5796 rec->flags = cnt; 5797 5798 if (ftrace_start_up && cnt) { 5799 int failed = __ftrace_replace_code(rec, 1); 5800 if (failed) { 5801 ftrace_bug(failed, rec); 5802 goto out_loop; 5803 } 5804 } 5805 5806 } while_for_each_ftrace_rec(); 5807 5808 out_loop: 5809 if (ftrace_start_up) 5810 ftrace_arch_code_modify_post_process(); 5811 5812 out_unlock: 5813 mutex_unlock(&ftrace_lock); 5814 5815 process_cached_mods(mod->name); 5816 } 5817 5818 void ftrace_module_init(struct module *mod) 5819 { 5820 if (ftrace_disabled || !mod->num_ftrace_callsites) 5821 return; 5822 5823 ftrace_process_locs(mod, mod->ftrace_callsites, 5824 mod->ftrace_callsites + mod->num_ftrace_callsites); 5825 } 5826 5827 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 5828 struct dyn_ftrace *rec) 5829 { 5830 struct ftrace_mod_func *mod_func; 5831 unsigned long symsize; 5832 unsigned long offset; 5833 char str[KSYM_SYMBOL_LEN]; 5834 char *modname; 5835 const char *ret; 5836 5837 ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str); 5838 if (!ret) 5839 return; 5840 5841 mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL); 5842 if (!mod_func) 5843 return; 5844 5845 mod_func->name = kstrdup(str, GFP_KERNEL); 5846 if (!mod_func->name) { 5847 kfree(mod_func); 5848 return; 5849 } 5850 5851 mod_func->ip = rec->ip - offset; 5852 mod_func->size = symsize; 5853 5854 mod_map->num_funcs++; 5855 5856 list_add_rcu(&mod_func->list, &mod_map->funcs); 5857 } 5858 5859 static struct ftrace_mod_map * 5860 allocate_ftrace_mod_map(struct module *mod, 5861 unsigned long start, unsigned long end) 5862 { 5863 struct ftrace_mod_map *mod_map; 5864 5865 mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL); 5866 if (!mod_map) 5867 return NULL; 5868 5869 mod_map->mod = mod; 5870 mod_map->start_addr = start; 5871 mod_map->end_addr = end; 5872 mod_map->num_funcs = 0; 5873 5874 INIT_LIST_HEAD_RCU(&mod_map->funcs); 5875 5876 list_add_rcu(&mod_map->list, &ftrace_mod_maps); 5877 5878 return mod_map; 5879 } 5880 5881 static const char * 5882 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map, 5883 unsigned long addr, unsigned long *size, 5884 unsigned long *off, char *sym) 5885 { 5886 struct ftrace_mod_func *found_func = NULL; 5887 struct ftrace_mod_func *mod_func; 5888 5889 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 5890 if (addr >= mod_func->ip && 5891 addr < mod_func->ip + mod_func->size) { 5892 found_func = mod_func; 5893 break; 5894 } 5895 } 5896 5897 if (found_func) { 5898 if (size) 5899 *size = found_func->size; 5900 if (off) 5901 *off = addr - found_func->ip; 5902 if (sym) 5903 strlcpy(sym, found_func->name, KSYM_NAME_LEN); 5904 5905 return found_func->name; 5906 } 5907 5908 return NULL; 5909 } 5910 5911 const char * 5912 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size, 5913 unsigned long *off, char **modname, char *sym) 5914 { 5915 struct ftrace_mod_map *mod_map; 5916 const char *ret = NULL; 5917 5918 /* mod_map is freed via call_rcu() */ 5919 preempt_disable(); 5920 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 5921 ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym); 5922 if (ret) { 5923 if (modname) 5924 *modname = mod_map->mod->name; 5925 break; 5926 } 5927 } 5928 preempt_enable(); 5929 5930 return ret; 5931 } 5932 5933 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value, 5934 char *type, char *name, 5935 char *module_name, int 
*exported) 5936 { 5937 struct ftrace_mod_map *mod_map; 5938 struct ftrace_mod_func *mod_func; 5939 5940 preempt_disable(); 5941 list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) { 5942 5943 if (symnum >= mod_map->num_funcs) { 5944 symnum -= mod_map->num_funcs; 5945 continue; 5946 } 5947 5948 list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) { 5949 if (symnum > 1) { 5950 symnum--; 5951 continue; 5952 } 5953 5954 *value = mod_func->ip; 5955 *type = 'T'; 5956 strlcpy(name, mod_func->name, KSYM_NAME_LEN); 5957 strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN); 5958 *exported = 1; 5959 preempt_enable(); 5960 return 0; 5961 } 5962 WARN_ON(1); 5963 break; 5964 } 5965 preempt_enable(); 5966 return -ERANGE; 5967 } 5968 5969 #else 5970 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map, 5971 struct dyn_ftrace *rec) { } 5972 static inline struct ftrace_mod_map * 5973 allocate_ftrace_mod_map(struct module *mod, 5974 unsigned long start, unsigned long end) 5975 { 5976 return NULL; 5977 } 5978 #endif /* CONFIG_MODULES */ 5979 5980 struct ftrace_init_func { 5981 struct list_head list; 5982 unsigned long ip; 5983 }; 5984 5985 /* Clear any init ips from hashes */ 5986 static void 5987 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash) 5988 { 5989 struct ftrace_func_entry *entry; 5990 5991 if (ftrace_hash_empty(hash)) 5992 return; 5993 5994 entry = __ftrace_lookup_ip(hash, func->ip); 5995 5996 /* 5997 * Do not allow this rec to match again. 5998 * Yeah, it may waste some memory, but will be removed 5999 * if/when the hash is modified again. 6000 */ 6001 if (entry) 6002 entry->ip = 0; 6003 } 6004 6005 static void 6006 clear_func_from_hashes(struct ftrace_init_func *func) 6007 { 6008 struct trace_array *tr; 6009 6010 mutex_lock(&trace_types_lock); 6011 list_for_each_entry(tr, &ftrace_trace_arrays, list) { 6012 if (!tr->ops || !tr->ops->func_hash) 6013 continue; 6014 mutex_lock(&tr->ops->func_hash->regex_lock); 6015 clear_func_from_hash(func, tr->ops->func_hash->filter_hash); 6016 clear_func_from_hash(func, tr->ops->func_hash->notrace_hash); 6017 mutex_unlock(&tr->ops->func_hash->regex_lock); 6018 } 6019 mutex_unlock(&trace_types_lock); 6020 } 6021 6022 static void add_to_clear_hash_list(struct list_head *clear_list, 6023 struct dyn_ftrace *rec) 6024 { 6025 struct ftrace_init_func *func; 6026 6027 func = kmalloc(sizeof(*func), GFP_KERNEL); 6028 if (!func) { 6029 WARN_ONCE(1, "alloc failure, ftrace filter could be stale\n"); 6030 return; 6031 } 6032 6033 func->ip = rec->ip; 6034 list_add(&func->list, clear_list); 6035 } 6036 6037 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr) 6038 { 6039 unsigned long start = (unsigned long)(start_ptr); 6040 unsigned long end = (unsigned long)(end_ptr); 6041 struct ftrace_page **last_pg = &ftrace_pages_start; 6042 struct ftrace_page *pg; 6043 struct dyn_ftrace *rec; 6044 struct dyn_ftrace key; 6045 struct ftrace_mod_map *mod_map = NULL; 6046 struct ftrace_init_func *func, *func_next; 6047 struct list_head clear_hash; 6048 int order; 6049 6050 INIT_LIST_HEAD(&clear_hash); 6051 6052 key.ip = start; 6053 key.flags = end; /* overload flags, as it is unsigned long */ 6054 6055 mutex_lock(&ftrace_lock); 6056 6057 /* 6058 * If we are freeing module init memory, then check if 6059 * any tracer is active. If so, we need to save a mapping of 6060 * the module functions being freed with the address. 
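	 * The saved ftrace_mod_map is later used by ftrace_mod_address_lookup()
	 * and ftrace_mod_get_kallsym() to keep resolving symbols that lived
	 * in the freed memory.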
6061 */ 6062 if (mod && ftrace_ops_list != &ftrace_list_end) 6063 mod_map = allocate_ftrace_mod_map(mod, start, end); 6064 6065 for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) { 6066 if (end < pg->records[0].ip || 6067 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 6068 continue; 6069 again: 6070 rec = bsearch(&key, pg->records, pg->index, 6071 sizeof(struct dyn_ftrace), 6072 ftrace_cmp_recs); 6073 if (!rec) 6074 continue; 6075 6076 /* rec will be cleared from hashes after ftrace_lock unlock */ 6077 add_to_clear_hash_list(&clear_hash, rec); 6078 6079 if (mod_map) 6080 save_ftrace_mod_rec(mod_map, rec); 6081 6082 pg->index--; 6083 ftrace_update_tot_cnt--; 6084 if (!pg->index) { 6085 *last_pg = pg->next; 6086 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 6087 free_pages((unsigned long)pg->records, order); 6088 kfree(pg); 6089 pg = container_of(last_pg, struct ftrace_page, next); 6090 if (!(*last_pg)) 6091 ftrace_pages = pg; 6092 continue; 6093 } 6094 memmove(rec, rec + 1, 6095 (pg->index - (rec - pg->records)) * sizeof(*rec)); 6096 /* More than one function may be in this block */ 6097 goto again; 6098 } 6099 mutex_unlock(&ftrace_lock); 6100 6101 list_for_each_entry_safe(func, func_next, &clear_hash, list) { 6102 clear_func_from_hashes(func); 6103 kfree(func); 6104 } 6105 } 6106 6107 void __init ftrace_free_init_mem(void) 6108 { 6109 void *start = (void *)(&__init_begin); 6110 void *end = (void *)(&__init_end); 6111 6112 ftrace_free_mem(NULL, start, end); 6113 } 6114 6115 void __init ftrace_init(void) 6116 { 6117 extern unsigned long __start_mcount_loc[]; 6118 extern unsigned long __stop_mcount_loc[]; 6119 unsigned long count, flags; 6120 int ret; 6121 6122 local_irq_save(flags); 6123 ret = ftrace_dyn_arch_init(); 6124 local_irq_restore(flags); 6125 if (ret) 6126 goto failed; 6127 6128 count = __stop_mcount_loc - __start_mcount_loc; 6129 if (!count) { 6130 pr_info("ftrace: No functions to be traced?\n"); 6131 goto failed; 6132 } 6133 6134 pr_info("ftrace: allocating %ld entries in %ld pages\n", 6135 count, count / ENTRIES_PER_PAGE + 1); 6136 6137 last_ftrace_enabled = ftrace_enabled = 1; 6138 6139 ret = ftrace_process_locs(NULL, 6140 __start_mcount_loc, 6141 __stop_mcount_loc); 6142 6143 set_ftrace_early_filters(); 6144 6145 return; 6146 failed: 6147 ftrace_disabled = 1; 6148 } 6149 6150 /* Do nothing if arch does not support this */ 6151 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) 6152 { 6153 } 6154 6155 static void ftrace_update_trampoline(struct ftrace_ops *ops) 6156 { 6157 arch_ftrace_update_trampoline(ops); 6158 } 6159 6160 void ftrace_init_trace_array(struct trace_array *tr) 6161 { 6162 INIT_LIST_HEAD(&tr->func_probes); 6163 INIT_LIST_HEAD(&tr->mod_trace); 6164 INIT_LIST_HEAD(&tr->mod_notrace); 6165 } 6166 #else 6167 6168 struct ftrace_ops global_ops = { 6169 .func = ftrace_stub, 6170 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 6171 FTRACE_OPS_FL_INITIALIZED | 6172 FTRACE_OPS_FL_PID, 6173 }; 6174 6175 static int __init ftrace_nodyn_init(void) 6176 { 6177 ftrace_enabled = 1; 6178 return 0; 6179 } 6180 core_initcall(ftrace_nodyn_init); 6181 6182 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; } 6183 static inline void ftrace_startup_enable(int command) { } 6184 static inline void ftrace_startup_all(int command) { } 6185 6186 # define ftrace_startup_sysctl() do { } while (0) 6187 # define ftrace_shutdown_sysctl() do { } while (0) 6188 6189 static void ftrace_update_trampoline(struct ftrace_ops 
*ops) 6190 { 6191 } 6192 6193 #endif /* CONFIG_DYNAMIC_FTRACE */ 6194 6195 __init void ftrace_init_global_array_ops(struct trace_array *tr) 6196 { 6197 tr->ops = &global_ops; 6198 tr->ops->private = tr; 6199 ftrace_init_trace_array(tr); 6200 } 6201 6202 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) 6203 { 6204 /* If we filter on pids, update to use the pid function */ 6205 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { 6206 if (WARN_ON(tr->ops->func != ftrace_stub)) 6207 printk("ftrace ops had %pS for function\n", 6208 tr->ops->func); 6209 } 6210 tr->ops->func = func; 6211 tr->ops->private = tr; 6212 } 6213 6214 void ftrace_reset_array_ops(struct trace_array *tr) 6215 { 6216 tr->ops->func = ftrace_stub; 6217 } 6218 6219 static inline void 6220 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 6221 struct ftrace_ops *ignored, struct pt_regs *regs) 6222 { 6223 struct ftrace_ops *op; 6224 int bit; 6225 6226 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 6227 if (bit < 0) 6228 return; 6229 6230 /* 6231 * Some of the ops may be dynamically allocated, 6232 * they must be freed after a synchronize_rcu(). 6233 */ 6234 preempt_disable_notrace(); 6235 6236 do_for_each_ftrace_op(op, ftrace_ops_list) { 6237 /* 6238 * Check the following for each ops before calling their func: 6239 * if RCU flag is set, then rcu_is_watching() must be true 6240 * if PER_CPU is set, then ftrace_function_local_disable() 6241 * must be false 6242 * Otherwise test if the ip matches the ops filter 6243 * 6244 * If any of the above fails then the op->func() is not executed. 6245 */ 6246 if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) && 6247 ftrace_ops_test(op, ip, regs)) { 6248 if (FTRACE_WARN_ON(!op->func)) { 6249 pr_warn("op=%p %pS\n", op, op); 6250 goto out; 6251 } 6252 op->func(ip, parent_ip, op, regs); 6253 } 6254 } while_for_each_ftrace_op(op); 6255 out: 6256 preempt_enable_notrace(); 6257 trace_clear_recursion(bit); 6258 } 6259 6260 /* 6261 * Some archs only support passing ip and parent_ip. Even though 6262 * the list function ignores the op parameter, we do not want any 6263 * C side effects, where a function is called without the caller 6264 * sending a third parameter. 6265 * Archs are to support both the regs and ftrace_ops at the same time. 6266 * If they support ftrace_ops, it is assumed they support regs. 6267 * If call backs want to use regs, they must either check for regs 6268 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. 6269 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. 6270 * An architecture can pass partial regs with ftrace_ops and still 6271 * set the ARCH_SUPPORTS_FTRACE_OPS. 6272 */ 6273 #if ARCH_SUPPORTS_FTRACE_OPS 6274 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 6275 struct ftrace_ops *op, struct pt_regs *regs) 6276 { 6277 __ftrace_ops_list_func(ip, parent_ip, NULL, regs); 6278 } 6279 #else 6280 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) 6281 { 6282 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); 6283 } 6284 #endif 6285 6286 /* 6287 * If there's only one function registered but it does not support 6288 * recursion, needs RCU protection and/or requires per cpu handling, then 6289 * this function will be called by the mcount trampoline. 
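 * It performs the recursion check, disables preemption and (for RCU ops)
 * verifies rcu_is_watching() on behalf of the callback before invoking
 * ops->func().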
6290 */ 6291 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, 6292 struct ftrace_ops *op, struct pt_regs *regs) 6293 { 6294 int bit; 6295 6296 if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) 6297 return; 6298 6299 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 6300 if (bit < 0) 6301 return; 6302 6303 preempt_disable_notrace(); 6304 6305 op->func(ip, parent_ip, op, regs); 6306 6307 preempt_enable_notrace(); 6308 trace_clear_recursion(bit); 6309 } 6310 6311 /** 6312 * ftrace_ops_get_func - get the function a trampoline should call 6313 * @ops: the ops to get the function for 6314 * 6315 * Normally the mcount trampoline will call the ops->func, but there 6316 * are times that it should not. For example, if the ops does not 6317 * have its own recursion protection, then it should call the 6318 * ftrace_ops_assist_func() instead. 6319 * 6320 * Returns the function that the trampoline should call for @ops. 6321 */ 6322 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) 6323 { 6324 /* 6325 * If the function does not handle recursion, needs to be RCU safe, 6326 * or does per cpu logic, then we need to call the assist handler. 6327 */ 6328 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) || 6329 ops->flags & FTRACE_OPS_FL_RCU) 6330 return ftrace_ops_assist_func; 6331 6332 return ops->func; 6333 } 6334 6335 static void 6336 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt, 6337 struct task_struct *prev, struct task_struct *next) 6338 { 6339 struct trace_array *tr = data; 6340 struct trace_pid_list *pid_list; 6341 6342 pid_list = rcu_dereference_sched(tr->function_pids); 6343 6344 this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid, 6345 trace_ignore_this_task(pid_list, next)); 6346 } 6347 6348 static void 6349 ftrace_pid_follow_sched_process_fork(void *data, 6350 struct task_struct *self, 6351 struct task_struct *task) 6352 { 6353 struct trace_pid_list *pid_list; 6354 struct trace_array *tr = data; 6355 6356 pid_list = rcu_dereference_sched(tr->function_pids); 6357 trace_filter_add_remove_task(pid_list, self, task); 6358 } 6359 6360 static void 6361 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task) 6362 { 6363 struct trace_pid_list *pid_list; 6364 struct trace_array *tr = data; 6365 6366 pid_list = rcu_dereference_sched(tr->function_pids); 6367 trace_filter_add_remove_task(pid_list, NULL, task); 6368 } 6369 6370 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) 6371 { 6372 if (enable) { 6373 register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 6374 tr); 6375 register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, 6376 tr); 6377 } else { 6378 unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork, 6379 tr); 6380 unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit, 6381 tr); 6382 } 6383 } 6384 6385 static void clear_ftrace_pids(struct trace_array *tr) 6386 { 6387 struct trace_pid_list *pid_list; 6388 int cpu; 6389 6390 pid_list = rcu_dereference_protected(tr->function_pids, 6391 lockdep_is_held(&ftrace_lock)); 6392 if (!pid_list) 6393 return; 6394 6395 unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr); 6396 6397 for_each_possible_cpu(cpu) 6398 per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false; 6399 6400 rcu_assign_pointer(tr->function_pids, NULL); 6401 6402 /* Wait till all users are no longer using pid filtering */ 6403 synchronize_rcu(); 6404 6405 
static void clear_ftrace_pids(struct trace_array *tr)
{
	struct trace_pid_list *pid_list;
	int cpu;

	pid_list = rcu_dereference_protected(tr->function_pids,
					     lockdep_is_held(&ftrace_lock));
	if (!pid_list)
		return;

	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);

	for_each_possible_cpu(cpu)
		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;

	rcu_assign_pointer(tr->function_pids, NULL);

	/* Wait till all users are no longer using pid filtering */
	synchronize_rcu();

	trace_free_pid_list(pid_list);
}

void ftrace_clear_pids(struct trace_array *tr)
{
	mutex_lock(&ftrace_lock);

	clear_ftrace_pids(tr);

	mutex_unlock(&ftrace_lock);
}

static void ftrace_pid_reset(struct trace_array *tr)
{
	mutex_lock(&ftrace_lock);
	clear_ftrace_pids(tr);

	ftrace_update_pid_func();
	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
}

/* Greater than any max PID */
#define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)

static void *fpid_start(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	struct trace_pid_list *pid_list;
	struct trace_array *tr = m->private;

	mutex_lock(&ftrace_lock);
	rcu_read_lock_sched();

	pid_list = rcu_dereference_sched(tr->function_pids);

	if (!pid_list)
		return !(*pos) ? FTRACE_NO_PIDS : NULL;

	return trace_pid_start(pid_list, pos);
}

static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);

	if (v == FTRACE_NO_PIDS)
		return NULL;

	return trace_pid_next(pid_list, v, pos);
}

static void fpid_stop(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock_sched();
	mutex_unlock(&ftrace_lock);
}

static int fpid_show(struct seq_file *m, void *v)
{
	if (v == FTRACE_NO_PIDS) {
		seq_puts(m, "no pid\n");
		return 0;
	}

	return trace_pid_show(m, v);
}

static const struct seq_operations ftrace_pid_sops = {
	.start = fpid_start,
	.next = fpid_next,
	.stop = fpid_stop,
	.show = fpid_show,
};

static int
ftrace_pid_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		ftrace_pid_reset(tr);

	ret = seq_open(file, &ftrace_pid_sops);
	if (ret < 0) {
		trace_array_put(tr);
	} else {
		m = file->private_data;
		/* copy tr over to seq ops */
		m->private = tr;
	}

	return ret;
}
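
/*
 * Example (illustrative, from the user side): the seq_file and open
 * handlers above implement the tracefs "set_ftrace_pid" file.  Assuming
 * tracefs is mounted at /sys/kernel/tracing, a minimal user-space sketch
 * of the interface looks like this (error handling omitted, names
 * hypothetical):
 *
 *	int fd;
 *
 *	// O_TRUNC triggers ftrace_pid_reset() and clears any old filter.
 *	fd = open("/sys/kernel/tracing/set_ftrace_pid", O_WRONLY | O_TRUNC);
 *	write(fd, "1234", 4);		// trace only pid 1234
 *	close(fd);
 *
 *	// Reading the file walks ftrace_pid_sops and lists the filtered
 *	// pids, or prints "no pid" when no filter is set.
 */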
static void ignore_task_cpu(void *data)
{
	struct trace_array *tr = data;
	struct trace_pid_list *pid_list;

	/*
	 * This function is called by on_each_cpu() while the
	 * ftrace_lock is held.
	 */
	pid_list = rcu_dereference_protected(tr->function_pids,
					     mutex_is_locked(&ftrace_lock));

	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
		       trace_ignore_this_task(pid_list, current));
}

static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	struct trace_pid_list *filtered_pids = NULL;
	struct trace_pid_list *pid_list;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_lock);

	filtered_pids = rcu_dereference_protected(tr->function_pids,
						  lockdep_is_held(&ftrace_lock));

	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
	if (ret < 0)
		goto out;

	rcu_assign_pointer(tr->function_pids, pid_list);

	if (filtered_pids) {
		synchronize_rcu();
		trace_free_pid_list(filtered_pids);
	} else if (pid_list) {
		/* Register a probe to set whether to ignore the tracing of a task */
		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
	}

	/*
	 * Ignoring of pids is done at task switch. But we have to
	 * check for those tasks that are currently running.
	 * Always do this in case a pid was appended or removed.
	 */
	on_each_cpu(ignore_task_cpu, tr, 1);

	ftrace_update_pid_func();
	ftrace_startup_all(0);
out:
	mutex_unlock(&ftrace_lock);

	if (ret > 0)
		*ppos += ret;

	return ret;
}

static int
ftrace_pid_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return seq_release(inode, file);
}

static const struct file_operations ftrace_pid_fops = {
	.open		= ftrace_pid_open,
	.write		= ftrace_pid_write,
	.read		= seq_read,
	.llseek		= tracing_lseek,
	.release	= ftrace_pid_release,
};

void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	trace_create_file("set_ftrace_pid", 0644, d_tracer,
			  tr, &ftrace_pid_fops);
}

void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
					 struct dentry *d_tracer)
{
	/* Only the top level directory has the dyn_tracefs and profile */
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));

	ftrace_init_dyn_tracefs(d_tracer);
	ftrace_profile_tracefs(d_tracer);
}

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is simply disabled in place,
 * with no cleanup, so only use it when the system is already in
 * an unrecoverable state.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	ftrace_trace_function = ftrace_stub;
}

/**
 * ftrace_is_dead - Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
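
/*
 * Example (illustrative sketch only): callers that are about to rely on
 * tracing can consult ftrace_is_dead() and back off if ftrace_kill() has
 * been invoked.  The function below is hypothetical and not part of this
 * file.
 *
 *	static void example_dump_trace(void)
 *	{
 *		if (ftrace_is_dead()) {
 *			printk("ftrace is dead, not touching the tracer\n");
 *			return;
 *		}
 *		// ... safe to make use of the function tracer here ...
 *	}
 */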
/**
 * register_ftrace_function - register a function for profiling
 * @ops: ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops: ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		/* we are starting ftrace again */
		if (rcu_dereference_protected(ftrace_ops_list,
			  lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
			update_ftrace_function();

		ftrace_startup_sysctl();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
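
/*
 * Example (illustrative sketch only): a minimal user of the API above.
 * The ops and callback are hypothetical and not part of this file.  The
 * callback (and everything it calls) must be notrace; since this ops does
 * not set FTRACE_OPS_FL_RECURSION_SAFE, ftrace_ops_get_func() will route
 * its calls through ftrace_ops_assist_func() for recursion protection.
 *
 *	static void notrace my_trace_callback(unsigned long ip,
 *					      unsigned long parent_ip,
 *					      struct ftrace_ops *op,
 *					      struct pt_regs *regs)
 *	{
 *		// Keep this path short; it runs for every traced function.
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_callback,
 *	};
 *
 *	static int __init my_tracer_init(void)
 *	{
 *		return register_ftrace_function(&my_ops);
 *	}
 *
 *	static void __exit my_tracer_exit(void)
 *	{
 *		unregister_ftrace_function(&my_ops);
 *	}
 */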