1 /* 2 * Infrastructure for profiling code inserted by 'gcc -pg'. 3 * 4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> 5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com> 6 * 7 * Originally ported from the -rt patch by: 8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com> 9 * 10 * Based on code in the latency_tracer, that is: 11 * 12 * Copyright (C) 2004-2006 Ingo Molnar 13 * Copyright (C) 2004 Nadia Yvette Chambers 14 */ 15 16 #include <linux/stop_machine.h> 17 #include <linux/clocksource.h> 18 #include <linux/kallsyms.h> 19 #include <linux/seq_file.h> 20 #include <linux/suspend.h> 21 #include <linux/debugfs.h> 22 #include <linux/hardirq.h> 23 #include <linux/kthread.h> 24 #include <linux/uaccess.h> 25 #include <linux/bsearch.h> 26 #include <linux/module.h> 27 #include <linux/ftrace.h> 28 #include <linux/sysctl.h> 29 #include <linux/slab.h> 30 #include <linux/ctype.h> 31 #include <linux/sort.h> 32 #include <linux/list.h> 33 #include <linux/hash.h> 34 #include <linux/rcupdate.h> 35 36 #include <trace/events/sched.h> 37 38 #include <asm/setup.h> 39 40 #include "trace_output.h" 41 #include "trace_stat.h" 42 43 #define FTRACE_WARN_ON(cond) \ 44 ({ \ 45 int ___r = cond; \ 46 if (WARN_ON(___r)) \ 47 ftrace_kill(); \ 48 ___r; \ 49 }) 50 51 #define FTRACE_WARN_ON_ONCE(cond) \ 52 ({ \ 53 int ___r = cond; \ 54 if (WARN_ON_ONCE(___r)) \ 55 ftrace_kill(); \ 56 ___r; \ 57 }) 58 59 /* hash bits for specific function selection */ 60 #define FTRACE_HASH_BITS 7 61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) 62 #define FTRACE_HASH_DEFAULT_BITS 10 63 #define FTRACE_HASH_MAX_BITS 12 64 65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL) 66 67 #ifdef CONFIG_DYNAMIC_FTRACE 68 #define INIT_REGEX_LOCK(opsname) \ 69 .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock), 70 #else 71 #define INIT_REGEX_LOCK(opsname) 72 #endif 73 74 static struct ftrace_ops ftrace_list_end __read_mostly = { 75 .func = ftrace_stub, 76 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB, 77 }; 78 79 /* ftrace_enabled is a method to turn ftrace on or off */ 80 int ftrace_enabled __read_mostly; 81 static int last_ftrace_enabled; 82 83 /* Quick disabling of function tracer. */ 84 int function_trace_stop __read_mostly; 85 86 /* Current function tracing op */ 87 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; 88 /* What to set function_trace_op to */ 89 static struct ftrace_ops *set_function_trace_op; 90 91 /* List for set_ftrace_pid's pids. */ 92 LIST_HEAD(ftrace_pids); 93 struct ftrace_pid { 94 struct list_head list; 95 struct pid *pid; 96 }; 97 98 /* 99 * ftrace_disabled is set when an anomaly is discovered. 100 * ftrace_disabled is much stronger than ftrace_enabled. 
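 * (Once ftrace_disabled is set, which ftrace_kill() does from the
 * FTRACE_WARN_ON() macros above, code patching and new registrations are
 * refused; clearing ftrace_enabled only keeps registered callbacks from
 * being called.)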
101 */ 102 static int ftrace_disabled __read_mostly; 103 104 static DEFINE_MUTEX(ftrace_lock); 105 106 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; 107 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; 108 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 109 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; 110 static struct ftrace_ops global_ops; 111 static struct ftrace_ops control_ops; 112 113 #if ARCH_SUPPORTS_FTRACE_OPS 114 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 115 struct ftrace_ops *op, struct pt_regs *regs); 116 #else 117 /* See comment below, where ftrace_ops_list_func is defined */ 118 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); 119 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) 120 #endif 121 122 /* 123 * Traverse the ftrace_global_list, invoking all entries. The reason that we 124 * can use rcu_dereference_raw_notrace() is that elements removed from this list 125 * are simply leaked, so there is no need to interact with a grace-period 126 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle 127 * concurrent insertions into the ftrace_global_list. 128 * 129 * Silly Alpha and silly pointer-speculation compiler optimizations! 130 */ 131 #define do_for_each_ftrace_op(op, list) \ 132 op = rcu_dereference_raw_notrace(list); \ 133 do 134 135 /* 136 * Optimized for just a single item in the list (as that is the normal case). 137 */ 138 #define while_for_each_ftrace_op(op) \ 139 while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \ 140 unlikely((op) != &ftrace_list_end)) 141 142 static inline void ftrace_ops_init(struct ftrace_ops *ops) 143 { 144 #ifdef CONFIG_DYNAMIC_FTRACE 145 if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) { 146 mutex_init(&ops->regex_lock); 147 ops->flags |= FTRACE_OPS_FL_INITIALIZED; 148 } 149 #endif 150 } 151 152 /** 153 * ftrace_nr_registered_ops - return number of ops registered 154 * 155 * Returns the number of ftrace_ops registered and tracing functions 156 */ 157 int ftrace_nr_registered_ops(void) 158 { 159 struct ftrace_ops *ops; 160 int cnt = 0; 161 162 mutex_lock(&ftrace_lock); 163 164 for (ops = ftrace_ops_list; 165 ops != &ftrace_list_end; ops = ops->next) 166 cnt++; 167 168 mutex_unlock(&ftrace_lock); 169 170 return cnt; 171 } 172 173 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, 174 struct ftrace_ops *op, struct pt_regs *regs) 175 { 176 if (!test_tsk_trace_trace(current)) 177 return; 178 179 ftrace_pid_function(ip, parent_ip, op, regs); 180 } 181 182 static void set_ftrace_pid_function(ftrace_func_t func) 183 { 184 /* do not set ftrace_pid_function to itself! */ 185 if (func != ftrace_pid_func) 186 ftrace_pid_function = func; 187 } 188 189 /** 190 * clear_ftrace_function - reset the ftrace function 191 * 192 * This NULLs the ftrace function and in essence stops 193 * tracing. 
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);
#else
static inline void update_function_graph_func(void) { }
#endif

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	if (ftrace_ops_list == &ftrace_list_end ||
	    (ftrace_ops_list->next == &ftrace_list_end &&
	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
	     !FTRACE_FORCE_LIST_FUNC)) {
		/* Set the ftrace_ops that the arch callback uses */
		set_function_trace_op = ftrace_ops_list;
		func = ftrace_ops_list->func;
	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now!
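	 * (The IPI above forced a full barrier on every CPU, so they all see
	 * the new function_trace_op before the trace function is switched
	 * below.)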
*/ 311 #endif /* !CONFIG_DYNAMIC_FTRACE */ 312 313 ftrace_trace_function = func; 314 } 315 316 int using_ftrace_ops_list_func(void) 317 { 318 return ftrace_trace_function == ftrace_ops_list_func; 319 } 320 321 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) 322 { 323 ops->next = *list; 324 /* 325 * We are entering ops into the list but another 326 * CPU might be walking that list. We need to make sure 327 * the ops->next pointer is valid before another CPU sees 328 * the ops pointer included into the list. 329 */ 330 rcu_assign_pointer(*list, ops); 331 } 332 333 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) 334 { 335 struct ftrace_ops **p; 336 337 /* 338 * If we are removing the last function, then simply point 339 * to the ftrace_stub. 340 */ 341 if (*list == ops && ops->next == &ftrace_list_end) { 342 *list = &ftrace_list_end; 343 return 0; 344 } 345 346 for (p = list; *p != &ftrace_list_end; p = &(*p)->next) 347 if (*p == ops) 348 break; 349 350 if (*p != ops) 351 return -1; 352 353 *p = (*p)->next; 354 return 0; 355 } 356 357 static void add_ftrace_list_ops(struct ftrace_ops **list, 358 struct ftrace_ops *main_ops, 359 struct ftrace_ops *ops) 360 { 361 int first = *list == &ftrace_list_end; 362 add_ftrace_ops(list, ops); 363 if (first) 364 add_ftrace_ops(&ftrace_ops_list, main_ops); 365 } 366 367 static int remove_ftrace_list_ops(struct ftrace_ops **list, 368 struct ftrace_ops *main_ops, 369 struct ftrace_ops *ops) 370 { 371 int ret = remove_ftrace_ops(list, ops); 372 if (!ret && *list == &ftrace_list_end) 373 ret = remove_ftrace_ops(&ftrace_ops_list, main_ops); 374 return ret; 375 } 376 377 static int __register_ftrace_function(struct ftrace_ops *ops) 378 { 379 if (ops->flags & FTRACE_OPS_FL_DELETED) 380 return -EINVAL; 381 382 if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED)) 383 return -EBUSY; 384 385 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS 386 /* 387 * If the ftrace_ops specifies SAVE_REGS, then it only can be used 388 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. 389 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant. 
390 */ 391 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && 392 !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) 393 return -EINVAL; 394 395 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) 396 ops->flags |= FTRACE_OPS_FL_SAVE_REGS; 397 #endif 398 399 if (!core_kernel_data((unsigned long)ops)) 400 ops->flags |= FTRACE_OPS_FL_DYNAMIC; 401 402 if (ops->flags & FTRACE_OPS_FL_CONTROL) { 403 if (control_ops_alloc(ops)) 404 return -ENOMEM; 405 add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops); 406 } else 407 add_ftrace_ops(&ftrace_ops_list, ops); 408 409 if (ftrace_enabled) 410 update_ftrace_function(); 411 412 return 0; 413 } 414 415 static int __unregister_ftrace_function(struct ftrace_ops *ops) 416 { 417 int ret; 418 419 if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED))) 420 return -EBUSY; 421 422 if (ops->flags & FTRACE_OPS_FL_CONTROL) { 423 ret = remove_ftrace_list_ops(&ftrace_control_list, 424 &control_ops, ops); 425 } else 426 ret = remove_ftrace_ops(&ftrace_ops_list, ops); 427 428 if (ret < 0) 429 return ret; 430 431 if (ftrace_enabled) 432 update_ftrace_function(); 433 434 return 0; 435 } 436 437 static void ftrace_update_pid_func(void) 438 { 439 /* Only do something if we are tracing something */ 440 if (ftrace_trace_function == ftrace_stub) 441 return; 442 443 update_ftrace_function(); 444 } 445 446 #ifdef CONFIG_FUNCTION_PROFILER 447 struct ftrace_profile { 448 struct hlist_node node; 449 unsigned long ip; 450 unsigned long counter; 451 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 452 unsigned long long time; 453 unsigned long long time_squared; 454 #endif 455 }; 456 457 struct ftrace_profile_page { 458 struct ftrace_profile_page *next; 459 unsigned long index; 460 struct ftrace_profile records[]; 461 }; 462 463 struct ftrace_profile_stat { 464 atomic_t disabled; 465 struct hlist_head *hash; 466 struct ftrace_profile_page *pages; 467 struct ftrace_profile_page *start; 468 struct tracer_stat stat; 469 }; 470 471 #define PROFILE_RECORDS_SIZE \ 472 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records)) 473 474 #define PROFILES_PER_PAGE \ 475 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) 476 477 static int ftrace_profile_enabled __read_mostly; 478 479 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */ 480 static DEFINE_MUTEX(ftrace_profile_lock); 481 482 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); 483 484 #define FTRACE_PROFILE_HASH_BITS 10 485 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS) 486 487 static void * 488 function_stat_next(void *v, int idx) 489 { 490 struct ftrace_profile *rec = v; 491 struct ftrace_profile_page *pg; 492 493 pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK); 494 495 again: 496 if (idx != 0) 497 rec++; 498 499 if ((void *)rec >= (void *)&pg->records[pg->index]) { 500 pg = pg->next; 501 if (!pg) 502 return NULL; 503 rec = &pg->records[0]; 504 if (!rec->counter) 505 goto again; 506 } 507 508 return rec; 509 } 510 511 static void *function_stat_start(struct tracer_stat *trace) 512 { 513 struct ftrace_profile_stat *stat = 514 container_of(trace, struct ftrace_profile_stat, stat); 515 516 if (!stat || !stat->start) 517 return NULL; 518 519 return function_stat_next(&stat->start->records[0], 0); 520 } 521 522 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 523 /* function graph compares on total time */ 524 static int function_stat_cmp(void *p1, void *p2) 525 { 526 struct ftrace_profile *a = p1; 527 struct ftrace_profile *b = p2; 528 529 if (a->time < b->time) 530 
return -1; 531 if (a->time > b->time) 532 return 1; 533 else 534 return 0; 535 } 536 #else 537 /* not function graph compares against hits */ 538 static int function_stat_cmp(void *p1, void *p2) 539 { 540 struct ftrace_profile *a = p1; 541 struct ftrace_profile *b = p2; 542 543 if (a->counter < b->counter) 544 return -1; 545 if (a->counter > b->counter) 546 return 1; 547 else 548 return 0; 549 } 550 #endif 551 552 static int function_stat_headers(struct seq_file *m) 553 { 554 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 555 seq_printf(m, " Function " 556 "Hit Time Avg s^2\n" 557 " -------- " 558 "--- ---- --- ---\n"); 559 #else 560 seq_printf(m, " Function Hit\n" 561 " -------- ---\n"); 562 #endif 563 return 0; 564 } 565 566 static int function_stat_show(struct seq_file *m, void *v) 567 { 568 struct ftrace_profile *rec = v; 569 char str[KSYM_SYMBOL_LEN]; 570 int ret = 0; 571 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 572 static struct trace_seq s; 573 unsigned long long avg; 574 unsigned long long stddev; 575 #endif 576 mutex_lock(&ftrace_profile_lock); 577 578 /* we raced with function_profile_reset() */ 579 if (unlikely(rec->counter == 0)) { 580 ret = -EBUSY; 581 goto out; 582 } 583 584 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 585 seq_printf(m, " %-30.30s %10lu", str, rec->counter); 586 587 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 588 seq_printf(m, " "); 589 avg = rec->time; 590 do_div(avg, rec->counter); 591 592 /* Sample standard deviation (s^2) */ 593 if (rec->counter <= 1) 594 stddev = 0; 595 else { 596 /* 597 * Apply Welford's method: 598 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) 599 */ 600 stddev = rec->counter * rec->time_squared - 601 rec->time * rec->time; 602 603 /* 604 * Divide only 1000 for ns^2 -> us^2 conversion. 605 * trace_print_graph_duration will divide 1000 again. 606 */ 607 do_div(stddev, rec->counter * (rec->counter - 1) * 1000); 608 } 609 610 trace_seq_init(&s); 611 trace_print_graph_duration(rec->time, &s); 612 trace_seq_puts(&s, " "); 613 trace_print_graph_duration(avg, &s); 614 trace_seq_puts(&s, " "); 615 trace_print_graph_duration(stddev, &s); 616 trace_print_seq(m, &s); 617 #endif 618 seq_putc(m, '\n'); 619 out: 620 mutex_unlock(&ftrace_profile_lock); 621 622 return ret; 623 } 624 625 static void ftrace_profile_reset(struct ftrace_profile_stat *stat) 626 { 627 struct ftrace_profile_page *pg; 628 629 pg = stat->pages = stat->start; 630 631 while (pg) { 632 memset(pg->records, 0, PROFILE_RECORDS_SIZE); 633 pg->index = 0; 634 pg = pg->next; 635 } 636 637 memset(stat->hash, 0, 638 FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head)); 639 } 640 641 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) 642 { 643 struct ftrace_profile_page *pg; 644 int functions; 645 int pages; 646 int i; 647 648 /* If we already allocated, do nothing */ 649 if (stat->pages) 650 return 0; 651 652 stat->pages = (void *)get_zeroed_page(GFP_KERNEL); 653 if (!stat->pages) 654 return -ENOMEM; 655 656 #ifdef CONFIG_DYNAMIC_FTRACE 657 functions = ftrace_update_tot_cnt; 658 #else 659 /* 660 * We do not know the number of functions that exist because 661 * dynamic tracing is what counts them. With past experience 662 * we have around 20K functions. That should be more than enough. 663 * It is highly unlikely we will execute every function in 664 * the kernel. 
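	 * (Rough cost, assuming 4K pages: struct ftrace_profile is ~32-48
	 * bytes depending on CONFIG_FUNCTION_GRAPH_TRACER, so
	 * PROFILES_PER_PAGE is on the order of 100 and the 20000 estimate
	 * below costs a couple hundred pages, i.e. well under 1MB, per
	 * possible CPU.)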
665 */ 666 functions = 20000; 667 #endif 668 669 pg = stat->start = stat->pages; 670 671 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); 672 673 for (i = 1; i < pages; i++) { 674 pg->next = (void *)get_zeroed_page(GFP_KERNEL); 675 if (!pg->next) 676 goto out_free; 677 pg = pg->next; 678 } 679 680 return 0; 681 682 out_free: 683 pg = stat->start; 684 while (pg) { 685 unsigned long tmp = (unsigned long)pg; 686 687 pg = pg->next; 688 free_page(tmp); 689 } 690 691 stat->pages = NULL; 692 stat->start = NULL; 693 694 return -ENOMEM; 695 } 696 697 static int ftrace_profile_init_cpu(int cpu) 698 { 699 struct ftrace_profile_stat *stat; 700 int size; 701 702 stat = &per_cpu(ftrace_profile_stats, cpu); 703 704 if (stat->hash) { 705 /* If the profile is already created, simply reset it */ 706 ftrace_profile_reset(stat); 707 return 0; 708 } 709 710 /* 711 * We are profiling all functions, but usually only a few thousand 712 * functions are hit. We'll make a hash of 1024 items. 713 */ 714 size = FTRACE_PROFILE_HASH_SIZE; 715 716 stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL); 717 718 if (!stat->hash) 719 return -ENOMEM; 720 721 /* Preallocate the function profiling pages */ 722 if (ftrace_profile_pages_init(stat) < 0) { 723 kfree(stat->hash); 724 stat->hash = NULL; 725 return -ENOMEM; 726 } 727 728 return 0; 729 } 730 731 static int ftrace_profile_init(void) 732 { 733 int cpu; 734 int ret = 0; 735 736 for_each_possible_cpu(cpu) { 737 ret = ftrace_profile_init_cpu(cpu); 738 if (ret) 739 break; 740 } 741 742 return ret; 743 } 744 745 /* interrupts must be disabled */ 746 static struct ftrace_profile * 747 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) 748 { 749 struct ftrace_profile *rec; 750 struct hlist_head *hhd; 751 unsigned long key; 752 753 key = hash_long(ip, FTRACE_PROFILE_HASH_BITS); 754 hhd = &stat->hash[key]; 755 756 if (hlist_empty(hhd)) 757 return NULL; 758 759 hlist_for_each_entry_rcu_notrace(rec, hhd, node) { 760 if (rec->ip == ip) 761 return rec; 762 } 763 764 return NULL; 765 } 766 767 static void ftrace_add_profile(struct ftrace_profile_stat *stat, 768 struct ftrace_profile *rec) 769 { 770 unsigned long key; 771 772 key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS); 773 hlist_add_head_rcu(&rec->node, &stat->hash[key]); 774 } 775 776 /* 777 * The memory is already allocated, this simply finds a new record to use. 
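 * (The atomic stat->disabled counter below is what keeps an NMI that
 * interrupts this path from recursing into the allocation and corrupting
 * the page index.)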
778 */ 779 static struct ftrace_profile * 780 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) 781 { 782 struct ftrace_profile *rec = NULL; 783 784 /* prevent recursion (from NMIs) */ 785 if (atomic_inc_return(&stat->disabled) != 1) 786 goto out; 787 788 /* 789 * Try to find the function again since an NMI 790 * could have added it 791 */ 792 rec = ftrace_find_profiled_func(stat, ip); 793 if (rec) 794 goto out; 795 796 if (stat->pages->index == PROFILES_PER_PAGE) { 797 if (!stat->pages->next) 798 goto out; 799 stat->pages = stat->pages->next; 800 } 801 802 rec = &stat->pages->records[stat->pages->index++]; 803 rec->ip = ip; 804 ftrace_add_profile(stat, rec); 805 806 out: 807 atomic_dec(&stat->disabled); 808 809 return rec; 810 } 811 812 static void 813 function_profile_call(unsigned long ip, unsigned long parent_ip, 814 struct ftrace_ops *ops, struct pt_regs *regs) 815 { 816 struct ftrace_profile_stat *stat; 817 struct ftrace_profile *rec; 818 unsigned long flags; 819 820 if (!ftrace_profile_enabled) 821 return; 822 823 local_irq_save(flags); 824 825 stat = this_cpu_ptr(&ftrace_profile_stats); 826 if (!stat->hash || !ftrace_profile_enabled) 827 goto out; 828 829 rec = ftrace_find_profiled_func(stat, ip); 830 if (!rec) { 831 rec = ftrace_profile_alloc(stat, ip); 832 if (!rec) 833 goto out; 834 } 835 836 rec->counter++; 837 out: 838 local_irq_restore(flags); 839 } 840 841 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 842 static int profile_graph_entry(struct ftrace_graph_ent *trace) 843 { 844 function_profile_call(trace->func, 0, NULL, NULL); 845 return 1; 846 } 847 848 static void profile_graph_return(struct ftrace_graph_ret *trace) 849 { 850 struct ftrace_profile_stat *stat; 851 unsigned long long calltime; 852 struct ftrace_profile *rec; 853 unsigned long flags; 854 855 local_irq_save(flags); 856 stat = this_cpu_ptr(&ftrace_profile_stats); 857 if (!stat->hash || !ftrace_profile_enabled) 858 goto out; 859 860 /* If the calltime was zero'd ignore it */ 861 if (!trace->calltime) 862 goto out; 863 864 calltime = trace->rettime - trace->calltime; 865 866 if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) { 867 int index; 868 869 index = trace->depth; 870 871 /* Append this call time to the parent time to subtract */ 872 if (index) 873 current->ret_stack[index - 1].subtime += calltime; 874 875 if (current->ret_stack[index].subtime < calltime) 876 calltime -= current->ret_stack[index].subtime; 877 else 878 calltime = 0; 879 } 880 881 rec = ftrace_find_profiled_func(stat, trace->func); 882 if (rec) { 883 rec->time += calltime; 884 rec->time_squared += calltime * calltime; 885 } 886 887 out: 888 local_irq_restore(flags); 889 } 890 891 static int register_ftrace_profiler(void) 892 { 893 return register_ftrace_graph(&profile_graph_return, 894 &profile_graph_entry); 895 } 896 897 static void unregister_ftrace_profiler(void) 898 { 899 unregister_ftrace_graph(); 900 } 901 #else 902 static struct ftrace_ops ftrace_profile_ops __read_mostly = { 903 .func = function_profile_call, 904 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 905 INIT_REGEX_LOCK(ftrace_profile_ops) 906 }; 907 908 static int register_ftrace_profiler(void) 909 { 910 return register_ftrace_function(&ftrace_profile_ops); 911 } 912 913 static void unregister_ftrace_profiler(void) 914 { 915 unregister_ftrace_function(&ftrace_profile_ops); 916 } 917 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 918 919 static ssize_t 920 ftrace_profile_write(struct file *filp, const char __user *ubuf, 921 size_t cnt, loff_t *ppos) 922 
{ 923 unsigned long val; 924 int ret; 925 926 ret = kstrtoul_from_user(ubuf, cnt, 10, &val); 927 if (ret) 928 return ret; 929 930 val = !!val; 931 932 mutex_lock(&ftrace_profile_lock); 933 if (ftrace_profile_enabled ^ val) { 934 if (val) { 935 ret = ftrace_profile_init(); 936 if (ret < 0) { 937 cnt = ret; 938 goto out; 939 } 940 941 ret = register_ftrace_profiler(); 942 if (ret < 0) { 943 cnt = ret; 944 goto out; 945 } 946 ftrace_profile_enabled = 1; 947 } else { 948 ftrace_profile_enabled = 0; 949 /* 950 * unregister_ftrace_profiler calls stop_machine 951 * so this acts like an synchronize_sched. 952 */ 953 unregister_ftrace_profiler(); 954 } 955 } 956 out: 957 mutex_unlock(&ftrace_profile_lock); 958 959 *ppos += cnt; 960 961 return cnt; 962 } 963 964 static ssize_t 965 ftrace_profile_read(struct file *filp, char __user *ubuf, 966 size_t cnt, loff_t *ppos) 967 { 968 char buf[64]; /* big enough to hold a number */ 969 int r; 970 971 r = sprintf(buf, "%u\n", ftrace_profile_enabled); 972 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 973 } 974 975 static const struct file_operations ftrace_profile_fops = { 976 .open = tracing_open_generic, 977 .read = ftrace_profile_read, 978 .write = ftrace_profile_write, 979 .llseek = default_llseek, 980 }; 981 982 /* used to initialize the real stat files */ 983 static struct tracer_stat function_stats __initdata = { 984 .name = "functions", 985 .stat_start = function_stat_start, 986 .stat_next = function_stat_next, 987 .stat_cmp = function_stat_cmp, 988 .stat_headers = function_stat_headers, 989 .stat_show = function_stat_show 990 }; 991 992 static __init void ftrace_profile_debugfs(struct dentry *d_tracer) 993 { 994 struct ftrace_profile_stat *stat; 995 struct dentry *entry; 996 char *name; 997 int ret; 998 int cpu; 999 1000 for_each_possible_cpu(cpu) { 1001 stat = &per_cpu(ftrace_profile_stats, cpu); 1002 1003 /* allocate enough for function name + cpu number */ 1004 name = kmalloc(32, GFP_KERNEL); 1005 if (!name) { 1006 /* 1007 * The files created are permanent, if something happens 1008 * we still do not free memory. 
1009 */ 1010 WARN(1, 1011 "Could not allocate stat file for cpu %d\n", 1012 cpu); 1013 return; 1014 } 1015 stat->stat = function_stats; 1016 snprintf(name, 32, "function%d", cpu); 1017 stat->stat.name = name; 1018 ret = register_stat_tracer(&stat->stat); 1019 if (ret) { 1020 WARN(1, 1021 "Could not register function stat for cpu %d\n", 1022 cpu); 1023 kfree(name); 1024 return; 1025 } 1026 } 1027 1028 entry = debugfs_create_file("function_profile_enabled", 0644, 1029 d_tracer, NULL, &ftrace_profile_fops); 1030 if (!entry) 1031 pr_warning("Could not create debugfs " 1032 "'function_profile_enabled' entry\n"); 1033 } 1034 1035 #else /* CONFIG_FUNCTION_PROFILER */ 1036 static __init void ftrace_profile_debugfs(struct dentry *d_tracer) 1037 { 1038 } 1039 #endif /* CONFIG_FUNCTION_PROFILER */ 1040 1041 static struct pid * const ftrace_swapper_pid = &init_struct_pid; 1042 1043 #ifdef CONFIG_DYNAMIC_FTRACE 1044 1045 #ifndef CONFIG_FTRACE_MCOUNT_RECORD 1046 # error Dynamic ftrace depends on MCOUNT_RECORD 1047 #endif 1048 1049 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; 1050 1051 struct ftrace_func_probe { 1052 struct hlist_node node; 1053 struct ftrace_probe_ops *ops; 1054 unsigned long flags; 1055 unsigned long ip; 1056 void *data; 1057 struct list_head free_list; 1058 }; 1059 1060 struct ftrace_func_entry { 1061 struct hlist_node hlist; 1062 unsigned long ip; 1063 }; 1064 1065 struct ftrace_hash { 1066 unsigned long size_bits; 1067 struct hlist_head *buckets; 1068 unsigned long count; 1069 struct rcu_head rcu; 1070 }; 1071 1072 /* 1073 * We make these constant because no one should touch them, 1074 * but they are used as the default "empty hash", to avoid allocating 1075 * it all the time. These are in a read only section such that if 1076 * anyone does try to modify it, it will cause an exception. 
1077 */ 1078 static const struct hlist_head empty_buckets[1]; 1079 static const struct ftrace_hash empty_hash = { 1080 .buckets = (struct hlist_head *)empty_buckets, 1081 }; 1082 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash) 1083 1084 static struct ftrace_ops global_ops = { 1085 .func = ftrace_stub, 1086 .notrace_hash = EMPTY_HASH, 1087 .filter_hash = EMPTY_HASH, 1088 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 1089 INIT_REGEX_LOCK(global_ops) 1090 }; 1091 1092 struct ftrace_page { 1093 struct ftrace_page *next; 1094 struct dyn_ftrace *records; 1095 int index; 1096 int size; 1097 }; 1098 1099 #define ENTRY_SIZE sizeof(struct dyn_ftrace) 1100 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) 1101 1102 /* estimate from running different kernels */ 1103 #define NR_TO_INIT 10000 1104 1105 static struct ftrace_page *ftrace_pages_start; 1106 static struct ftrace_page *ftrace_pages; 1107 1108 static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash) 1109 { 1110 return !hash || !hash->count; 1111 } 1112 1113 static struct ftrace_func_entry * 1114 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip) 1115 { 1116 unsigned long key; 1117 struct ftrace_func_entry *entry; 1118 struct hlist_head *hhd; 1119 1120 if (ftrace_hash_empty(hash)) 1121 return NULL; 1122 1123 if (hash->size_bits > 0) 1124 key = hash_long(ip, hash->size_bits); 1125 else 1126 key = 0; 1127 1128 hhd = &hash->buckets[key]; 1129 1130 hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) { 1131 if (entry->ip == ip) 1132 return entry; 1133 } 1134 return NULL; 1135 } 1136 1137 static void __add_hash_entry(struct ftrace_hash *hash, 1138 struct ftrace_func_entry *entry) 1139 { 1140 struct hlist_head *hhd; 1141 unsigned long key; 1142 1143 if (hash->size_bits) 1144 key = hash_long(entry->ip, hash->size_bits); 1145 else 1146 key = 0; 1147 1148 hhd = &hash->buckets[key]; 1149 hlist_add_head(&entry->hlist, hhd); 1150 hash->count++; 1151 } 1152 1153 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip) 1154 { 1155 struct ftrace_func_entry *entry; 1156 1157 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 1158 if (!entry) 1159 return -ENOMEM; 1160 1161 entry->ip = ip; 1162 __add_hash_entry(hash, entry); 1163 1164 return 0; 1165 } 1166 1167 static void 1168 free_hash_entry(struct ftrace_hash *hash, 1169 struct ftrace_func_entry *entry) 1170 { 1171 hlist_del(&entry->hlist); 1172 kfree(entry); 1173 hash->count--; 1174 } 1175 1176 static void 1177 remove_hash_entry(struct ftrace_hash *hash, 1178 struct ftrace_func_entry *entry) 1179 { 1180 hlist_del(&entry->hlist); 1181 hash->count--; 1182 } 1183 1184 static void ftrace_hash_clear(struct ftrace_hash *hash) 1185 { 1186 struct hlist_head *hhd; 1187 struct hlist_node *tn; 1188 struct ftrace_func_entry *entry; 1189 int size = 1 << hash->size_bits; 1190 int i; 1191 1192 if (!hash->count) 1193 return; 1194 1195 for (i = 0; i < size; i++) { 1196 hhd = &hash->buckets[i]; 1197 hlist_for_each_entry_safe(entry, tn, hhd, hlist) 1198 free_hash_entry(hash, entry); 1199 } 1200 FTRACE_WARN_ON(hash->count); 1201 } 1202 1203 static void free_ftrace_hash(struct ftrace_hash *hash) 1204 { 1205 if (!hash || hash == EMPTY_HASH) 1206 return; 1207 ftrace_hash_clear(hash); 1208 kfree(hash->buckets); 1209 kfree(hash); 1210 } 1211 1212 static void __free_ftrace_hash_rcu(struct rcu_head *rcu) 1213 { 1214 struct ftrace_hash *hash; 1215 1216 hash = container_of(rcu, struct ftrace_hash, rcu); 1217 free_ftrace_hash(hash); 1218 } 1219 1220 static void 
free_ftrace_hash_rcu(struct ftrace_hash *hash) 1221 { 1222 if (!hash || hash == EMPTY_HASH) 1223 return; 1224 call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu); 1225 } 1226 1227 void ftrace_free_filter(struct ftrace_ops *ops) 1228 { 1229 ftrace_ops_init(ops); 1230 free_ftrace_hash(ops->filter_hash); 1231 free_ftrace_hash(ops->notrace_hash); 1232 } 1233 1234 static struct ftrace_hash *alloc_ftrace_hash(int size_bits) 1235 { 1236 struct ftrace_hash *hash; 1237 int size; 1238 1239 hash = kzalloc(sizeof(*hash), GFP_KERNEL); 1240 if (!hash) 1241 return NULL; 1242 1243 size = 1 << size_bits; 1244 hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL); 1245 1246 if (!hash->buckets) { 1247 kfree(hash); 1248 return NULL; 1249 } 1250 1251 hash->size_bits = size_bits; 1252 1253 return hash; 1254 } 1255 1256 static struct ftrace_hash * 1257 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash) 1258 { 1259 struct ftrace_func_entry *entry; 1260 struct ftrace_hash *new_hash; 1261 int size; 1262 int ret; 1263 int i; 1264 1265 new_hash = alloc_ftrace_hash(size_bits); 1266 if (!new_hash) 1267 return NULL; 1268 1269 /* Empty hash? */ 1270 if (ftrace_hash_empty(hash)) 1271 return new_hash; 1272 1273 size = 1 << hash->size_bits; 1274 for (i = 0; i < size; i++) { 1275 hlist_for_each_entry(entry, &hash->buckets[i], hlist) { 1276 ret = add_hash_entry(new_hash, entry->ip); 1277 if (ret < 0) 1278 goto free_hash; 1279 } 1280 } 1281 1282 FTRACE_WARN_ON(new_hash->count != hash->count); 1283 1284 return new_hash; 1285 1286 free_hash: 1287 free_ftrace_hash(new_hash); 1288 return NULL; 1289 } 1290 1291 static void 1292 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash); 1293 static void 1294 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash); 1295 1296 static int 1297 ftrace_hash_move(struct ftrace_ops *ops, int enable, 1298 struct ftrace_hash **dst, struct ftrace_hash *src) 1299 { 1300 struct ftrace_func_entry *entry; 1301 struct hlist_node *tn; 1302 struct hlist_head *hhd; 1303 struct ftrace_hash *old_hash; 1304 struct ftrace_hash *new_hash; 1305 int size = src->count; 1306 int bits = 0; 1307 int ret; 1308 int i; 1309 1310 /* 1311 * Remove the current set, update the hash and add 1312 * them back. 1313 */ 1314 ftrace_hash_rec_disable(ops, enable); 1315 1316 /* 1317 * If the new source is empty, just free dst and assign it 1318 * the empty_hash. 1319 */ 1320 if (!src->count) { 1321 free_ftrace_hash_rcu(*dst); 1322 rcu_assign_pointer(*dst, EMPTY_HASH); 1323 /* still need to update the function records */ 1324 ret = 0; 1325 goto out; 1326 } 1327 1328 /* 1329 * Make the hash size about 1/2 the # found 1330 */ 1331 for (size /= 2; size; size >>= 1) 1332 bits++; 1333 1334 /* Don't allocate too much */ 1335 if (bits > FTRACE_HASH_MAX_BITS) 1336 bits = FTRACE_HASH_MAX_BITS; 1337 1338 ret = -ENOMEM; 1339 new_hash = alloc_ftrace_hash(bits); 1340 if (!new_hash) 1341 goto out; 1342 1343 size = 1 << src->size_bits; 1344 for (i = 0; i < size; i++) { 1345 hhd = &src->buckets[i]; 1346 hlist_for_each_entry_safe(entry, tn, hhd, hlist) { 1347 remove_hash_entry(src, entry); 1348 __add_hash_entry(new_hash, entry); 1349 } 1350 } 1351 1352 old_hash = *dst; 1353 rcu_assign_pointer(*dst, new_hash); 1354 free_ftrace_hash_rcu(old_hash); 1355 1356 ret = 0; 1357 out: 1358 /* 1359 * Enable regardless of ret: 1360 * On success, we enable the new hash. 1361 * On failure, we re-enable the original hash. 
1362 */ 1363 ftrace_hash_rec_enable(ops, enable); 1364 1365 return ret; 1366 } 1367 1368 /* 1369 * Test the hashes for this ops to see if we want to call 1370 * the ops->func or not. 1371 * 1372 * It's a match if the ip is in the ops->filter_hash or 1373 * the filter_hash does not exist or is empty, 1374 * AND 1375 * the ip is not in the ops->notrace_hash. 1376 * 1377 * This needs to be called with preemption disabled as 1378 * the hashes are freed with call_rcu_sched(). 1379 */ 1380 static int 1381 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) 1382 { 1383 struct ftrace_hash *filter_hash; 1384 struct ftrace_hash *notrace_hash; 1385 int ret; 1386 1387 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 1388 /* 1389 * There's a small race when adding ops that the ftrace handler 1390 * that wants regs, may be called without them. We can not 1391 * allow that handler to be called if regs is NULL. 1392 */ 1393 if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) 1394 return 0; 1395 #endif 1396 1397 filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); 1398 notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); 1399 1400 if ((ftrace_hash_empty(filter_hash) || 1401 ftrace_lookup_ip(filter_hash, ip)) && 1402 (ftrace_hash_empty(notrace_hash) || 1403 !ftrace_lookup_ip(notrace_hash, ip))) 1404 ret = 1; 1405 else 1406 ret = 0; 1407 1408 return ret; 1409 } 1410 1411 /* 1412 * This is a double for. Do not use 'break' to break out of the loop, 1413 * you must use a goto. 1414 */ 1415 #define do_for_each_ftrace_rec(pg, rec) \ 1416 for (pg = ftrace_pages_start; pg; pg = pg->next) { \ 1417 int _____i; \ 1418 for (_____i = 0; _____i < pg->index; _____i++) { \ 1419 rec = &pg->records[_____i]; 1420 1421 #define while_for_each_ftrace_rec() \ 1422 } \ 1423 } 1424 1425 1426 static int ftrace_cmp_recs(const void *a, const void *b) 1427 { 1428 const struct dyn_ftrace *key = a; 1429 const struct dyn_ftrace *rec = b; 1430 1431 if (key->flags < rec->ip) 1432 return -1; 1433 if (key->ip >= rec->ip + MCOUNT_INSN_SIZE) 1434 return 1; 1435 return 0; 1436 } 1437 1438 static unsigned long ftrace_location_range(unsigned long start, unsigned long end) 1439 { 1440 struct ftrace_page *pg; 1441 struct dyn_ftrace *rec; 1442 struct dyn_ftrace key; 1443 1444 key.ip = start; 1445 key.flags = end; /* overload flags, as it is unsigned long */ 1446 1447 for (pg = ftrace_pages_start; pg; pg = pg->next) { 1448 if (end < pg->records[0].ip || 1449 start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) 1450 continue; 1451 rec = bsearch(&key, pg->records, pg->index, 1452 sizeof(struct dyn_ftrace), 1453 ftrace_cmp_recs); 1454 if (rec) 1455 return rec->ip; 1456 } 1457 1458 return 0; 1459 } 1460 1461 /** 1462 * ftrace_location - return true if the ip giving is a traced location 1463 * @ip: the instruction pointer to check 1464 * 1465 * Returns rec->ip if @ip given is a pointer to a ftrace location. 1466 * That is, the instruction that is either a NOP or call to 1467 * the function tracer. It checks the ftrace internal tables to 1468 * determine if the address belongs or not. 1469 */ 1470 unsigned long ftrace_location(unsigned long ip) 1471 { 1472 return ftrace_location_range(ip, ip); 1473 } 1474 1475 /** 1476 * ftrace_text_reserved - return true if range contains an ftrace location 1477 * @start: start of range to search 1478 * @end: end of range to search (inclusive). @end points to the last byte to check. 1479 * 1480 * Returns 1 if @start and @end contains a ftrace location. 
1481 * That is, the instruction that is either a NOP or call to 1482 * the function tracer. It checks the ftrace internal tables to 1483 * determine if the address belongs or not. 1484 */ 1485 int ftrace_text_reserved(const void *start, const void *end) 1486 { 1487 unsigned long ret; 1488 1489 ret = ftrace_location_range((unsigned long)start, 1490 (unsigned long)end); 1491 1492 return (int)!!ret; 1493 } 1494 1495 static void __ftrace_hash_rec_update(struct ftrace_ops *ops, 1496 int filter_hash, 1497 bool inc) 1498 { 1499 struct ftrace_hash *hash; 1500 struct ftrace_hash *other_hash; 1501 struct ftrace_page *pg; 1502 struct dyn_ftrace *rec; 1503 int count = 0; 1504 int all = 0; 1505 1506 /* Only update if the ops has been registered */ 1507 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 1508 return; 1509 1510 /* 1511 * In the filter_hash case: 1512 * If the count is zero, we update all records. 1513 * Otherwise we just update the items in the hash. 1514 * 1515 * In the notrace_hash case: 1516 * We enable the update in the hash. 1517 * As disabling notrace means enabling the tracing, 1518 * and enabling notrace means disabling, the inc variable 1519 * gets inversed. 1520 */ 1521 if (filter_hash) { 1522 hash = ops->filter_hash; 1523 other_hash = ops->notrace_hash; 1524 if (ftrace_hash_empty(hash)) 1525 all = 1; 1526 } else { 1527 inc = !inc; 1528 hash = ops->notrace_hash; 1529 other_hash = ops->filter_hash; 1530 /* 1531 * If the notrace hash has no items, 1532 * then there's nothing to do. 1533 */ 1534 if (ftrace_hash_empty(hash)) 1535 return; 1536 } 1537 1538 do_for_each_ftrace_rec(pg, rec) { 1539 int in_other_hash = 0; 1540 int in_hash = 0; 1541 int match = 0; 1542 1543 if (all) { 1544 /* 1545 * Only the filter_hash affects all records. 1546 * Update if the record is not in the notrace hash. 1547 */ 1548 if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip)) 1549 match = 1; 1550 } else { 1551 in_hash = !!ftrace_lookup_ip(hash, rec->ip); 1552 in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip); 1553 1554 /* 1555 * If filter_hash is set, we want to match all functions 1556 * that are in the hash but not in the other hash. 1557 * 1558 * If filter_hash is not set, then we are decrementing. 1559 * That means we match anything that is in the hash 1560 * and also in the other_hash. That is, we need to turn 1561 * off functions in the other hash because they are disabled 1562 * by this hash. 1563 */ 1564 if (filter_hash && in_hash && !in_other_hash) 1565 match = 1; 1566 else if (!filter_hash && in_hash && 1567 (in_other_hash || ftrace_hash_empty(other_hash))) 1568 match = 1; 1569 } 1570 if (!match) 1571 continue; 1572 1573 if (inc) { 1574 rec->flags++; 1575 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) 1576 return; 1577 /* 1578 * If any ops wants regs saved for this function 1579 * then all ops will get saved regs. 1580 */ 1581 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) 1582 rec->flags |= FTRACE_FL_REGS; 1583 } else { 1584 if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) 1585 return; 1586 rec->flags--; 1587 } 1588 count++; 1589 /* Shortcut, if we handled all records, we are done. 
*/ 1590 if (!all && count == hash->count) 1591 return; 1592 } while_for_each_ftrace_rec(); 1593 } 1594 1595 static void ftrace_hash_rec_disable(struct ftrace_ops *ops, 1596 int filter_hash) 1597 { 1598 __ftrace_hash_rec_update(ops, filter_hash, 0); 1599 } 1600 1601 static void ftrace_hash_rec_enable(struct ftrace_ops *ops, 1602 int filter_hash) 1603 { 1604 __ftrace_hash_rec_update(ops, filter_hash, 1); 1605 } 1606 1607 static void print_ip_ins(const char *fmt, unsigned char *p) 1608 { 1609 int i; 1610 1611 printk(KERN_CONT "%s", fmt); 1612 1613 for (i = 0; i < MCOUNT_INSN_SIZE; i++) 1614 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); 1615 } 1616 1617 /** 1618 * ftrace_bug - report and shutdown function tracer 1619 * @failed: The failed type (EFAULT, EINVAL, EPERM) 1620 * @ip: The address that failed 1621 * 1622 * The arch code that enables or disables the function tracing 1623 * can call ftrace_bug() when it has detected a problem in 1624 * modifying the code. @failed should be one of either: 1625 * EFAULT - if the problem happens on reading the @ip address 1626 * EINVAL - if what is read at @ip is not what was expected 1627 * EPERM - if the problem happens on writting to the @ip address 1628 */ 1629 void ftrace_bug(int failed, unsigned long ip) 1630 { 1631 switch (failed) { 1632 case -EFAULT: 1633 FTRACE_WARN_ON_ONCE(1); 1634 pr_info("ftrace faulted on modifying "); 1635 print_ip_sym(ip); 1636 break; 1637 case -EINVAL: 1638 FTRACE_WARN_ON_ONCE(1); 1639 pr_info("ftrace failed to modify "); 1640 print_ip_sym(ip); 1641 print_ip_ins(" actual: ", (unsigned char *)ip); 1642 printk(KERN_CONT "\n"); 1643 break; 1644 case -EPERM: 1645 FTRACE_WARN_ON_ONCE(1); 1646 pr_info("ftrace faulted on writing "); 1647 print_ip_sym(ip); 1648 break; 1649 default: 1650 FTRACE_WARN_ON_ONCE(1); 1651 pr_info("ftrace faulted on unknown error "); 1652 print_ip_sym(ip); 1653 } 1654 } 1655 1656 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) 1657 { 1658 unsigned long flag = 0UL; 1659 1660 /* 1661 * If we are updating calls: 1662 * 1663 * If the record has a ref count, then we need to enable it 1664 * because someone is using it. 1665 * 1666 * Otherwise we make sure its disabled. 1667 * 1668 * If we are disabling calls, then disable all records that 1669 * are enabled. 1670 */ 1671 if (enable && (rec->flags & ~FTRACE_FL_MASK)) 1672 flag = FTRACE_FL_ENABLED; 1673 1674 /* 1675 * If enabling and the REGS flag does not match the REGS_EN, then 1676 * do not ignore this record. Set flags to fail the compare against 1677 * ENABLED. 1678 */ 1679 if (flag && 1680 (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN))) 1681 flag |= FTRACE_FL_REGS; 1682 1683 /* If the state of this record hasn't changed, then do nothing */ 1684 if ((rec->flags & FTRACE_FL_ENABLED) == flag) 1685 return FTRACE_UPDATE_IGNORE; 1686 1687 if (flag) { 1688 /* Save off if rec is being enabled (for return value) */ 1689 flag ^= rec->flags & FTRACE_FL_ENABLED; 1690 1691 if (update) { 1692 rec->flags |= FTRACE_FL_ENABLED; 1693 if (flag & FTRACE_FL_REGS) { 1694 if (rec->flags & FTRACE_FL_REGS) 1695 rec->flags |= FTRACE_FL_REGS_EN; 1696 else 1697 rec->flags &= ~FTRACE_FL_REGS_EN; 1698 } 1699 } 1700 1701 /* 1702 * If this record is being updated from a nop, then 1703 * return UPDATE_MAKE_CALL. 1704 * Otherwise, 1705 * return UPDATE_MODIFY_CALL to tell the caller to convert 1706 * from the save regs, to a non-save regs function or 1707 * vice versa. 
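		 * For example: a record gaining its first user goes from a
		 * nop to a call (MAKE_CALL), while an already enabled record
		 * that only flips FTRACE_FL_REGS asks the arch to switch
		 * trampolines in place (MODIFY_CALL).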
1708 */ 1709 if (flag & FTRACE_FL_ENABLED) 1710 return FTRACE_UPDATE_MAKE_CALL; 1711 1712 return FTRACE_UPDATE_MODIFY_CALL; 1713 } 1714 1715 if (update) { 1716 /* If there's no more users, clear all flags */ 1717 if (!(rec->flags & ~FTRACE_FL_MASK)) 1718 rec->flags = 0; 1719 else 1720 /* Just disable the record (keep REGS state) */ 1721 rec->flags &= ~FTRACE_FL_ENABLED; 1722 } 1723 1724 return FTRACE_UPDATE_MAKE_NOP; 1725 } 1726 1727 /** 1728 * ftrace_update_record, set a record that now is tracing or not 1729 * @rec: the record to update 1730 * @enable: set to 1 if the record is tracing, zero to force disable 1731 * 1732 * The records that represent all functions that can be traced need 1733 * to be updated when tracing has been enabled. 1734 */ 1735 int ftrace_update_record(struct dyn_ftrace *rec, int enable) 1736 { 1737 return ftrace_check_record(rec, enable, 1); 1738 } 1739 1740 /** 1741 * ftrace_test_record, check if the record has been enabled or not 1742 * @rec: the record to test 1743 * @enable: set to 1 to check if enabled, 0 if it is disabled 1744 * 1745 * The arch code may need to test if a record is already set to 1746 * tracing to determine how to modify the function code that it 1747 * represents. 1748 */ 1749 int ftrace_test_record(struct dyn_ftrace *rec, int enable) 1750 { 1751 return ftrace_check_record(rec, enable, 0); 1752 } 1753 1754 /** 1755 * ftrace_get_addr_new - Get the call address to set to 1756 * @rec: The ftrace record descriptor 1757 * 1758 * If the record has the FTRACE_FL_REGS set, that means that it 1759 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS 1760 * is not not set, then it wants to convert to the normal callback. 1761 * 1762 * Returns the address of the trampoline to set to 1763 */ 1764 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec) 1765 { 1766 if (rec->flags & FTRACE_FL_REGS) 1767 return (unsigned long)FTRACE_REGS_ADDR; 1768 else 1769 return (unsigned long)FTRACE_ADDR; 1770 } 1771 1772 /** 1773 * ftrace_get_addr_curr - Get the call address that is already there 1774 * @rec: The ftrace record descriptor 1775 * 1776 * The FTRACE_FL_REGS_EN is set when the record already points to 1777 * a function that saves all the regs. Basically the '_EN' version 1778 * represents the current state of the function. 
1779 * 1780 * Returns the address of the trampoline that is currently being called 1781 */ 1782 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec) 1783 { 1784 if (rec->flags & FTRACE_FL_REGS_EN) 1785 return (unsigned long)FTRACE_REGS_ADDR; 1786 else 1787 return (unsigned long)FTRACE_ADDR; 1788 } 1789 1790 static int 1791 __ftrace_replace_code(struct dyn_ftrace *rec, int enable) 1792 { 1793 unsigned long ftrace_old_addr; 1794 unsigned long ftrace_addr; 1795 int ret; 1796 1797 ftrace_addr = ftrace_get_addr_new(rec); 1798 1799 /* This needs to be done before we call ftrace_update_record */ 1800 ftrace_old_addr = ftrace_get_addr_curr(rec); 1801 1802 ret = ftrace_update_record(rec, enable); 1803 1804 switch (ret) { 1805 case FTRACE_UPDATE_IGNORE: 1806 return 0; 1807 1808 case FTRACE_UPDATE_MAKE_CALL: 1809 return ftrace_make_call(rec, ftrace_addr); 1810 1811 case FTRACE_UPDATE_MAKE_NOP: 1812 return ftrace_make_nop(NULL, rec, ftrace_addr); 1813 1814 case FTRACE_UPDATE_MODIFY_CALL: 1815 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); 1816 } 1817 1818 return -1; /* unknow ftrace bug */ 1819 } 1820 1821 void __weak ftrace_replace_code(int enable) 1822 { 1823 struct dyn_ftrace *rec; 1824 struct ftrace_page *pg; 1825 int failed; 1826 1827 if (unlikely(ftrace_disabled)) 1828 return; 1829 1830 do_for_each_ftrace_rec(pg, rec) { 1831 failed = __ftrace_replace_code(rec, enable); 1832 if (failed) { 1833 ftrace_bug(failed, rec->ip); 1834 /* Stop processing */ 1835 return; 1836 } 1837 } while_for_each_ftrace_rec(); 1838 } 1839 1840 struct ftrace_rec_iter { 1841 struct ftrace_page *pg; 1842 int index; 1843 }; 1844 1845 /** 1846 * ftrace_rec_iter_start, start up iterating over traced functions 1847 * 1848 * Returns an iterator handle that is used to iterate over all 1849 * the records that represent address locations where functions 1850 * are traced. 1851 * 1852 * May return NULL if no records are available. 1853 */ 1854 struct ftrace_rec_iter *ftrace_rec_iter_start(void) 1855 { 1856 /* 1857 * We only use a single iterator. 1858 * Protected by the ftrace_lock mutex. 1859 */ 1860 static struct ftrace_rec_iter ftrace_rec_iter; 1861 struct ftrace_rec_iter *iter = &ftrace_rec_iter; 1862 1863 iter->pg = ftrace_pages_start; 1864 iter->index = 0; 1865 1866 /* Could have empty pages */ 1867 while (iter->pg && !iter->pg->index) 1868 iter->pg = iter->pg->next; 1869 1870 if (!iter->pg) 1871 return NULL; 1872 1873 return iter; 1874 } 1875 1876 /** 1877 * ftrace_rec_iter_next, get the next record to process. 1878 * @iter: The handle to the iterator. 1879 * 1880 * Returns the next iterator after the given iterator @iter. 1881 */ 1882 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter) 1883 { 1884 iter->index++; 1885 1886 if (iter->index >= iter->pg->index) { 1887 iter->pg = iter->pg->next; 1888 iter->index = 0; 1889 1890 /* Could have empty pages */ 1891 while (iter->pg && !iter->pg->index) 1892 iter->pg = iter->pg->next; 1893 } 1894 1895 if (!iter->pg) 1896 return NULL; 1897 1898 return iter; 1899 } 1900 1901 /** 1902 * ftrace_rec_iter_record, get the record at the iterator location 1903 * @iter: The current iterator location 1904 * 1905 * Returns the record that the current @iter is at. 
1906 */ 1907 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter) 1908 { 1909 return &iter->pg->records[iter->index]; 1910 } 1911 1912 static int 1913 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) 1914 { 1915 unsigned long ip; 1916 int ret; 1917 1918 ip = rec->ip; 1919 1920 if (unlikely(ftrace_disabled)) 1921 return 0; 1922 1923 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); 1924 if (ret) { 1925 ftrace_bug(ret, ip); 1926 return 0; 1927 } 1928 return 1; 1929 } 1930 1931 /* 1932 * archs can override this function if they must do something 1933 * before the modifying code is performed. 1934 */ 1935 int __weak ftrace_arch_code_modify_prepare(void) 1936 { 1937 return 0; 1938 } 1939 1940 /* 1941 * archs can override this function if they must do something 1942 * after the modifying code is performed. 1943 */ 1944 int __weak ftrace_arch_code_modify_post_process(void) 1945 { 1946 return 0; 1947 } 1948 1949 void ftrace_modify_all_code(int command) 1950 { 1951 int update = command & FTRACE_UPDATE_TRACE_FUNC; 1952 int err = 0; 1953 1954 /* 1955 * If the ftrace_caller calls a ftrace_ops func directly, 1956 * we need to make sure that it only traces functions it 1957 * expects to trace. When doing the switch of functions, 1958 * we need to update to the ftrace_ops_list_func first 1959 * before the transition between old and new calls are set, 1960 * as the ftrace_ops_list_func will check the ops hashes 1961 * to make sure the ops are having the right functions 1962 * traced. 1963 */ 1964 if (update) { 1965 err = ftrace_update_ftrace_func(ftrace_ops_list_func); 1966 if (FTRACE_WARN_ON(err)) 1967 return; 1968 } 1969 1970 if (command & FTRACE_UPDATE_CALLS) 1971 ftrace_replace_code(1); 1972 else if (command & FTRACE_DISABLE_CALLS) 1973 ftrace_replace_code(0); 1974 1975 if (update && ftrace_trace_function != ftrace_ops_list_func) { 1976 function_trace_op = set_function_trace_op; 1977 smp_wmb(); 1978 /* If irqs are disabled, we are in stop machine */ 1979 if (!irqs_disabled()) 1980 smp_call_function(ftrace_sync_ipi, NULL, 1); 1981 err = ftrace_update_ftrace_func(ftrace_trace_function); 1982 if (FTRACE_WARN_ON(err)) 1983 return; 1984 } 1985 1986 if (command & FTRACE_START_FUNC_RET) 1987 err = ftrace_enable_ftrace_graph_caller(); 1988 else if (command & FTRACE_STOP_FUNC_RET) 1989 err = ftrace_disable_ftrace_graph_caller(); 1990 FTRACE_WARN_ON(err); 1991 } 1992 1993 static int __ftrace_modify_code(void *data) 1994 { 1995 int *command = data; 1996 1997 ftrace_modify_all_code(*command); 1998 1999 return 0; 2000 } 2001 2002 /** 2003 * ftrace_run_stop_machine, go back to the stop machine method 2004 * @command: The command to tell ftrace what to do 2005 * 2006 * If an arch needs to fall back to the stop machine method, the 2007 * it can call this function. 2008 */ 2009 void ftrace_run_stop_machine(int command) 2010 { 2011 stop_machine(__ftrace_modify_code, &command, NULL); 2012 } 2013 2014 /** 2015 * arch_ftrace_update_code, modify the code to trace or not trace 2016 * @command: The command that needs to be done 2017 * 2018 * Archs can override this function if it does not need to 2019 * run stop_machine() to modify code. 
2020 */ 2021 void __weak arch_ftrace_update_code(int command) 2022 { 2023 ftrace_run_stop_machine(command); 2024 } 2025 2026 static void ftrace_run_update_code(int command) 2027 { 2028 int ret; 2029 2030 ret = ftrace_arch_code_modify_prepare(); 2031 FTRACE_WARN_ON(ret); 2032 if (ret) 2033 return; 2034 /* 2035 * Do not call function tracer while we update the code. 2036 * We are in stop machine. 2037 */ 2038 function_trace_stop++; 2039 2040 /* 2041 * By default we use stop_machine() to modify the code. 2042 * But archs can do what ever they want as long as it 2043 * is safe. The stop_machine() is the safest, but also 2044 * produces the most overhead. 2045 */ 2046 arch_ftrace_update_code(command); 2047 2048 function_trace_stop--; 2049 2050 ret = ftrace_arch_code_modify_post_process(); 2051 FTRACE_WARN_ON(ret); 2052 } 2053 2054 static ftrace_func_t saved_ftrace_func; 2055 static int ftrace_start_up; 2056 static int global_start_up; 2057 2058 static void control_ops_free(struct ftrace_ops *ops) 2059 { 2060 free_percpu(ops->disabled); 2061 } 2062 2063 static void ftrace_startup_enable(int command) 2064 { 2065 if (saved_ftrace_func != ftrace_trace_function) { 2066 saved_ftrace_func = ftrace_trace_function; 2067 command |= FTRACE_UPDATE_TRACE_FUNC; 2068 } 2069 2070 if (!command || !ftrace_enabled) 2071 return; 2072 2073 ftrace_run_update_code(command); 2074 } 2075 2076 static int ftrace_startup(struct ftrace_ops *ops, int command) 2077 { 2078 int ret; 2079 2080 if (unlikely(ftrace_disabled)) 2081 return -ENODEV; 2082 2083 ret = __register_ftrace_function(ops); 2084 if (ret) 2085 return ret; 2086 2087 ftrace_start_up++; 2088 command |= FTRACE_UPDATE_CALLS; 2089 2090 ops->flags |= FTRACE_OPS_FL_ENABLED; 2091 2092 ftrace_hash_rec_enable(ops, 1); 2093 2094 ftrace_startup_enable(command); 2095 2096 return 0; 2097 } 2098 2099 static int ftrace_shutdown(struct ftrace_ops *ops, int command) 2100 { 2101 int ret; 2102 2103 if (unlikely(ftrace_disabled)) 2104 return -ENODEV; 2105 2106 ret = __unregister_ftrace_function(ops); 2107 if (ret) 2108 return ret; 2109 2110 ftrace_start_up--; 2111 /* 2112 * Just warn in case of unbalance, no need to kill ftrace, it's not 2113 * critical but the ftrace_call callers may be never nopped again after 2114 * further ftrace uses. 2115 */ 2116 WARN_ON_ONCE(ftrace_start_up < 0); 2117 2118 ftrace_hash_rec_disable(ops, 1); 2119 2120 if (!global_start_up) 2121 ops->flags &= ~FTRACE_OPS_FL_ENABLED; 2122 2123 command |= FTRACE_UPDATE_CALLS; 2124 2125 if (saved_ftrace_func != ftrace_trace_function) { 2126 saved_ftrace_func = ftrace_trace_function; 2127 command |= FTRACE_UPDATE_TRACE_FUNC; 2128 } 2129 2130 if (!command || !ftrace_enabled) { 2131 /* 2132 * If these are control ops, they still need their 2133 * per_cpu field freed. Since, function tracing is 2134 * not currently active, we can just free them 2135 * without synchronizing all CPUs. 2136 */ 2137 if (ops->flags & FTRACE_OPS_FL_CONTROL) 2138 control_ops_free(ops); 2139 return 0; 2140 } 2141 2142 ftrace_run_update_code(command); 2143 2144 /* 2145 * Dynamic ops may be freed, we must make sure that all 2146 * callers are done before leaving this function. 2147 * The same goes for freeing the per_cpu data of the control 2148 * ops. 2149 * 2150 * Again, normal synchronize_sched() is not good enough. 2151 * We need to do a hard force of sched synchronization. 2152 * This is because we use preempt_disable() to do RCU, but 2153 * the function tracers can be called where RCU is not watching 2154 * (like before user_exit()). 
We can not rely on the RCU 2155 * infrastructure to do the synchronization, thus we must do it 2156 * ourselves. 2157 */ 2158 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) { 2159 schedule_on_each_cpu(ftrace_sync); 2160 2161 if (ops->flags & FTRACE_OPS_FL_CONTROL) 2162 control_ops_free(ops); 2163 } 2164 2165 return 0; 2166 } 2167 2168 static void ftrace_startup_sysctl(void) 2169 { 2170 if (unlikely(ftrace_disabled)) 2171 return; 2172 2173 /* Force update next time */ 2174 saved_ftrace_func = NULL; 2175 /* ftrace_start_up is true if we want ftrace running */ 2176 if (ftrace_start_up) 2177 ftrace_run_update_code(FTRACE_UPDATE_CALLS); 2178 } 2179 2180 static void ftrace_shutdown_sysctl(void) 2181 { 2182 if (unlikely(ftrace_disabled)) 2183 return; 2184 2185 /* ftrace_start_up is true if ftrace is running */ 2186 if (ftrace_start_up) 2187 ftrace_run_update_code(FTRACE_DISABLE_CALLS); 2188 } 2189 2190 static cycle_t ftrace_update_time; 2191 unsigned long ftrace_update_tot_cnt; 2192 2193 static inline int ops_traces_mod(struct ftrace_ops *ops) 2194 { 2195 /* 2196 * Filter_hash being empty will default to trace module. 2197 * But notrace hash requires a test of individual module functions. 2198 */ 2199 return ftrace_hash_empty(ops->filter_hash) && 2200 ftrace_hash_empty(ops->notrace_hash); 2201 } 2202 2203 /* 2204 * Check if the current ops references the record. 2205 * 2206 * If the ops traces all functions, then it was already accounted for. 2207 * If the ops does not trace the current record function, skip it. 2208 * If the ops ignores the function via notrace filter, skip it. 2209 */ 2210 static inline bool 2211 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) 2212 { 2213 /* If ops isn't enabled, ignore it */ 2214 if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) 2215 return 0; 2216 2217 /* If ops traces all mods, we already accounted for it */ 2218 if (ops_traces_mod(ops)) 2219 return 0; 2220 2221 /* The function must be in the filter */ 2222 if (!ftrace_hash_empty(ops->filter_hash) && 2223 !ftrace_lookup_ip(ops->filter_hash, rec->ip)) 2224 return 0; 2225 2226 /* If in notrace hash, we ignore it too */ 2227 if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) 2228 return 0; 2229 2230 return 1; 2231 } 2232 2233 static int referenced_filters(struct dyn_ftrace *rec) 2234 { 2235 struct ftrace_ops *ops; 2236 int cnt = 0; 2237 2238 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { 2239 if (ops_references_rec(ops, rec)) 2240 cnt++; 2241 } 2242 2243 return cnt; 2244 } 2245 2246 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) 2247 { 2248 struct ftrace_page *pg; 2249 struct dyn_ftrace *p; 2250 cycle_t start, stop; 2251 unsigned long update_cnt = 0; 2252 unsigned long ref = 0; 2253 bool test = false; 2254 int i; 2255 2256 /* 2257 * When adding a module, we need to check if tracers are 2258 * currently enabled and if they are set to trace all functions. 2259 * If they are, we need to enable the module functions as well 2260 * as update the reference counts for those function records. 
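 *
 * Two cases are handled below: an enabled ops whose hashes are empty
 * traces every function, so it contributes a fixed reference count
 * (ref) to every new record; an enabled ops that has filters set may
 * or may not reference a given record, so we only note (test) that
 * each record must be checked individually via referenced_filters().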
2261 */ 2262 if (mod) { 2263 struct ftrace_ops *ops; 2264 2265 for (ops = ftrace_ops_list; 2266 ops != &ftrace_list_end; ops = ops->next) { 2267 if (ops->flags & FTRACE_OPS_FL_ENABLED) { 2268 if (ops_traces_mod(ops)) 2269 ref++; 2270 else 2271 test = true; 2272 } 2273 } 2274 } 2275 2276 start = ftrace_now(raw_smp_processor_id()); 2277 2278 for (pg = new_pgs; pg; pg = pg->next) { 2279 2280 for (i = 0; i < pg->index; i++) { 2281 int cnt = ref; 2282 2283 /* If something went wrong, bail without enabling anything */ 2284 if (unlikely(ftrace_disabled)) 2285 return -1; 2286 2287 p = &pg->records[i]; 2288 if (test) 2289 cnt += referenced_filters(p); 2290 p->flags = cnt; 2291 2292 /* 2293 * Do the initial record conversion from mcount jump 2294 * to the NOP instructions. 2295 */ 2296 if (!ftrace_code_disable(mod, p)) 2297 break; 2298 2299 update_cnt++; 2300 2301 /* 2302 * If the tracing is enabled, go ahead and enable the record. 2303 * 2304 * The reason not to enable the record immediatelly is the 2305 * inherent check of ftrace_make_nop/ftrace_make_call for 2306 * correct previous instructions. Making first the NOP 2307 * conversion puts the module to the correct state, thus 2308 * passing the ftrace_make_call check. 2309 */ 2310 if (ftrace_start_up && cnt) { 2311 int failed = __ftrace_replace_code(p, 1); 2312 if (failed) 2313 ftrace_bug(failed, p->ip); 2314 } 2315 } 2316 } 2317 2318 stop = ftrace_now(raw_smp_processor_id()); 2319 ftrace_update_time = stop - start; 2320 ftrace_update_tot_cnt += update_cnt; 2321 2322 return 0; 2323 } 2324 2325 static int ftrace_allocate_records(struct ftrace_page *pg, int count) 2326 { 2327 int order; 2328 int cnt; 2329 2330 if (WARN_ON(!count)) 2331 return -EINVAL; 2332 2333 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE)); 2334 2335 /* 2336 * We want to fill as much as possible. No more than a page 2337 * may be empty. 2338 */ 2339 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE) 2340 order--; 2341 2342 again: 2343 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); 2344 2345 if (!pg->records) { 2346 /* if we can't allocate this size, try something smaller */ 2347 if (!order) 2348 return -ENOMEM; 2349 order >>= 1; 2350 goto again; 2351 } 2352 2353 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; 2354 pg->size = cnt; 2355 2356 if (cnt > count) 2357 cnt = count; 2358 2359 return cnt; 2360 } 2361 2362 static struct ftrace_page * 2363 ftrace_allocate_pages(unsigned long num_to_init) 2364 { 2365 struct ftrace_page *start_pg; 2366 struct ftrace_page *pg; 2367 int order; 2368 int cnt; 2369 2370 if (!num_to_init) 2371 return 0; 2372 2373 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL); 2374 if (!pg) 2375 return NULL; 2376 2377 /* 2378 * Try to allocate as much as possible in one continues 2379 * location that fills in all of the space. We want to 2380 * waste as little space as possible. 
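 *
 * The loop below lets ftrace_allocate_records() pick the largest page
 * order that is still fully used (falling back to smaller orders if
 * the allocation fails), and whatever records do not fit in that
 * group are carried over to a freshly allocated ftrace_page chained
 * onto ->next.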
2381 */ 2382 for (;;) { 2383 cnt = ftrace_allocate_records(pg, num_to_init); 2384 if (cnt < 0) 2385 goto free_pages; 2386 2387 num_to_init -= cnt; 2388 if (!num_to_init) 2389 break; 2390 2391 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL); 2392 if (!pg->next) 2393 goto free_pages; 2394 2395 pg = pg->next; 2396 } 2397 2398 return start_pg; 2399 2400 free_pages: 2401 while (start_pg) { 2402 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 2403 free_pages((unsigned long)pg->records, order); 2404 start_pg = pg->next; 2405 kfree(pg); 2406 pg = start_pg; 2407 } 2408 pr_info("ftrace: FAILED to allocate memory for functions\n"); 2409 return NULL; 2410 } 2411 2412 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ 2413 2414 struct ftrace_iterator { 2415 loff_t pos; 2416 loff_t func_pos; 2417 struct ftrace_page *pg; 2418 struct dyn_ftrace *func; 2419 struct ftrace_func_probe *probe; 2420 struct trace_parser parser; 2421 struct ftrace_hash *hash; 2422 struct ftrace_ops *ops; 2423 int hidx; 2424 int idx; 2425 unsigned flags; 2426 }; 2427 2428 static void * 2429 t_hash_next(struct seq_file *m, loff_t *pos) 2430 { 2431 struct ftrace_iterator *iter = m->private; 2432 struct hlist_node *hnd = NULL; 2433 struct hlist_head *hhd; 2434 2435 (*pos)++; 2436 iter->pos = *pos; 2437 2438 if (iter->probe) 2439 hnd = &iter->probe->node; 2440 retry: 2441 if (iter->hidx >= FTRACE_FUNC_HASHSIZE) 2442 return NULL; 2443 2444 hhd = &ftrace_func_hash[iter->hidx]; 2445 2446 if (hlist_empty(hhd)) { 2447 iter->hidx++; 2448 hnd = NULL; 2449 goto retry; 2450 } 2451 2452 if (!hnd) 2453 hnd = hhd->first; 2454 else { 2455 hnd = hnd->next; 2456 if (!hnd) { 2457 iter->hidx++; 2458 goto retry; 2459 } 2460 } 2461 2462 if (WARN_ON_ONCE(!hnd)) 2463 return NULL; 2464 2465 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node); 2466 2467 return iter; 2468 } 2469 2470 static void *t_hash_start(struct seq_file *m, loff_t *pos) 2471 { 2472 struct ftrace_iterator *iter = m->private; 2473 void *p = NULL; 2474 loff_t l; 2475 2476 if (!(iter->flags & FTRACE_ITER_DO_HASH)) 2477 return NULL; 2478 2479 if (iter->func_pos > *pos) 2480 return NULL; 2481 2482 iter->hidx = 0; 2483 for (l = 0; l <= (*pos - iter->func_pos); ) { 2484 p = t_hash_next(m, &l); 2485 if (!p) 2486 break; 2487 } 2488 if (!p) 2489 return NULL; 2490 2491 /* Only set this if we have an item */ 2492 iter->flags |= FTRACE_ITER_HASH; 2493 2494 return iter; 2495 } 2496 2497 static int 2498 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter) 2499 { 2500 struct ftrace_func_probe *rec; 2501 2502 rec = iter->probe; 2503 if (WARN_ON_ONCE(!rec)) 2504 return -EIO; 2505 2506 if (rec->ops->print) 2507 return rec->ops->print(m, rec->ip, rec->ops, rec->data); 2508 2509 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func); 2510 2511 if (rec->data) 2512 seq_printf(m, ":%p", rec->data); 2513 seq_putc(m, '\n'); 2514 2515 return 0; 2516 } 2517 2518 static void * 2519 t_next(struct seq_file *m, void *v, loff_t *pos) 2520 { 2521 struct ftrace_iterator *iter = m->private; 2522 struct ftrace_ops *ops = iter->ops; 2523 struct dyn_ftrace *rec = NULL; 2524 2525 if (unlikely(ftrace_disabled)) 2526 return NULL; 2527 2528 if (iter->flags & FTRACE_ITER_HASH) 2529 return t_hash_next(m, pos); 2530 2531 (*pos)++; 2532 iter->pos = iter->func_pos = *pos; 2533 2534 if (iter->flags & FTRACE_ITER_PRINTALL) 2535 return t_hash_start(m, pos); 2536 2537 retry: 2538 if (iter->idx >= iter->pg->index) { 2539 if (iter->pg->next) { 2540 iter->pg = iter->pg->next; 2541 iter->idx 
= 0; 2542 goto retry; 2543 } 2544 } else { 2545 rec = &iter->pg->records[iter->idx++]; 2546 if (((iter->flags & FTRACE_ITER_FILTER) && 2547 !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) || 2548 2549 ((iter->flags & FTRACE_ITER_NOTRACE) && 2550 !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) || 2551 2552 ((iter->flags & FTRACE_ITER_ENABLED) && 2553 !(rec->flags & FTRACE_FL_ENABLED))) { 2554 2555 rec = NULL; 2556 goto retry; 2557 } 2558 } 2559 2560 if (!rec) 2561 return t_hash_start(m, pos); 2562 2563 iter->func = rec; 2564 2565 return iter; 2566 } 2567 2568 static void reset_iter_read(struct ftrace_iterator *iter) 2569 { 2570 iter->pos = 0; 2571 iter->func_pos = 0; 2572 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH); 2573 } 2574 2575 static void *t_start(struct seq_file *m, loff_t *pos) 2576 { 2577 struct ftrace_iterator *iter = m->private; 2578 struct ftrace_ops *ops = iter->ops; 2579 void *p = NULL; 2580 loff_t l; 2581 2582 mutex_lock(&ftrace_lock); 2583 2584 if (unlikely(ftrace_disabled)) 2585 return NULL; 2586 2587 /* 2588 * If an lseek was done, then reset and start from beginning. 2589 */ 2590 if (*pos < iter->pos) 2591 reset_iter_read(iter); 2592 2593 /* 2594 * For set_ftrace_filter reading, if we have the filter 2595 * off, we can short cut and just print out that all 2596 * functions are enabled. 2597 */ 2598 if (iter->flags & FTRACE_ITER_FILTER && 2599 ftrace_hash_empty(ops->filter_hash)) { 2600 if (*pos > 0) 2601 return t_hash_start(m, pos); 2602 iter->flags |= FTRACE_ITER_PRINTALL; 2603 /* reset in case of seek/pread */ 2604 iter->flags &= ~FTRACE_ITER_HASH; 2605 return iter; 2606 } 2607 2608 if (iter->flags & FTRACE_ITER_HASH) 2609 return t_hash_start(m, pos); 2610 2611 /* 2612 * Unfortunately, we need to restart at ftrace_pages_start 2613 * every time we let go of the ftrace_mutex. This is because 2614 * those pointers can change without the lock. 2615 */ 2616 iter->pg = ftrace_pages_start; 2617 iter->idx = 0; 2618 for (l = 0; l <= *pos; ) { 2619 p = t_next(m, p, &l); 2620 if (!p) 2621 break; 2622 } 2623 2624 if (!p) 2625 return t_hash_start(m, pos); 2626 2627 return iter; 2628 } 2629 2630 static void t_stop(struct seq_file *m, void *p) 2631 { 2632 mutex_unlock(&ftrace_lock); 2633 } 2634 2635 static int t_show(struct seq_file *m, void *v) 2636 { 2637 struct ftrace_iterator *iter = m->private; 2638 struct dyn_ftrace *rec; 2639 2640 if (iter->flags & FTRACE_ITER_HASH) 2641 return t_hash_show(m, iter); 2642 2643 if (iter->flags & FTRACE_ITER_PRINTALL) { 2644 seq_printf(m, "#### all functions enabled ####\n"); 2645 return 0; 2646 } 2647 2648 rec = iter->func; 2649 2650 if (!rec) 2651 return 0; 2652 2653 seq_printf(m, "%ps", (void *)rec->ip); 2654 if (iter->flags & FTRACE_ITER_ENABLED) 2655 seq_printf(m, " (%ld)%s", 2656 rec->flags & ~FTRACE_FL_MASK, 2657 rec->flags & FTRACE_FL_REGS ? " R" : ""); 2658 seq_printf(m, "\n"); 2659 2660 return 0; 2661 } 2662 2663 static const struct seq_operations show_ftrace_seq_ops = { 2664 .start = t_start, 2665 .next = t_next, 2666 .stop = t_stop, 2667 .show = t_show, 2668 }; 2669 2670 static int 2671 ftrace_avail_open(struct inode *inode, struct file *file) 2672 { 2673 struct ftrace_iterator *iter; 2674 2675 if (unlikely(ftrace_disabled)) 2676 return -ENODEV; 2677 2678 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 2679 if (iter) { 2680 iter->pg = ftrace_pages_start; 2681 iter->ops = &global_ops; 2682 } 2683 2684 return iter ? 
0 : -ENOMEM; 2685 } 2686 2687 static int 2688 ftrace_enabled_open(struct inode *inode, struct file *file) 2689 { 2690 struct ftrace_iterator *iter; 2691 2692 if (unlikely(ftrace_disabled)) 2693 return -ENODEV; 2694 2695 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); 2696 if (iter) { 2697 iter->pg = ftrace_pages_start; 2698 iter->flags = FTRACE_ITER_ENABLED; 2699 iter->ops = &global_ops; 2700 } 2701 2702 return iter ? 0 : -ENOMEM; 2703 } 2704 2705 static void ftrace_filter_reset(struct ftrace_hash *hash) 2706 { 2707 mutex_lock(&ftrace_lock); 2708 ftrace_hash_clear(hash); 2709 mutex_unlock(&ftrace_lock); 2710 } 2711 2712 /** 2713 * ftrace_regex_open - initialize function tracer filter files 2714 * @ops: The ftrace_ops that hold the hash filters 2715 * @flag: The type of filter to process 2716 * @inode: The inode, usually passed in to your open routine 2717 * @file: The file, usually passed in to your open routine 2718 * 2719 * ftrace_regex_open() initializes the filter files for the 2720 * @ops. Depending on @flag it may process the filter hash or 2721 * the notrace hash of @ops. With this called from the open 2722 * routine, you can use ftrace_filter_write() for the write 2723 * routine if @flag has FTRACE_ITER_FILTER set, or 2724 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set. 2725 * tracing_lseek() should be used as the lseek routine, and 2726 * release must call ftrace_regex_release(). 2727 */ 2728 int 2729 ftrace_regex_open(struct ftrace_ops *ops, int flag, 2730 struct inode *inode, struct file *file) 2731 { 2732 struct ftrace_iterator *iter; 2733 struct ftrace_hash *hash; 2734 int ret = 0; 2735 2736 ftrace_ops_init(ops); 2737 2738 if (unlikely(ftrace_disabled)) 2739 return -ENODEV; 2740 2741 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 2742 if (!iter) 2743 return -ENOMEM; 2744 2745 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) { 2746 kfree(iter); 2747 return -ENOMEM; 2748 } 2749 2750 iter->ops = ops; 2751 iter->flags = flag; 2752 2753 mutex_lock(&ops->regex_lock); 2754 2755 if (flag & FTRACE_ITER_NOTRACE) 2756 hash = ops->notrace_hash; 2757 else 2758 hash = ops->filter_hash; 2759 2760 if (file->f_mode & FMODE_WRITE) { 2761 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash); 2762 if (!iter->hash) { 2763 trace_parser_put(&iter->parser); 2764 kfree(iter); 2765 ret = -ENOMEM; 2766 goto out_unlock; 2767 } 2768 } 2769 2770 if ((file->f_mode & FMODE_WRITE) && 2771 (file->f_flags & O_TRUNC)) 2772 ftrace_filter_reset(iter->hash); 2773 2774 if (file->f_mode & FMODE_READ) { 2775 iter->pg = ftrace_pages_start; 2776 2777 ret = seq_open(file, &show_ftrace_seq_ops); 2778 if (!ret) { 2779 struct seq_file *m = file->private_data; 2780 m->private = iter; 2781 } else { 2782 /* Failed */ 2783 free_ftrace_hash(iter->hash); 2784 trace_parser_put(&iter->parser); 2785 kfree(iter); 2786 } 2787 } else 2788 file->private_data = iter; 2789 2790 out_unlock: 2791 mutex_unlock(&ops->regex_lock); 2792 2793 return ret; 2794 } 2795 2796 static int 2797 ftrace_filter_open(struct inode *inode, struct file *file) 2798 { 2799 struct ftrace_ops *ops = inode->i_private; 2800 2801 return ftrace_regex_open(ops, 2802 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH, 2803 inode, file); 2804 } 2805 2806 static int 2807 ftrace_notrace_open(struct inode *inode, struct file *file) 2808 { 2809 struct ftrace_ops *ops = inode->i_private; 2810 2811 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE, 2812 inode, file); 2813 } 2814 2815 static int ftrace_match(char *str, char 
*regex, int len, int type) 2816 { 2817 int matched = 0; 2818 int slen; 2819 2820 switch (type) { 2821 case MATCH_FULL: 2822 if (strcmp(str, regex) == 0) 2823 matched = 1; 2824 break; 2825 case MATCH_FRONT_ONLY: 2826 if (strncmp(str, regex, len) == 0) 2827 matched = 1; 2828 break; 2829 case MATCH_MIDDLE_ONLY: 2830 if (strstr(str, regex)) 2831 matched = 1; 2832 break; 2833 case MATCH_END_ONLY: 2834 slen = strlen(str); 2835 if (slen >= len && memcmp(str + slen - len, regex, len) == 0) 2836 matched = 1; 2837 break; 2838 } 2839 2840 return matched; 2841 } 2842 2843 static int 2844 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not) 2845 { 2846 struct ftrace_func_entry *entry; 2847 int ret = 0; 2848 2849 entry = ftrace_lookup_ip(hash, rec->ip); 2850 if (not) { 2851 /* Do nothing if it doesn't exist */ 2852 if (!entry) 2853 return 0; 2854 2855 free_hash_entry(hash, entry); 2856 } else { 2857 /* Do nothing if it exists */ 2858 if (entry) 2859 return 0; 2860 2861 ret = add_hash_entry(hash, rec->ip); 2862 } 2863 return ret; 2864 } 2865 2866 static int 2867 ftrace_match_record(struct dyn_ftrace *rec, char *mod, 2868 char *regex, int len, int type) 2869 { 2870 char str[KSYM_SYMBOL_LEN]; 2871 char *modname; 2872 2873 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); 2874 2875 if (mod) { 2876 /* module lookup requires matching the module */ 2877 if (!modname || strcmp(modname, mod)) 2878 return 0; 2879 2880 /* blank search means to match all funcs in the mod */ 2881 if (!len) 2882 return 1; 2883 } 2884 2885 return ftrace_match(str, regex, len, type); 2886 } 2887 2888 static int 2889 match_records(struct ftrace_hash *hash, char *buff, 2890 int len, char *mod, int not) 2891 { 2892 unsigned search_len = 0; 2893 struct ftrace_page *pg; 2894 struct dyn_ftrace *rec; 2895 int type = MATCH_FULL; 2896 char *search = buff; 2897 int found = 0; 2898 int ret; 2899 2900 if (len) { 2901 type = filter_parse_regex(buff, len, &search, ¬); 2902 search_len = strlen(search); 2903 } 2904 2905 mutex_lock(&ftrace_lock); 2906 2907 if (unlikely(ftrace_disabled)) 2908 goto out_unlock; 2909 2910 do_for_each_ftrace_rec(pg, rec) { 2911 if (ftrace_match_record(rec, mod, search, search_len, type)) { 2912 ret = enter_record(hash, rec, not); 2913 if (ret < 0) { 2914 found = ret; 2915 goto out_unlock; 2916 } 2917 found = 1; 2918 } 2919 } while_for_each_ftrace_rec(); 2920 out_unlock: 2921 mutex_unlock(&ftrace_lock); 2922 2923 return found; 2924 } 2925 2926 static int 2927 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len) 2928 { 2929 return match_records(hash, buff, len, NULL, 0); 2930 } 2931 2932 static int 2933 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod) 2934 { 2935 int not = 0; 2936 2937 /* blank or '*' mean the same */ 2938 if (strcmp(buff, "*") == 0) 2939 buff[0] = 0; 2940 2941 /* handle the case of 'dont filter this module' */ 2942 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) { 2943 buff[0] = 0; 2944 not = 1; 2945 } 2946 2947 return match_records(hash, buff, strlen(buff), mod, not); 2948 } 2949 2950 /* 2951 * We register the module command as a template to show others how 2952 * to register the a command as well. 2953 */ 2954 2955 static int 2956 ftrace_mod_callback(struct ftrace_hash *hash, 2957 char *func, char *cmd, char *param, int enable) 2958 { 2959 char *mod; 2960 int ret = -EINVAL; 2961 2962 /* 2963 * cmd == 'mod' because we only registered this func 2964 * for the 'mod' ftrace_func_command. 
2965 * But if you register one func with multiple commands, 2966 * you can tell which command was used by the cmd 2967 * parameter. 2968 */ 2969 2970 /* we must have a module name */ 2971 if (!param) 2972 return ret; 2973 2974 mod = strsep(¶m, ":"); 2975 if (!strlen(mod)) 2976 return ret; 2977 2978 ret = ftrace_match_module_records(hash, func, mod); 2979 if (!ret) 2980 ret = -EINVAL; 2981 if (ret < 0) 2982 return ret; 2983 2984 return 0; 2985 } 2986 2987 static struct ftrace_func_command ftrace_mod_cmd = { 2988 .name = "mod", 2989 .func = ftrace_mod_callback, 2990 }; 2991 2992 static int __init ftrace_mod_cmd_init(void) 2993 { 2994 return register_ftrace_command(&ftrace_mod_cmd); 2995 } 2996 core_initcall(ftrace_mod_cmd_init); 2997 2998 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, 2999 struct ftrace_ops *op, struct pt_regs *pt_regs) 3000 { 3001 struct ftrace_func_probe *entry; 3002 struct hlist_head *hhd; 3003 unsigned long key; 3004 3005 key = hash_long(ip, FTRACE_HASH_BITS); 3006 3007 hhd = &ftrace_func_hash[key]; 3008 3009 if (hlist_empty(hhd)) 3010 return; 3011 3012 /* 3013 * Disable preemption for these calls to prevent a RCU grace 3014 * period. This syncs the hash iteration and freeing of items 3015 * on the hash. rcu_read_lock is too dangerous here. 3016 */ 3017 preempt_disable_notrace(); 3018 hlist_for_each_entry_rcu_notrace(entry, hhd, node) { 3019 if (entry->ip == ip) 3020 entry->ops->func(ip, parent_ip, &entry->data); 3021 } 3022 preempt_enable_notrace(); 3023 } 3024 3025 static struct ftrace_ops trace_probe_ops __read_mostly = 3026 { 3027 .func = function_trace_probe_call, 3028 .flags = FTRACE_OPS_FL_INITIALIZED, 3029 INIT_REGEX_LOCK(trace_probe_ops) 3030 }; 3031 3032 static int ftrace_probe_registered; 3033 3034 static void __enable_ftrace_function_probe(void) 3035 { 3036 int ret; 3037 int i; 3038 3039 if (ftrace_probe_registered) { 3040 /* still need to update the function call sites */ 3041 if (ftrace_enabled) 3042 ftrace_run_update_code(FTRACE_UPDATE_CALLS); 3043 return; 3044 } 3045 3046 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3047 struct hlist_head *hhd = &ftrace_func_hash[i]; 3048 if (hhd->first) 3049 break; 3050 } 3051 /* Nothing registered? */ 3052 if (i == FTRACE_FUNC_HASHSIZE) 3053 return; 3054 3055 ret = ftrace_startup(&trace_probe_ops, 0); 3056 3057 ftrace_probe_registered = 1; 3058 } 3059 3060 static void __disable_ftrace_function_probe(void) 3061 { 3062 int i; 3063 3064 if (!ftrace_probe_registered) 3065 return; 3066 3067 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3068 struct hlist_head *hhd = &ftrace_func_hash[i]; 3069 if (hhd->first) 3070 return; 3071 } 3072 3073 /* no more funcs left */ 3074 ftrace_shutdown(&trace_probe_ops, 0); 3075 3076 ftrace_probe_registered = 0; 3077 } 3078 3079 3080 static void ftrace_free_entry(struct ftrace_func_probe *entry) 3081 { 3082 if (entry->ops->free) 3083 entry->ops->free(entry->ops, entry->ip, &entry->data); 3084 kfree(entry); 3085 } 3086 3087 int 3088 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3089 void *data) 3090 { 3091 struct ftrace_func_probe *entry; 3092 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash; 3093 struct ftrace_hash *hash; 3094 struct ftrace_page *pg; 3095 struct dyn_ftrace *rec; 3096 int type, len, not; 3097 unsigned long key; 3098 int count = 0; 3099 char *search; 3100 int ret; 3101 3102 type = filter_parse_regex(glob, strlen(glob), &search, ¬); 3103 len = strlen(search); 3104 3105 /* we do not support '!' 
for function probes */ 3106 if (WARN_ON(not)) 3107 return -EINVAL; 3108 3109 mutex_lock(&trace_probe_ops.regex_lock); 3110 3111 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3112 if (!hash) { 3113 count = -ENOMEM; 3114 goto out; 3115 } 3116 3117 if (unlikely(ftrace_disabled)) { 3118 count = -ENODEV; 3119 goto out; 3120 } 3121 3122 mutex_lock(&ftrace_lock); 3123 3124 do_for_each_ftrace_rec(pg, rec) { 3125 3126 if (!ftrace_match_record(rec, NULL, search, len, type)) 3127 continue; 3128 3129 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 3130 if (!entry) { 3131 /* If we did not process any, then return error */ 3132 if (!count) 3133 count = -ENOMEM; 3134 goto out_unlock; 3135 } 3136 3137 count++; 3138 3139 entry->data = data; 3140 3141 /* 3142 * The caller might want to do something special 3143 * for each function we find. We call the callback 3144 * to give the caller an opportunity to do so. 3145 */ 3146 if (ops->init) { 3147 if (ops->init(ops, rec->ip, &entry->data) < 0) { 3148 /* caller does not like this func */ 3149 kfree(entry); 3150 continue; 3151 } 3152 } 3153 3154 ret = enter_record(hash, rec, 0); 3155 if (ret < 0) { 3156 kfree(entry); 3157 count = ret; 3158 goto out_unlock; 3159 } 3160 3161 entry->ops = ops; 3162 entry->ip = rec->ip; 3163 3164 key = hash_long(entry->ip, FTRACE_HASH_BITS); 3165 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); 3166 3167 } while_for_each_ftrace_rec(); 3168 3169 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3170 if (ret < 0) 3171 count = ret; 3172 3173 __enable_ftrace_function_probe(); 3174 3175 out_unlock: 3176 mutex_unlock(&ftrace_lock); 3177 out: 3178 mutex_unlock(&trace_probe_ops.regex_lock); 3179 free_ftrace_hash(hash); 3180 3181 return count; 3182 } 3183 3184 enum { 3185 PROBE_TEST_FUNC = 1, 3186 PROBE_TEST_DATA = 2 3187 }; 3188 3189 static void 3190 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3191 void *data, int flags) 3192 { 3193 struct ftrace_func_entry *rec_entry; 3194 struct ftrace_func_probe *entry; 3195 struct ftrace_func_probe *p; 3196 struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash; 3197 struct list_head free_list; 3198 struct ftrace_hash *hash; 3199 struct hlist_node *tmp; 3200 char str[KSYM_SYMBOL_LEN]; 3201 int type = MATCH_FULL; 3202 int i, len = 0; 3203 char *search; 3204 3205 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) 3206 glob = NULL; 3207 else if (glob) { 3208 int not; 3209 3210 type = filter_parse_regex(glob, strlen(glob), &search, ¬); 3211 len = strlen(search); 3212 3213 /* we do not support '!' 
for function probes */ 3214 if (WARN_ON(not)) 3215 return; 3216 } 3217 3218 mutex_lock(&trace_probe_ops.regex_lock); 3219 3220 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3221 if (!hash) 3222 /* Hmm, should report this somehow */ 3223 goto out_unlock; 3224 3225 INIT_LIST_HEAD(&free_list); 3226 3227 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { 3228 struct hlist_head *hhd = &ftrace_func_hash[i]; 3229 3230 hlist_for_each_entry_safe(entry, tmp, hhd, node) { 3231 3232 /* break up if statements for readability */ 3233 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) 3234 continue; 3235 3236 if ((flags & PROBE_TEST_DATA) && entry->data != data) 3237 continue; 3238 3239 /* do this last, since it is the most expensive */ 3240 if (glob) { 3241 kallsyms_lookup(entry->ip, NULL, NULL, 3242 NULL, str); 3243 if (!ftrace_match(str, glob, len, type)) 3244 continue; 3245 } 3246 3247 rec_entry = ftrace_lookup_ip(hash, entry->ip); 3248 /* It is possible more than one entry had this ip */ 3249 if (rec_entry) 3250 free_hash_entry(hash, rec_entry); 3251 3252 hlist_del_rcu(&entry->node); 3253 list_add(&entry->free_list, &free_list); 3254 } 3255 } 3256 mutex_lock(&ftrace_lock); 3257 __disable_ftrace_function_probe(); 3258 /* 3259 * Remove after the disable is called. Otherwise, if the last 3260 * probe is removed, a null hash means *all enabled*. 3261 */ 3262 ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3263 synchronize_sched(); 3264 list_for_each_entry_safe(entry, p, &free_list, free_list) { 3265 list_del(&entry->free_list); 3266 ftrace_free_entry(entry); 3267 } 3268 mutex_unlock(&ftrace_lock); 3269 3270 out_unlock: 3271 mutex_unlock(&trace_probe_ops.regex_lock); 3272 free_ftrace_hash(hash); 3273 } 3274 3275 void 3276 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, 3277 void *data) 3278 { 3279 __unregister_ftrace_function_probe(glob, ops, data, 3280 PROBE_TEST_FUNC | PROBE_TEST_DATA); 3281 } 3282 3283 void 3284 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) 3285 { 3286 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); 3287 } 3288 3289 void unregister_ftrace_function_probe_all(char *glob) 3290 { 3291 __unregister_ftrace_function_probe(glob, NULL, NULL, 0); 3292 } 3293 3294 static LIST_HEAD(ftrace_commands); 3295 static DEFINE_MUTEX(ftrace_cmd_mutex); 3296 3297 /* 3298 * Currently we only register ftrace commands from __init, so mark this 3299 * __init too. 3300 */ 3301 __init int register_ftrace_command(struct ftrace_func_command *cmd) 3302 { 3303 struct ftrace_func_command *p; 3304 int ret = 0; 3305 3306 mutex_lock(&ftrace_cmd_mutex); 3307 list_for_each_entry(p, &ftrace_commands, list) { 3308 if (strcmp(cmd->name, p->name) == 0) { 3309 ret = -EBUSY; 3310 goto out_unlock; 3311 } 3312 } 3313 list_add(&cmd->list, &ftrace_commands); 3314 out_unlock: 3315 mutex_unlock(&ftrace_cmd_mutex); 3316 3317 return ret; 3318 } 3319 3320 /* 3321 * Currently we only unregister ftrace commands from __init, so mark 3322 * this __init too. 
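 *
 * Registering a new command mirrors the "mod" template above; a sketch
 * (the names here are purely illustrative) looks like:
 *
 *	static int my_cmd_callback(struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name = "mycmd",
 *		.func = my_cmd_callback,
 *	};
 *
 *	ret = register_ftrace_command(&my_cmd);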
3323 */ 3324 __init int unregister_ftrace_command(struct ftrace_func_command *cmd) 3325 { 3326 struct ftrace_func_command *p, *n; 3327 int ret = -ENODEV; 3328 3329 mutex_lock(&ftrace_cmd_mutex); 3330 list_for_each_entry_safe(p, n, &ftrace_commands, list) { 3331 if (strcmp(cmd->name, p->name) == 0) { 3332 ret = 0; 3333 list_del_init(&p->list); 3334 goto out_unlock; 3335 } 3336 } 3337 out_unlock: 3338 mutex_unlock(&ftrace_cmd_mutex); 3339 3340 return ret; 3341 } 3342 3343 static int ftrace_process_regex(struct ftrace_hash *hash, 3344 char *buff, int len, int enable) 3345 { 3346 char *func, *command, *next = buff; 3347 struct ftrace_func_command *p; 3348 int ret = -EINVAL; 3349 3350 func = strsep(&next, ":"); 3351 3352 if (!next) { 3353 ret = ftrace_match_records(hash, func, len); 3354 if (!ret) 3355 ret = -EINVAL; 3356 if (ret < 0) 3357 return ret; 3358 return 0; 3359 } 3360 3361 /* command found */ 3362 3363 command = strsep(&next, ":"); 3364 3365 mutex_lock(&ftrace_cmd_mutex); 3366 list_for_each_entry(p, &ftrace_commands, list) { 3367 if (strcmp(p->name, command) == 0) { 3368 ret = p->func(hash, func, command, next, enable); 3369 goto out_unlock; 3370 } 3371 } 3372 out_unlock: 3373 mutex_unlock(&ftrace_cmd_mutex); 3374 3375 return ret; 3376 } 3377 3378 static ssize_t 3379 ftrace_regex_write(struct file *file, const char __user *ubuf, 3380 size_t cnt, loff_t *ppos, int enable) 3381 { 3382 struct ftrace_iterator *iter; 3383 struct trace_parser *parser; 3384 ssize_t ret, read; 3385 3386 if (!cnt) 3387 return 0; 3388 3389 if (file->f_mode & FMODE_READ) { 3390 struct seq_file *m = file->private_data; 3391 iter = m->private; 3392 } else 3393 iter = file->private_data; 3394 3395 if (unlikely(ftrace_disabled)) 3396 return -ENODEV; 3397 3398 /* iter->hash is a local copy, so we don't need regex_lock */ 3399 3400 parser = &iter->parser; 3401 read = trace_get_user(parser, ubuf, cnt, ppos); 3402 3403 if (read >= 0 && trace_parser_loaded(parser) && 3404 !trace_parser_cont(parser)) { 3405 ret = ftrace_process_regex(iter->hash, parser->buffer, 3406 parser->idx, enable); 3407 trace_parser_clear(parser); 3408 if (ret < 0) 3409 goto out; 3410 } 3411 3412 ret = read; 3413 out: 3414 return ret; 3415 } 3416 3417 ssize_t 3418 ftrace_filter_write(struct file *file, const char __user *ubuf, 3419 size_t cnt, loff_t *ppos) 3420 { 3421 return ftrace_regex_write(file, ubuf, cnt, ppos, 1); 3422 } 3423 3424 ssize_t 3425 ftrace_notrace_write(struct file *file, const char __user *ubuf, 3426 size_t cnt, loff_t *ppos) 3427 { 3428 return ftrace_regex_write(file, ubuf, cnt, ppos, 0); 3429 } 3430 3431 static int 3432 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) 3433 { 3434 struct ftrace_func_entry *entry; 3435 3436 if (!ftrace_location(ip)) 3437 return -EINVAL; 3438 3439 if (remove) { 3440 entry = ftrace_lookup_ip(hash, ip); 3441 if (!entry) 3442 return -ENOENT; 3443 free_hash_entry(hash, entry); 3444 return 0; 3445 } 3446 3447 return add_hash_entry(hash, ip); 3448 } 3449 3450 static void ftrace_ops_update_code(struct ftrace_ops *ops) 3451 { 3452 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) 3453 ftrace_run_update_code(FTRACE_UPDATE_CALLS); 3454 } 3455 3456 static int 3457 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, 3458 unsigned long ip, int remove, int reset, int enable) 3459 { 3460 struct ftrace_hash **orig_hash; 3461 struct ftrace_hash *hash; 3462 int ret; 3463 3464 if (unlikely(ftrace_disabled)) 3465 return -ENODEV; 3466 3467 mutex_lock(&ops->regex_lock); 
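	/*
	 * The live hash is never edited in place: a copy of the current
	 * filter/notrace hash is built and modified below, and only on
	 * success is it swapped in under ftrace_lock by ftrace_hash_move().
	 * regex_lock serializes these writers against each other and
	 * against the filter files' open/release paths.
	 */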
3468 3469 if (enable) 3470 orig_hash = &ops->filter_hash; 3471 else 3472 orig_hash = &ops->notrace_hash; 3473 3474 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); 3475 if (!hash) { 3476 ret = -ENOMEM; 3477 goto out_regex_unlock; 3478 } 3479 3480 if (reset) 3481 ftrace_filter_reset(hash); 3482 if (buf && !ftrace_match_records(hash, buf, len)) { 3483 ret = -EINVAL; 3484 goto out_regex_unlock; 3485 } 3486 if (ip) { 3487 ret = ftrace_match_addr(hash, ip, remove); 3488 if (ret < 0) 3489 goto out_regex_unlock; 3490 } 3491 3492 mutex_lock(&ftrace_lock); 3493 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 3494 if (!ret) 3495 ftrace_ops_update_code(ops); 3496 3497 mutex_unlock(&ftrace_lock); 3498 3499 out_regex_unlock: 3500 mutex_unlock(&ops->regex_lock); 3501 3502 free_ftrace_hash(hash); 3503 return ret; 3504 } 3505 3506 static int 3507 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, 3508 int reset, int enable) 3509 { 3510 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); 3511 } 3512 3513 /** 3514 * ftrace_set_filter_ip - set a function to filter on in ftrace by address 3515 * @ops - the ops to set the filter with 3516 * @ip - the address to add to or remove from the filter. 3517 * @remove - non zero to remove the ip from the filter 3518 * @reset - non zero to reset all filters before applying this filter. 3519 * 3520 * Filters denote which functions should be enabled when tracing is enabled 3521 * If @ip is NULL, it failes to update filter. 3522 */ 3523 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, 3524 int remove, int reset) 3525 { 3526 ftrace_ops_init(ops); 3527 return ftrace_set_addr(ops, ip, remove, reset, 1); 3528 } 3529 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); 3530 3531 static int 3532 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, 3533 int reset, int enable) 3534 { 3535 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); 3536 } 3537 3538 /** 3539 * ftrace_set_filter - set a function to filter on in ftrace 3540 * @ops - the ops to set the filter with 3541 * @buf - the string that holds the function filter text. 3542 * @len - the length of the string. 3543 * @reset - non zero to reset all filters before applying this filter. 3544 * 3545 * Filters denote which functions should be enabled when tracing is enabled. 3546 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 3547 */ 3548 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 3549 int len, int reset) 3550 { 3551 ftrace_ops_init(ops); 3552 return ftrace_set_regex(ops, buf, len, reset, 1); 3553 } 3554 EXPORT_SYMBOL_GPL(ftrace_set_filter); 3555 3556 /** 3557 * ftrace_set_notrace - set a function to not trace in ftrace 3558 * @ops - the ops to set the notrace filter with 3559 * @buf - the string that holds the function notrace text. 3560 * @len - the length of the string. 3561 * @reset - non zero to reset all filters before applying this filter. 3562 * 3563 * Notrace Filters denote which functions should not be enabled when tracing 3564 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 3565 * for tracing. 
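 *
 * A typical caller sets up its filters before registering the ops; a
 * minimal sketch (my_ops, my_callback and the filter strings are only
 * illustrative):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs);
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	char buf[] = "kmalloc*";
 *	char skip[] = "kfree";
 *
 *	ftrace_set_filter(&my_ops, buf, strlen(buf), 1);
 *	ftrace_set_notrace(&my_ops, skip, strlen(skip), 0);
 *	register_ftrace_function(&my_ops);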
3566 */ 3567 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 3568 int len, int reset) 3569 { 3570 ftrace_ops_init(ops); 3571 return ftrace_set_regex(ops, buf, len, reset, 0); 3572 } 3573 EXPORT_SYMBOL_GPL(ftrace_set_notrace); 3574 /** 3575 * ftrace_set_global_filter - set a function to filter on with global tracers 3576 * @buf - the string that holds the function filter text. 3577 * @len - the length of the string. 3578 * @reset - non zero to reset all filters before applying this filter. 3579 * 3580 * Filters denote which functions should be enabled when tracing is enabled. 3581 * If @buf is NULL and reset is set, all functions will be enabled for tracing. 3582 */ 3583 void ftrace_set_global_filter(unsigned char *buf, int len, int reset) 3584 { 3585 ftrace_set_regex(&global_ops, buf, len, reset, 1); 3586 } 3587 EXPORT_SYMBOL_GPL(ftrace_set_global_filter); 3588 3589 /** 3590 * ftrace_set_global_notrace - set a function to not trace with global tracers 3591 * @buf - the string that holds the function notrace text. 3592 * @len - the length of the string. 3593 * @reset - non zero to reset all filters before applying this filter. 3594 * 3595 * Notrace Filters denote which functions should not be enabled when tracing 3596 * is enabled. If @buf is NULL and reset is set, all functions will be enabled 3597 * for tracing. 3598 */ 3599 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset) 3600 { 3601 ftrace_set_regex(&global_ops, buf, len, reset, 0); 3602 } 3603 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace); 3604 3605 /* 3606 * command line interface to allow users to set filters on boot up. 3607 */ 3608 #define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE 3609 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata; 3610 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata; 3611 3612 /* Used by function selftest to not test if filter is set */ 3613 bool ftrace_filter_param __initdata; 3614 3615 static int __init set_ftrace_notrace(char *str) 3616 { 3617 ftrace_filter_param = true; 3618 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE); 3619 return 1; 3620 } 3621 __setup("ftrace_notrace=", set_ftrace_notrace); 3622 3623 static int __init set_ftrace_filter(char *str) 3624 { 3625 ftrace_filter_param = true; 3626 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE); 3627 return 1; 3628 } 3629 __setup("ftrace_filter=", set_ftrace_filter); 3630 3631 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 3632 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; 3633 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); 3634 3635 static int __init set_graph_function(char *str) 3636 { 3637 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); 3638 return 1; 3639 } 3640 __setup("ftrace_graph_filter=", set_graph_function); 3641 3642 static void __init set_ftrace_early_graph(char *buf) 3643 { 3644 int ret; 3645 char *func; 3646 3647 while (buf) { 3648 func = strsep(&buf, ","); 3649 /* we allow only one expression at a time */ 3650 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count, 3651 FTRACE_GRAPH_MAX_FUNCS, func); 3652 if (ret) 3653 printk(KERN_DEBUG "ftrace: function %s not " 3654 "traceable\n", func); 3655 } 3656 } 3657 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 3658 3659 void __init 3660 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable) 3661 { 3662 char *func; 3663 3664 ftrace_ops_init(ops); 3665 3666 while (buf) { 3667 func = strsep(&buf, ","); 3668 ftrace_set_regex(ops, func, strlen(func), 0, enable); 3669 } 3670 
} 3671 3672 static void __init set_ftrace_early_filters(void) 3673 { 3674 if (ftrace_filter_buf[0]) 3675 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1); 3676 if (ftrace_notrace_buf[0]) 3677 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0); 3678 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 3679 if (ftrace_graph_buf[0]) 3680 set_ftrace_early_graph(ftrace_graph_buf); 3681 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 3682 } 3683 3684 int ftrace_regex_release(struct inode *inode, struct file *file) 3685 { 3686 struct seq_file *m = (struct seq_file *)file->private_data; 3687 struct ftrace_iterator *iter; 3688 struct ftrace_hash **orig_hash; 3689 struct trace_parser *parser; 3690 int filter_hash; 3691 int ret; 3692 3693 if (file->f_mode & FMODE_READ) { 3694 iter = m->private; 3695 seq_release(inode, file); 3696 } else 3697 iter = file->private_data; 3698 3699 parser = &iter->parser; 3700 if (trace_parser_loaded(parser)) { 3701 parser->buffer[parser->idx] = 0; 3702 ftrace_match_records(iter->hash, parser->buffer, parser->idx); 3703 } 3704 3705 trace_parser_put(parser); 3706 3707 mutex_lock(&iter->ops->regex_lock); 3708 3709 if (file->f_mode & FMODE_WRITE) { 3710 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER); 3711 3712 if (filter_hash) 3713 orig_hash = &iter->ops->filter_hash; 3714 else 3715 orig_hash = &iter->ops->notrace_hash; 3716 3717 mutex_lock(&ftrace_lock); 3718 ret = ftrace_hash_move(iter->ops, filter_hash, 3719 orig_hash, iter->hash); 3720 if (!ret) 3721 ftrace_ops_update_code(iter->ops); 3722 3723 mutex_unlock(&ftrace_lock); 3724 } 3725 3726 mutex_unlock(&iter->ops->regex_lock); 3727 free_ftrace_hash(iter->hash); 3728 kfree(iter); 3729 3730 return 0; 3731 } 3732 3733 static const struct file_operations ftrace_avail_fops = { 3734 .open = ftrace_avail_open, 3735 .read = seq_read, 3736 .llseek = seq_lseek, 3737 .release = seq_release_private, 3738 }; 3739 3740 static const struct file_operations ftrace_enabled_fops = { 3741 .open = ftrace_enabled_open, 3742 .read = seq_read, 3743 .llseek = seq_lseek, 3744 .release = seq_release_private, 3745 }; 3746 3747 static const struct file_operations ftrace_filter_fops = { 3748 .open = ftrace_filter_open, 3749 .read = seq_read, 3750 .write = ftrace_filter_write, 3751 .llseek = tracing_lseek, 3752 .release = ftrace_regex_release, 3753 }; 3754 3755 static const struct file_operations ftrace_notrace_fops = { 3756 .open = ftrace_notrace_open, 3757 .read = seq_read, 3758 .write = ftrace_notrace_write, 3759 .llseek = tracing_lseek, 3760 .release = ftrace_regex_release, 3761 }; 3762 3763 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 3764 3765 static DEFINE_MUTEX(graph_lock); 3766 3767 int ftrace_graph_count; 3768 int ftrace_graph_notrace_count; 3769 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; 3770 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; 3771 3772 struct ftrace_graph_data { 3773 unsigned long *table; 3774 size_t size; 3775 int *count; 3776 const struct seq_operations *seq_ops; 3777 }; 3778 3779 static void * 3780 __g_next(struct seq_file *m, loff_t *pos) 3781 { 3782 struct ftrace_graph_data *fgd = m->private; 3783 3784 if (*pos >= *fgd->count) 3785 return NULL; 3786 return &fgd->table[*pos]; 3787 } 3788 3789 static void * 3790 g_next(struct seq_file *m, void *v, loff_t *pos) 3791 { 3792 (*pos)++; 3793 return __g_next(m, pos); 3794 } 3795 3796 static void *g_start(struct seq_file *m, loff_t *pos) 3797 { 3798 struct ftrace_graph_data *fgd = m->private; 3799 3800 mutex_lock(&graph_lock); 
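	/*
	 * graph_lock keeps fgd->table and its count stable while the
	 * seq_file walks them; the writers (ftrace_graph_write() and the
	 * O_TRUNC reset in __ftrace_graph_open()) take the same lock.
	 */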
3801 3802 /* Nothing, tell g_show to print all functions are enabled */ 3803 if (!*fgd->count && !*pos) 3804 return (void *)1; 3805 3806 return __g_next(m, pos); 3807 } 3808 3809 static void g_stop(struct seq_file *m, void *p) 3810 { 3811 mutex_unlock(&graph_lock); 3812 } 3813 3814 static int g_show(struct seq_file *m, void *v) 3815 { 3816 unsigned long *ptr = v; 3817 3818 if (!ptr) 3819 return 0; 3820 3821 if (ptr == (unsigned long *)1) { 3822 seq_printf(m, "#### all functions enabled ####\n"); 3823 return 0; 3824 } 3825 3826 seq_printf(m, "%ps\n", (void *)*ptr); 3827 3828 return 0; 3829 } 3830 3831 static const struct seq_operations ftrace_graph_seq_ops = { 3832 .start = g_start, 3833 .next = g_next, 3834 .stop = g_stop, 3835 .show = g_show, 3836 }; 3837 3838 static int 3839 __ftrace_graph_open(struct inode *inode, struct file *file, 3840 struct ftrace_graph_data *fgd) 3841 { 3842 int ret = 0; 3843 3844 mutex_lock(&graph_lock); 3845 if ((file->f_mode & FMODE_WRITE) && 3846 (file->f_flags & O_TRUNC)) { 3847 *fgd->count = 0; 3848 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table)); 3849 } 3850 mutex_unlock(&graph_lock); 3851 3852 if (file->f_mode & FMODE_READ) { 3853 ret = seq_open(file, fgd->seq_ops); 3854 if (!ret) { 3855 struct seq_file *m = file->private_data; 3856 m->private = fgd; 3857 } 3858 } else 3859 file->private_data = fgd; 3860 3861 return ret; 3862 } 3863 3864 static int 3865 ftrace_graph_open(struct inode *inode, struct file *file) 3866 { 3867 struct ftrace_graph_data *fgd; 3868 3869 if (unlikely(ftrace_disabled)) 3870 return -ENODEV; 3871 3872 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 3873 if (fgd == NULL) 3874 return -ENOMEM; 3875 3876 fgd->table = ftrace_graph_funcs; 3877 fgd->size = FTRACE_GRAPH_MAX_FUNCS; 3878 fgd->count = &ftrace_graph_count; 3879 fgd->seq_ops = &ftrace_graph_seq_ops; 3880 3881 return __ftrace_graph_open(inode, file, fgd); 3882 } 3883 3884 static int 3885 ftrace_graph_notrace_open(struct inode *inode, struct file *file) 3886 { 3887 struct ftrace_graph_data *fgd; 3888 3889 if (unlikely(ftrace_disabled)) 3890 return -ENODEV; 3891 3892 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL); 3893 if (fgd == NULL) 3894 return -ENOMEM; 3895 3896 fgd->table = ftrace_graph_notrace_funcs; 3897 fgd->size = FTRACE_GRAPH_MAX_FUNCS; 3898 fgd->count = &ftrace_graph_notrace_count; 3899 fgd->seq_ops = &ftrace_graph_seq_ops; 3900 3901 return __ftrace_graph_open(inode, file, fgd); 3902 } 3903 3904 static int 3905 ftrace_graph_release(struct inode *inode, struct file *file) 3906 { 3907 if (file->f_mode & FMODE_READ) { 3908 struct seq_file *m = file->private_data; 3909 3910 kfree(m->private); 3911 seq_release(inode, file); 3912 } else { 3913 kfree(file->private_data); 3914 } 3915 3916 return 0; 3917 } 3918 3919 static int 3920 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) 3921 { 3922 struct dyn_ftrace *rec; 3923 struct ftrace_page *pg; 3924 int search_len; 3925 int fail = 1; 3926 int type, not; 3927 char *search; 3928 bool exists; 3929 int i; 3930 3931 /* decode regex */ 3932 type = filter_parse_regex(buffer, strlen(buffer), &search, ¬); 3933 if (!not && *idx >= size) 3934 return -EBUSY; 3935 3936 search_len = strlen(search); 3937 3938 mutex_lock(&ftrace_lock); 3939 3940 if (unlikely(ftrace_disabled)) { 3941 mutex_unlock(&ftrace_lock); 3942 return -ENODEV; 3943 } 3944 3945 do_for_each_ftrace_rec(pg, rec) { 3946 3947 if (ftrace_match_record(rec, NULL, search, search_len, type)) { 3948 /* if it is in the array */ 3949 exists = false; 3950 for (i = 0; i < *idx; 
i++) { 3951 if (array[i] == rec->ip) { 3952 exists = true; 3953 break; 3954 } 3955 } 3956 3957 if (!not) { 3958 fail = 0; 3959 if (!exists) { 3960 array[(*idx)++] = rec->ip; 3961 if (*idx >= size) 3962 goto out; 3963 } 3964 } else { 3965 if (exists) { 3966 array[i] = array[--(*idx)]; 3967 array[*idx] = 0; 3968 fail = 0; 3969 } 3970 } 3971 } 3972 } while_for_each_ftrace_rec(); 3973 out: 3974 mutex_unlock(&ftrace_lock); 3975 3976 if (fail) 3977 return -EINVAL; 3978 3979 return 0; 3980 } 3981 3982 static ssize_t 3983 ftrace_graph_write(struct file *file, const char __user *ubuf, 3984 size_t cnt, loff_t *ppos) 3985 { 3986 struct trace_parser parser; 3987 ssize_t read, ret = 0; 3988 struct ftrace_graph_data *fgd = file->private_data; 3989 3990 if (!cnt) 3991 return 0; 3992 3993 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) 3994 return -ENOMEM; 3995 3996 read = trace_get_user(&parser, ubuf, cnt, ppos); 3997 3998 if (read >= 0 && trace_parser_loaded((&parser))) { 3999 parser.buffer[parser.idx] = 0; 4000 4001 mutex_lock(&graph_lock); 4002 4003 /* we allow only one expression at a time */ 4004 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size, 4005 parser.buffer); 4006 4007 mutex_unlock(&graph_lock); 4008 } 4009 4010 if (!ret) 4011 ret = read; 4012 4013 trace_parser_put(&parser); 4014 4015 return ret; 4016 } 4017 4018 static const struct file_operations ftrace_graph_fops = { 4019 .open = ftrace_graph_open, 4020 .read = seq_read, 4021 .write = ftrace_graph_write, 4022 .llseek = tracing_lseek, 4023 .release = ftrace_graph_release, 4024 }; 4025 4026 static const struct file_operations ftrace_graph_notrace_fops = { 4027 .open = ftrace_graph_notrace_open, 4028 .read = seq_read, 4029 .write = ftrace_graph_write, 4030 .llseek = tracing_lseek, 4031 .release = ftrace_graph_release, 4032 }; 4033 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4034 4035 void ftrace_create_filter_files(struct ftrace_ops *ops, 4036 struct dentry *parent) 4037 { 4038 4039 trace_create_file("set_ftrace_filter", 0644, parent, 4040 ops, &ftrace_filter_fops); 4041 4042 trace_create_file("set_ftrace_notrace", 0644, parent, 4043 ops, &ftrace_notrace_fops); 4044 } 4045 4046 /* 4047 * The name "destroy_filter_files" is really a misnomer. Although 4048 * in the future, it may actualy delete the files, but this is 4049 * really intended to make sure the ops passed in are disabled 4050 * and that when this function returns, the caller is free to 4051 * free the ops. 4052 * 4053 * The "destroy" name is only to match the "create" name that this 4054 * should be paired with. 
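 *
 * In other words, a caller that set the files up with
 * ftrace_create_filter_files(ops, parent) is expected to call
 * ftrace_destroy_filter_files(ops) on the same ops before freeing it;
 * only after this returns is freeing the ops safe.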
4055 */ 4056 void ftrace_destroy_filter_files(struct ftrace_ops *ops) 4057 { 4058 mutex_lock(&ftrace_lock); 4059 if (ops->flags & FTRACE_OPS_FL_ENABLED) 4060 ftrace_shutdown(ops, 0); 4061 ops->flags |= FTRACE_OPS_FL_DELETED; 4062 mutex_unlock(&ftrace_lock); 4063 } 4064 4065 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) 4066 { 4067 4068 trace_create_file("available_filter_functions", 0444, 4069 d_tracer, NULL, &ftrace_avail_fops); 4070 4071 trace_create_file("enabled_functions", 0444, 4072 d_tracer, NULL, &ftrace_enabled_fops); 4073 4074 ftrace_create_filter_files(&global_ops, d_tracer); 4075 4076 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4077 trace_create_file("set_graph_function", 0444, d_tracer, 4078 NULL, 4079 &ftrace_graph_fops); 4080 trace_create_file("set_graph_notrace", 0444, d_tracer, 4081 NULL, 4082 &ftrace_graph_notrace_fops); 4083 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 4084 4085 return 0; 4086 } 4087 4088 static int ftrace_cmp_ips(const void *a, const void *b) 4089 { 4090 const unsigned long *ipa = a; 4091 const unsigned long *ipb = b; 4092 4093 if (*ipa > *ipb) 4094 return 1; 4095 if (*ipa < *ipb) 4096 return -1; 4097 return 0; 4098 } 4099 4100 static void ftrace_swap_ips(void *a, void *b, int size) 4101 { 4102 unsigned long *ipa = a; 4103 unsigned long *ipb = b; 4104 unsigned long t; 4105 4106 t = *ipa; 4107 *ipa = *ipb; 4108 *ipb = t; 4109 } 4110 4111 static int ftrace_process_locs(struct module *mod, 4112 unsigned long *start, 4113 unsigned long *end) 4114 { 4115 struct ftrace_page *start_pg; 4116 struct ftrace_page *pg; 4117 struct dyn_ftrace *rec; 4118 unsigned long count; 4119 unsigned long *p; 4120 unsigned long addr; 4121 unsigned long flags = 0; /* Shut up gcc */ 4122 int ret = -ENOMEM; 4123 4124 count = end - start; 4125 4126 if (!count) 4127 return 0; 4128 4129 sort(start, count, sizeof(*start), 4130 ftrace_cmp_ips, ftrace_swap_ips); 4131 4132 start_pg = ftrace_allocate_pages(count); 4133 if (!start_pg) 4134 return -ENOMEM; 4135 4136 mutex_lock(&ftrace_lock); 4137 4138 /* 4139 * Core and each module needs their own pages, as 4140 * modules will free them when they are removed. 4141 * Force a new page to be allocated for modules. 4142 */ 4143 if (!mod) { 4144 WARN_ON(ftrace_pages || ftrace_pages_start); 4145 /* First initialization */ 4146 ftrace_pages = ftrace_pages_start = start_pg; 4147 } else { 4148 if (!ftrace_pages) 4149 goto out; 4150 4151 if (WARN_ON(ftrace_pages->next)) { 4152 /* Hmm, we have free pages? */ 4153 while (ftrace_pages->next) 4154 ftrace_pages = ftrace_pages->next; 4155 } 4156 4157 ftrace_pages->next = start_pg; 4158 } 4159 4160 p = start; 4161 pg = start_pg; 4162 while (p < end) { 4163 addr = ftrace_call_adjust(*p++); 4164 /* 4165 * Some architecture linkers will pad between 4166 * the different mcount_loc sections of different 4167 * object files to satisfy alignments. 4168 * Skip any NULL pointers. 4169 */ 4170 if (!addr) 4171 continue; 4172 4173 if (pg->index == pg->size) { 4174 /* We should have allocated enough */ 4175 if (WARN_ON(!pg->next)) 4176 break; 4177 pg = pg->next; 4178 } 4179 4180 rec = &pg->records[pg->index++]; 4181 rec->ip = addr; 4182 } 4183 4184 /* We should have used all pages */ 4185 WARN_ON(pg->next); 4186 4187 /* Assign the last page to ftrace_pages */ 4188 ftrace_pages = pg; 4189 4190 /* 4191 * We only need to disable interrupts on start up 4192 * because we are modifying code that an interrupt 4193 * may execute, and the modification is not atomic. 
4194 * But for modules, nothing runs the code we modify 4195 * until we are finished with it, and there's no 4196 * reason to cause large interrupt latencies while we do it. 4197 */ 4198 if (!mod) 4199 local_irq_save(flags); 4200 ftrace_update_code(mod, start_pg); 4201 if (!mod) 4202 local_irq_restore(flags); 4203 ret = 0; 4204 out: 4205 mutex_unlock(&ftrace_lock); 4206 4207 return ret; 4208 } 4209 4210 #ifdef CONFIG_MODULES 4211 4212 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next) 4213 4214 void ftrace_release_mod(struct module *mod) 4215 { 4216 struct dyn_ftrace *rec; 4217 struct ftrace_page **last_pg; 4218 struct ftrace_page *pg; 4219 int order; 4220 4221 mutex_lock(&ftrace_lock); 4222 4223 if (ftrace_disabled) 4224 goto out_unlock; 4225 4226 /* 4227 * Each module has its own ftrace_pages, remove 4228 * them from the list. 4229 */ 4230 last_pg = &ftrace_pages_start; 4231 for (pg = ftrace_pages_start; pg; pg = *last_pg) { 4232 rec = &pg->records[0]; 4233 if (within_module_core(rec->ip, mod)) { 4234 /* 4235 * As core pages are first, the first 4236 * page should never be a module page. 4237 */ 4238 if (WARN_ON(pg == ftrace_pages_start)) 4239 goto out_unlock; 4240 4241 /* Check if we are deleting the last page */ 4242 if (pg == ftrace_pages) 4243 ftrace_pages = next_to_ftrace_page(last_pg); 4244 4245 *last_pg = pg->next; 4246 order = get_count_order(pg->size / ENTRIES_PER_PAGE); 4247 free_pages((unsigned long)pg->records, order); 4248 kfree(pg); 4249 } else 4250 last_pg = &pg->next; 4251 } 4252 out_unlock: 4253 mutex_unlock(&ftrace_lock); 4254 } 4255 4256 static void ftrace_init_module(struct module *mod, 4257 unsigned long *start, unsigned long *end) 4258 { 4259 if (ftrace_disabled || start == end) 4260 return; 4261 ftrace_process_locs(mod, start, end); 4262 } 4263 4264 void ftrace_module_init(struct module *mod) 4265 { 4266 ftrace_init_module(mod, mod->ftrace_callsites, 4267 mod->ftrace_callsites + 4268 mod->num_ftrace_callsites); 4269 } 4270 4271 static int ftrace_module_notify_exit(struct notifier_block *self, 4272 unsigned long val, void *data) 4273 { 4274 struct module *mod = data; 4275 4276 if (val == MODULE_STATE_GOING) 4277 ftrace_release_mod(mod); 4278 4279 return 0; 4280 } 4281 #else 4282 static int ftrace_module_notify_exit(struct notifier_block *self, 4283 unsigned long val, void *data) 4284 { 4285 return 0; 4286 } 4287 #endif /* CONFIG_MODULES */ 4288 4289 struct notifier_block ftrace_module_exit_nb = { 4290 .notifier_call = ftrace_module_notify_exit, 4291 .priority = INT_MIN, /* Run after anything that can remove kprobes */ 4292 }; 4293 4294 void __init ftrace_init(void) 4295 { 4296 extern unsigned long __start_mcount_loc[]; 4297 extern unsigned long __stop_mcount_loc[]; 4298 unsigned long count, flags; 4299 int ret; 4300 4301 local_irq_save(flags); 4302 ret = ftrace_dyn_arch_init(); 4303 local_irq_restore(flags); 4304 if (ret) 4305 goto failed; 4306 4307 count = __stop_mcount_loc - __start_mcount_loc; 4308 if (!count) { 4309 pr_info("ftrace: No functions to be traced?\n"); 4310 goto failed; 4311 } 4312 4313 pr_info("ftrace: allocating %ld entries in %ld pages\n", 4314 count, count / ENTRIES_PER_PAGE + 1); 4315 4316 last_ftrace_enabled = ftrace_enabled = 1; 4317 4318 ret = ftrace_process_locs(NULL, 4319 __start_mcount_loc, 4320 __stop_mcount_loc); 4321 4322 ret = register_module_notifier(&ftrace_module_exit_nb); 4323 if (ret) 4324 pr_warning("Failed to register trace ftrace module exit notifier\n"); 4325 4326 set_ftrace_early_filters(); 4327 4328 
return; 4329 failed: 4330 ftrace_disabled = 1; 4331 } 4332 4333 #else 4334 4335 static struct ftrace_ops global_ops = { 4336 .func = ftrace_stub, 4337 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 4338 INIT_REGEX_LOCK(global_ops) 4339 }; 4340 4341 static int __init ftrace_nodyn_init(void) 4342 { 4343 ftrace_enabled = 1; 4344 return 0; 4345 } 4346 core_initcall(ftrace_nodyn_init); 4347 4348 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } 4349 static inline void ftrace_startup_enable(int command) { } 4350 /* Keep as macros so we do not need to define the commands */ 4351 # define ftrace_startup(ops, command) \ 4352 ({ \ 4353 int ___ret = __register_ftrace_function(ops); \ 4354 if (!___ret) \ 4355 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \ 4356 ___ret; \ 4357 }) 4358 # define ftrace_shutdown(ops, command) \ 4359 ({ \ 4360 int ___ret = __unregister_ftrace_function(ops); \ 4361 if (!___ret) \ 4362 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \ 4363 ___ret; \ 4364 }) 4365 4366 # define ftrace_startup_sysctl() do { } while (0) 4367 # define ftrace_shutdown_sysctl() do { } while (0) 4368 4369 static inline int 4370 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) 4371 { 4372 return 1; 4373 } 4374 4375 #endif /* CONFIG_DYNAMIC_FTRACE */ 4376 4377 __init void ftrace_init_global_array_ops(struct trace_array *tr) 4378 { 4379 tr->ops = &global_ops; 4380 tr->ops->private = tr; 4381 } 4382 4383 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) 4384 { 4385 /* If we filter on pids, update to use the pid function */ 4386 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) { 4387 if (WARN_ON(tr->ops->func != ftrace_stub)) 4388 printk("ftrace ops had %pS for function\n", 4389 tr->ops->func); 4390 /* Only the top level instance does pid tracing */ 4391 if (!list_empty(&ftrace_pids)) { 4392 set_ftrace_pid_function(func); 4393 func = ftrace_pid_func; 4394 } 4395 } 4396 tr->ops->func = func; 4397 tr->ops->private = tr; 4398 } 4399 4400 void ftrace_reset_array_ops(struct trace_array *tr) 4401 { 4402 tr->ops->func = ftrace_stub; 4403 } 4404 4405 static void 4406 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, 4407 struct ftrace_ops *op, struct pt_regs *regs) 4408 { 4409 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) 4410 return; 4411 4412 /* 4413 * Some of the ops may be dynamically allocated, 4414 * they must be freed after a synchronize_sched(). 4415 */ 4416 preempt_disable_notrace(); 4417 trace_recursion_set(TRACE_CONTROL_BIT); 4418 4419 /* 4420 * Control funcs (perf) uses RCU. Only trace if 4421 * RCU is currently active. 
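 *
 * rcu_is_watching() is false in contexts such as the idle loop and the
 * transitions to/from userspace, where RCU read-side critical sections
 * are not safe; rather than risk that, the control ops are simply not
 * called from those places.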
4422 */ 4423 if (!rcu_is_watching()) 4424 goto out; 4425 4426 do_for_each_ftrace_op(op, ftrace_control_list) { 4427 if (!(op->flags & FTRACE_OPS_FL_STUB) && 4428 !ftrace_function_local_disabled(op) && 4429 ftrace_ops_test(op, ip, regs)) 4430 op->func(ip, parent_ip, op, regs); 4431 } while_for_each_ftrace_op(op); 4432 out: 4433 trace_recursion_clear(TRACE_CONTROL_BIT); 4434 preempt_enable_notrace(); 4435 } 4436 4437 static struct ftrace_ops control_ops = { 4438 .func = ftrace_ops_control_func, 4439 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 4440 INIT_REGEX_LOCK(control_ops) 4441 }; 4442 4443 static inline void 4444 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 4445 struct ftrace_ops *ignored, struct pt_regs *regs) 4446 { 4447 struct ftrace_ops *op; 4448 int bit; 4449 4450 if (function_trace_stop) 4451 return; 4452 4453 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); 4454 if (bit < 0) 4455 return; 4456 4457 /* 4458 * Some of the ops may be dynamically allocated, 4459 * they must be freed after a synchronize_sched(). 4460 */ 4461 preempt_disable_notrace(); 4462 do_for_each_ftrace_op(op, ftrace_ops_list) { 4463 if (ftrace_ops_test(op, ip, regs)) { 4464 if (WARN_ON(!op->func)) { 4465 function_trace_stop = 1; 4466 printk("op=%p %pS\n", op, op); 4467 goto out; 4468 } 4469 op->func(ip, parent_ip, op, regs); 4470 } 4471 } while_for_each_ftrace_op(op); 4472 out: 4473 preempt_enable_notrace(); 4474 trace_clear_recursion(bit); 4475 } 4476 4477 /* 4478 * Some archs only support passing ip and parent_ip. Even though 4479 * the list function ignores the op parameter, we do not want any 4480 * C side effects, where a function is called without the caller 4481 * sending a third parameter. 4482 * Archs are to support both the regs and ftrace_ops at the same time. 4483 * If they support ftrace_ops, it is assumed they support regs. 4484 * If callbacks want to use regs, they must either check for regs 4485 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS. 4486 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved. 4487 * An architecture can pass partial regs with ftrace_ops and still 4488 * set the ARCH_SUPPORTS_FTRACE_OPS.
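 *
 * As an illustrative sketch only (my_callback is a hypothetical name,
 * not defined in this file), a callback that wants regs should
 * tolerate a NULL pt_regs unless it depends on
 * CONFIG_DYNAMIC_FTRACE_WITH_REGS:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		if (!regs)
 *			return;
 *		(use regs here only when the arch supplied them)
 *	}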
4489 */ 4490 #if ARCH_SUPPORTS_FTRACE_OPS 4491 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, 4492 struct ftrace_ops *op, struct pt_regs *regs) 4493 { 4494 __ftrace_ops_list_func(ip, parent_ip, NULL, regs); 4495 } 4496 #else 4497 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) 4498 { 4499 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); 4500 } 4501 #endif 4502 4503 static void clear_ftrace_swapper(void) 4504 { 4505 struct task_struct *p; 4506 int cpu; 4507 4508 get_online_cpus(); 4509 for_each_online_cpu(cpu) { 4510 p = idle_task(cpu); 4511 clear_tsk_trace_trace(p); 4512 } 4513 put_online_cpus(); 4514 } 4515 4516 static void set_ftrace_swapper(void) 4517 { 4518 struct task_struct *p; 4519 int cpu; 4520 4521 get_online_cpus(); 4522 for_each_online_cpu(cpu) { 4523 p = idle_task(cpu); 4524 set_tsk_trace_trace(p); 4525 } 4526 put_online_cpus(); 4527 } 4528 4529 static void clear_ftrace_pid(struct pid *pid) 4530 { 4531 struct task_struct *p; 4532 4533 rcu_read_lock(); 4534 do_each_pid_task(pid, PIDTYPE_PID, p) { 4535 clear_tsk_trace_trace(p); 4536 } while_each_pid_task(pid, PIDTYPE_PID, p); 4537 rcu_read_unlock(); 4538 4539 put_pid(pid); 4540 } 4541 4542 static void set_ftrace_pid(struct pid *pid) 4543 { 4544 struct task_struct *p; 4545 4546 rcu_read_lock(); 4547 do_each_pid_task(pid, PIDTYPE_PID, p) { 4548 set_tsk_trace_trace(p); 4549 } while_each_pid_task(pid, PIDTYPE_PID, p); 4550 rcu_read_unlock(); 4551 } 4552 4553 static void clear_ftrace_pid_task(struct pid *pid) 4554 { 4555 if (pid == ftrace_swapper_pid) 4556 clear_ftrace_swapper(); 4557 else 4558 clear_ftrace_pid(pid); 4559 } 4560 4561 static void set_ftrace_pid_task(struct pid *pid) 4562 { 4563 if (pid == ftrace_swapper_pid) 4564 set_ftrace_swapper(); 4565 else 4566 set_ftrace_pid(pid); 4567 } 4568 4569 static int ftrace_pid_add(int p) 4570 { 4571 struct pid *pid; 4572 struct ftrace_pid *fpid; 4573 int ret = -EINVAL; 4574 4575 mutex_lock(&ftrace_lock); 4576 4577 if (!p) 4578 pid = ftrace_swapper_pid; 4579 else 4580 pid = find_get_pid(p); 4581 4582 if (!pid) 4583 goto out; 4584 4585 ret = 0; 4586 4587 list_for_each_entry(fpid, &ftrace_pids, list) 4588 if (fpid->pid == pid) 4589 goto out_put; 4590 4591 ret = -ENOMEM; 4592 4593 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL); 4594 if (!fpid) 4595 goto out_put; 4596 4597 list_add(&fpid->list, &ftrace_pids); 4598 fpid->pid = pid; 4599 4600 set_ftrace_pid_task(pid); 4601 4602 ftrace_update_pid_func(); 4603 ftrace_startup_enable(0); 4604 4605 mutex_unlock(&ftrace_lock); 4606 return 0; 4607 4608 out_put: 4609 if (pid != ftrace_swapper_pid) 4610 put_pid(pid); 4611 4612 out: 4613 mutex_unlock(&ftrace_lock); 4614 return ret; 4615 } 4616 4617 static void ftrace_pid_reset(void) 4618 { 4619 struct ftrace_pid *fpid, *safe; 4620 4621 mutex_lock(&ftrace_lock); 4622 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) { 4623 struct pid *pid = fpid->pid; 4624 4625 clear_ftrace_pid_task(pid); 4626 4627 list_del(&fpid->list); 4628 kfree(fpid); 4629 } 4630 4631 ftrace_update_pid_func(); 4632 ftrace_startup_enable(0); 4633 4634 mutex_unlock(&ftrace_lock); 4635 } 4636 4637 static void *fpid_start(struct seq_file *m, loff_t *pos) 4638 { 4639 mutex_lock(&ftrace_lock); 4640 4641 if (list_empty(&ftrace_pids) && (!*pos)) 4642 return (void *) 1; 4643 4644 return seq_list_start(&ftrace_pids, *pos); 4645 } 4646 4647 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos) 4648 { 4649 if (v == (void *)1) 4650 return NULL; 4651 4652 return seq_list_next(v, 
&ftrace_pids, pos); 4653 } 4654 4655 static void fpid_stop(struct seq_file *m, void *p) 4656 { 4657 mutex_unlock(&ftrace_lock); 4658 } 4659 4660 static int fpid_show(struct seq_file *m, void *v) 4661 { 4662 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); 4663 4664 if (v == (void *)1) { 4665 seq_printf(m, "no pid\n"); 4666 return 0; 4667 } 4668 4669 if (fpid->pid == ftrace_swapper_pid) 4670 seq_printf(m, "swapper tasks\n"); 4671 else 4672 seq_printf(m, "%u\n", pid_vnr(fpid->pid)); 4673 4674 return 0; 4675 } 4676 4677 static const struct seq_operations ftrace_pid_sops = { 4678 .start = fpid_start, 4679 .next = fpid_next, 4680 .stop = fpid_stop, 4681 .show = fpid_show, 4682 }; 4683 4684 static int 4685 ftrace_pid_open(struct inode *inode, struct file *file) 4686 { 4687 int ret = 0; 4688 4689 if ((file->f_mode & FMODE_WRITE) && 4690 (file->f_flags & O_TRUNC)) 4691 ftrace_pid_reset(); 4692 4693 if (file->f_mode & FMODE_READ) 4694 ret = seq_open(file, &ftrace_pid_sops); 4695 4696 return ret; 4697 } 4698 4699 static ssize_t 4700 ftrace_pid_write(struct file *filp, const char __user *ubuf, 4701 size_t cnt, loff_t *ppos) 4702 { 4703 char buf[64], *tmp; 4704 long val; 4705 int ret; 4706 4707 if (cnt >= sizeof(buf)) 4708 return -EINVAL; 4709 4710 if (copy_from_user(&buf, ubuf, cnt)) 4711 return -EFAULT; 4712 4713 buf[cnt] = 0; 4714 4715 /* 4716 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid" 4717 * to clean the filter quietly. 4718 */ 4719 tmp = strstrip(buf); 4720 if (strlen(tmp) == 0) 4721 return 1; 4722 4723 ret = kstrtol(tmp, 10, &val); 4724 if (ret < 0) 4725 return ret; 4726 4727 ret = ftrace_pid_add(val); 4728 4729 return ret ? ret : cnt; 4730 } 4731 4732 static int 4733 ftrace_pid_release(struct inode *inode, struct file *file) 4734 { 4735 if (file->f_mode & FMODE_READ) 4736 seq_release(inode, file); 4737 4738 return 0; 4739 } 4740 4741 static const struct file_operations ftrace_pid_fops = { 4742 .open = ftrace_pid_open, 4743 .write = ftrace_pid_write, 4744 .read = seq_read, 4745 .llseek = tracing_lseek, 4746 .release = ftrace_pid_release, 4747 }; 4748 4749 static __init int ftrace_init_debugfs(void) 4750 { 4751 struct dentry *d_tracer; 4752 4753 d_tracer = tracing_init_dentry(); 4754 if (!d_tracer) 4755 return 0; 4756 4757 ftrace_init_dyn_debugfs(d_tracer); 4758 4759 trace_create_file("set_ftrace_pid", 0644, d_tracer, 4760 NULL, &ftrace_pid_fops); 4761 4762 ftrace_profile_debugfs(d_tracer); 4763 4764 return 0; 4765 } 4766 fs_initcall(ftrace_init_debugfs); 4767 4768 /** 4769 * ftrace_kill - kill ftrace 4770 * 4771 * This function should be used by panic code. It stops ftrace 4772 * but in a not so nice way. If you need to simply kill ftrace 4773 * from a non-atomic section, use ftrace_kill. 4774 */ 4775 void ftrace_kill(void) 4776 { 4777 ftrace_disabled = 1; 4778 ftrace_enabled = 0; 4779 clear_ftrace_function(); 4780 } 4781 4782 /** 4783 * Test if ftrace is dead or not. 4784 */ 4785 int ftrace_is_dead(void) 4786 { 4787 return ftrace_disabled; 4788 } 4789 4790 /** 4791 * register_ftrace_function - register a function for profiling 4792 * @ops - ops structure that holds the function for profiling. 4793 * 4794 * Register a function to be called by all functions in the 4795 * kernel. 4796 * 4797 * Note: @ops->func and all the functions it calls must be labeled 4798 * with "notrace", otherwise it will go into a 4799 * recursive loop. 
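 *
 * A minimal usage sketch (my_hits, my_callback and my_ops are
 * hypothetical, illustrative names, not defined in this file):
 *
 *	static unsigned long my_hits;
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		my_hits++;
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);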
4800 */ 4801 int register_ftrace_function(struct ftrace_ops *ops) 4802 { 4803 int ret = -1; 4804 4805 ftrace_ops_init(ops); 4806 4807 mutex_lock(&ftrace_lock); 4808 4809 ret = ftrace_startup(ops, 0); 4810 4811 mutex_unlock(&ftrace_lock); 4812 4813 return ret; 4814 } 4815 EXPORT_SYMBOL_GPL(register_ftrace_function); 4816 4817 /** 4818 * unregister_ftrace_function - unregister a function for profiling. 4819 * @ops - ops structure that holds the function to unregister 4820 * 4821 * Unregister a function that was added to be called by ftrace profiling. 4822 */ 4823 int unregister_ftrace_function(struct ftrace_ops *ops) 4824 { 4825 int ret; 4826 4827 mutex_lock(&ftrace_lock); 4828 ret = ftrace_shutdown(ops, 0); 4829 mutex_unlock(&ftrace_lock); 4830 4831 return ret; 4832 } 4833 EXPORT_SYMBOL_GPL(unregister_ftrace_function); 4834 4835 int 4836 ftrace_enable_sysctl(struct ctl_table *table, int write, 4837 void __user *buffer, size_t *lenp, 4838 loff_t *ppos) 4839 { 4840 int ret = -ENODEV; 4841 4842 mutex_lock(&ftrace_lock); 4843 4844 if (unlikely(ftrace_disabled)) 4845 goto out; 4846 4847 ret = proc_dointvec(table, write, buffer, lenp, ppos); 4848 4849 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled)) 4850 goto out; 4851 4852 last_ftrace_enabled = !!ftrace_enabled; 4853 4854 if (ftrace_enabled) { 4855 4856 ftrace_startup_sysctl(); 4857 4858 /* we are starting ftrace again */ 4859 if (ftrace_ops_list != &ftrace_list_end) 4860 update_ftrace_function(); 4861 4862 } else { 4863 /* stopping ftrace calls (just send to ftrace_stub) */ 4864 ftrace_trace_function = ftrace_stub; 4865 4866 ftrace_shutdown_sysctl(); 4867 } 4868 4869 out: 4870 mutex_unlock(&ftrace_lock); 4871 return ret; 4872 } 4873 4874 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 4875 4876 static int ftrace_graph_active; 4877 4878 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) 4879 { 4880 return 0; 4881 } 4882 4883 /* The callbacks that hook a function */ 4884 trace_func_graph_ret_t ftrace_graph_return = 4885 (trace_func_graph_ret_t)ftrace_stub; 4886 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; 4887 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub; 4888 4889 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. 
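 * Returns -EAGAIN when the batch of stacks runs out before every
 * thread has one, so the caller (start_graph_tracing() below) keeps
 * invoking it until all tasks are covered.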
*/ 4890 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) 4891 { 4892 int i; 4893 int ret = 0; 4894 unsigned long flags; 4895 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; 4896 struct task_struct *g, *t; 4897 4898 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { 4899 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH 4900 * sizeof(struct ftrace_ret_stack), 4901 GFP_KERNEL); 4902 if (!ret_stack_list[i]) { 4903 start = 0; 4904 end = i; 4905 ret = -ENOMEM; 4906 goto free; 4907 } 4908 } 4909 4910 read_lock_irqsave(&tasklist_lock, flags); 4911 do_each_thread(g, t) { 4912 if (start == end) { 4913 ret = -EAGAIN; 4914 goto unlock; 4915 } 4916 4917 if (t->ret_stack == NULL) { 4918 atomic_set(&t->tracing_graph_pause, 0); 4919 atomic_set(&t->trace_overrun, 0); 4920 t->curr_ret_stack = -1; 4921 /* Make sure the tasks see the -1 first: */ 4922 smp_wmb(); 4923 t->ret_stack = ret_stack_list[start++]; 4924 } 4925 } while_each_thread(g, t); 4926 4927 unlock: 4928 read_unlock_irqrestore(&tasklist_lock, flags); 4929 free: 4930 for (i = start; i < end; i++) 4931 kfree(ret_stack_list[i]); 4932 return ret; 4933 } 4934 4935 static void 4936 ftrace_graph_probe_sched_switch(void *ignore, 4937 struct task_struct *prev, struct task_struct *next) 4938 { 4939 unsigned long long timestamp; 4940 int index; 4941 4942 /* 4943 * Does the user want to count the time a function was asleep. 4944 * If so, do not update the time stamps. 4945 */ 4946 if (trace_flags & TRACE_ITER_SLEEP_TIME) 4947 return; 4948 4949 timestamp = trace_clock_local(); 4950 4951 prev->ftrace_timestamp = timestamp; 4952 4953 /* only process tasks that we timestamped */ 4954 if (!next->ftrace_timestamp) 4955 return; 4956 4957 /* 4958 * Update all the counters in next to make up for the 4959 * time next was sleeping. 4960 */ 4961 timestamp -= next->ftrace_timestamp; 4962 4963 for (index = next->curr_ret_stack; index >= 0; index--) 4964 next->ret_stack[index].calltime += timestamp; 4965 } 4966 4967 /* Allocate a return stack for each task */ 4968 static int start_graph_tracing(void) 4969 { 4970 struct ftrace_ret_stack **ret_stack_list; 4971 int ret, cpu; 4972 4973 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * 4974 sizeof(struct ftrace_ret_stack *), 4975 GFP_KERNEL); 4976 4977 if (!ret_stack_list) 4978 return -ENOMEM; 4979 4980 /* The cpu_boot init_task->ret_stack will never be freed */ 4981 for_each_online_cpu(cpu) { 4982 if (!idle_task(cpu)->ret_stack) 4983 ftrace_graph_init_idle_task(idle_task(cpu), cpu); 4984 } 4985 4986 do { 4987 ret = alloc_retstack_tasklist(ret_stack_list); 4988 } while (ret == -EAGAIN); 4989 4990 if (!ret) { 4991 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 4992 if (ret) 4993 pr_info("ftrace_graph: Couldn't activate tracepoint" 4994 " probe to kernel_sched_switch\n"); 4995 } 4996 4997 kfree(ret_stack_list); 4998 return ret; 4999 } 5000 5001 /* 5002 * Hibernation protection. 5003 * The state of the current task is too much unstable during 5004 * suspend/restore to disk. We want to protect against that. 
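 * The PM notifier below pauses graph tracing on PM_HIBERNATION_PREPARE
 * and resumes it on PM_POST_HIBERNATION.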
5005 */ 5006 static int 5007 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state, 5008 void *unused) 5009 { 5010 switch (state) { 5011 case PM_HIBERNATION_PREPARE: 5012 pause_graph_tracing(); 5013 break; 5014 5015 case PM_POST_HIBERNATION: 5016 unpause_graph_tracing(); 5017 break; 5018 } 5019 return NOTIFY_DONE; 5020 } 5021 5022 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace) 5023 { 5024 if (!ftrace_ops_test(&global_ops, trace->func, NULL)) 5025 return 0; 5026 return __ftrace_graph_entry(trace); 5027 } 5028 5029 /* 5030 * The function graph tracer should only trace the functions defined 5031 * by set_ftrace_filter and set_ftrace_notrace. If another function 5032 * tracer ops is registered, the graph tracer requires testing the 5033 * function against the global ops, and not just trace any function 5034 * that any ftrace_ops registered. 5035 */ 5036 static void update_function_graph_func(void) 5037 { 5038 if (ftrace_ops_list == &ftrace_list_end || 5039 (ftrace_ops_list == &global_ops && 5040 global_ops.next == &ftrace_list_end)) 5041 ftrace_graph_entry = __ftrace_graph_entry; 5042 else 5043 ftrace_graph_entry = ftrace_graph_entry_test; 5044 } 5045 5046 static struct notifier_block ftrace_suspend_notifier = { 5047 .notifier_call = ftrace_suspend_notifier_call, 5048 }; 5049 5050 int register_ftrace_graph(trace_func_graph_ret_t retfunc, 5051 trace_func_graph_ent_t entryfunc) 5052 { 5053 int ret = 0; 5054 5055 mutex_lock(&ftrace_lock); 5056 5057 /* we currently allow only one tracer registered at a time */ 5058 if (ftrace_graph_active) { 5059 ret = -EBUSY; 5060 goto out; 5061 } 5062 5063 register_pm_notifier(&ftrace_suspend_notifier); 5064 5065 ftrace_graph_active++; 5066 ret = start_graph_tracing(); 5067 if (ret) { 5068 ftrace_graph_active--; 5069 goto out; 5070 } 5071 5072 ftrace_graph_return = retfunc; 5073 5074 /* 5075 * Update the indirect function to the entryfunc, and the 5076 * function that gets called to the entry_test first. Then 5077 * call the update fgraph entry function to determine if 5078 * the entryfunc should be called directly or not. 
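 * (update_function_graph_func() keeps the entry_test wrapper only while
 * ftrace_ops other than the graph tracer's global_ops are registered;
 * otherwise entryfunc ends up being called directly.)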
5079 */ 5080 __ftrace_graph_entry = entryfunc; 5081 ftrace_graph_entry = ftrace_graph_entry_test; 5082 update_function_graph_func(); 5083 5084 /* Function graph doesn't use the .func field of global_ops */ 5085 global_ops.flags |= FTRACE_OPS_FL_STUB; 5086 5087 ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET); 5088 5089 out: 5090 mutex_unlock(&ftrace_lock); 5091 return ret; 5092 } 5093 5094 void unregister_ftrace_graph(void) 5095 { 5096 mutex_lock(&ftrace_lock); 5097 5098 if (unlikely(!ftrace_graph_active)) 5099 goto out; 5100 5101 ftrace_graph_active--; 5102 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 5103 ftrace_graph_entry = ftrace_graph_entry_stub; 5104 __ftrace_graph_entry = ftrace_graph_entry_stub; 5105 ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET); 5106 global_ops.flags &= ~FTRACE_OPS_FL_STUB; 5107 unregister_pm_notifier(&ftrace_suspend_notifier); 5108 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); 5109 5110 out: 5111 mutex_unlock(&ftrace_lock); 5112 } 5113 5114 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack); 5115 5116 static void 5117 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) 5118 { 5119 atomic_set(&t->tracing_graph_pause, 0); 5120 atomic_set(&t->trace_overrun, 0); 5121 t->ftrace_timestamp = 0; 5122 /* make curr_ret_stack visible before we add the ret_stack */ 5123 smp_wmb(); 5124 t->ret_stack = ret_stack; 5125 } 5126 5127 /* 5128 * Allocate a return stack for the idle task. May be the first 5129 * time through, or it may be done by CPU hotplug online. 5130 */ 5131 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) 5132 { 5133 t->curr_ret_stack = -1; 5134 /* 5135 * The idle task has no parent, it either has its own 5136 * stack or no stack at all. 5137 */ 5138 if (t->ret_stack) 5139 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu)); 5140 5141 if (ftrace_graph_active) { 5142 struct ftrace_ret_stack *ret_stack; 5143 5144 ret_stack = per_cpu(idle_ret_stack, cpu); 5145 if (!ret_stack) { 5146 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH 5147 * sizeof(struct ftrace_ret_stack), 5148 GFP_KERNEL); 5149 if (!ret_stack) 5150 return; 5151 per_cpu(idle_ret_stack, cpu) = ret_stack; 5152 } 5153 graph_init_task(t, ret_stack); 5154 } 5155 } 5156 5157 /* Allocate a return stack for newly created task */ 5158 void ftrace_graph_init_task(struct task_struct *t) 5159 { 5160 /* Make sure we do not use the parent ret_stack */ 5161 t->ret_stack = NULL; 5162 t->curr_ret_stack = -1; 5163 5164 if (ftrace_graph_active) { 5165 struct ftrace_ret_stack *ret_stack; 5166 5167 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH 5168 * sizeof(struct ftrace_ret_stack), 5169 GFP_KERNEL); 5170 if (!ret_stack) 5171 return; 5172 graph_init_task(t, ret_stack); 5173 } 5174 } 5175 5176 void ftrace_graph_exit_task(struct task_struct *t) 5177 { 5178 struct ftrace_ret_stack *ret_stack = t->ret_stack; 5179 5180 t->ret_stack = NULL; 5181 /* NULL must become visible to IRQs before we free it: */ 5182 barrier(); 5183 5184 kfree(ret_stack); 5185 } 5186 5187 void ftrace_graph_stop(void) 5188 { 5189 ftrace_stop(); 5190 } 5191 #endif 5192