xref: /openbmc/linux/kernel/trace/ftrace.c (revision 1f9f6a78)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35 
36 #include <trace/events/sched.h>
37 
38 #include <asm/setup.h>
39 
40 #include "trace_output.h"
41 #include "trace_stat.h"
42 
43 #define FTRACE_WARN_ON(cond)			\
44 	({					\
45 		int ___r = cond;		\
46 		if (WARN_ON(___r))		\
47 			ftrace_kill();		\
48 		___r;				\
49 	})
50 
51 #define FTRACE_WARN_ON_ONCE(cond)		\
52 	({					\
53 		int ___r = cond;		\
54 		if (WARN_ON_ONCE(___r))		\
55 			ftrace_kill();		\
56 		___r;				\
57 	})
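
/*
 * For illustration: both macros evaluate to the tested condition, so a
 * hypothetical caller can report the anomaly and bail out in one step,
 * with ftrace_kill() having already shut tracing down:
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return -EINVAL;
 */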
58 
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
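
/*
 * For reference: FTRACE_FUNC_HASHSIZE works out to 1 << 7 = 128 buckets,
 * FTRACE_HASH_DEFAULT_BITS to 1 << 10 = 1024 buckets, and
 * FTRACE_HASH_MAX_BITS caps a rebuilt hash at 1 << 12 = 4096 buckets.
 */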
64 
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
66 
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_OPS_HASH(opsname)	\
69 	.func_hash		= &opsname.local_hash,			\
70 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71 #define ASSIGN_OPS_HASH(opsname, val) \
72 	.func_hash		= val, \
73 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
74 #else
75 #define INIT_OPS_HASH(opsname)
76 #define ASSIGN_OPS_HASH(opsname, val)
77 #endif
78 
79 static struct ftrace_ops ftrace_list_end __read_mostly = {
80 	.func		= ftrace_stub,
81 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
82 	INIT_OPS_HASH(ftrace_list_end)
83 };
84 
85 /* ftrace_enabled is a method to turn ftrace on or off */
86 int ftrace_enabled __read_mostly;
87 static int last_ftrace_enabled;
88 
89 /* Current function tracing op */
90 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
91 /* What to set function_trace_op to */
92 static struct ftrace_ops *set_function_trace_op;
93 
94 /* List for set_ftrace_pid's pids. */
95 LIST_HEAD(ftrace_pids);
96 struct ftrace_pid {
97 	struct list_head list;
98 	struct pid *pid;
99 };
100 
101 /*
102  * ftrace_disabled is set when an anomaly is discovered.
103  * ftrace_disabled is much stronger than ftrace_enabled.
104  */
105 static int ftrace_disabled __read_mostly;
106 
107 static DEFINE_MUTEX(ftrace_lock);
108 
109 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113 static struct ftrace_ops global_ops;
114 static struct ftrace_ops control_ops;
115 
116 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
117 				   struct ftrace_ops *op, struct pt_regs *regs);
118 
119 #if ARCH_SUPPORTS_FTRACE_OPS
120 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
121 				 struct ftrace_ops *op, struct pt_regs *regs);
122 #else
123 /* See comment below, where ftrace_ops_list_func is defined */
124 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
125 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
126 #endif
127 
128 /*
129  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
130  * can use rcu_dereference_raw_notrace() is that elements removed from this list
131  * are simply leaked, so there is no need to interact with a grace-period
132  * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
133  * concurrent insertions into the ftrace_global_list.
134  *
135  * Silly Alpha and silly pointer-speculation compiler optimizations!
136  */
137 #define do_for_each_ftrace_op(op, list)			\
138 	op = rcu_dereference_raw_notrace(list);			\
139 	do
140 
141 /*
142  * Optimized for just a single item in the list (as that is the normal case).
143  */
144 #define while_for_each_ftrace_op(op)				\
145 	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
146 	       unlikely((op) != &ftrace_list_end))
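
/*
 * A minimal usage sketch of the iteration pair above; the same pattern
 * is used by is_ftrace_trampoline() and other walkers later in this
 * file (op_is_interesting() is a made-up predicate for illustration):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op_is_interesting(op))
 *			pr_info("found %pS\n", op->func);
 *	} while_for_each_ftrace_op(op);
 */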
147 
148 static inline void ftrace_ops_init(struct ftrace_ops *ops)
149 {
150 #ifdef CONFIG_DYNAMIC_FTRACE
151 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
152 		mutex_init(&ops->local_hash.regex_lock);
153 		ops->func_hash = &ops->local_hash;
154 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
155 	}
156 #endif
157 }
158 
159 /**
160  * ftrace_nr_registered_ops - return number of ops registered
161  *
162  * Returns the number of ftrace_ops registered and tracing functions
163  */
164 int ftrace_nr_registered_ops(void)
165 {
166 	struct ftrace_ops *ops;
167 	int cnt = 0;
168 
169 	mutex_lock(&ftrace_lock);
170 
171 	for (ops = ftrace_ops_list;
172 	     ops != &ftrace_list_end; ops = ops->next)
173 		cnt++;
174 
175 	mutex_unlock(&ftrace_lock);
176 
177 	return cnt;
178 }
179 
180 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
181 			    struct ftrace_ops *op, struct pt_regs *regs)
182 {
183 	if (!test_tsk_trace_trace(current))
184 		return;
185 
186 	ftrace_pid_function(ip, parent_ip, op, regs);
187 }
188 
189 static void set_ftrace_pid_function(ftrace_func_t func)
190 {
191 	/* do not set ftrace_pid_function to itself! */
192 	if (func != ftrace_pid_func)
193 		ftrace_pid_function = func;
194 }
195 
196 /**
197  * clear_ftrace_function - reset the ftrace function
198  *
199  * This NULLs the ftrace function and in essence stops
200  * tracing. There may be a short lag before tracing actually stops.
201  */
202 void clear_ftrace_function(void)
203 {
204 	ftrace_trace_function = ftrace_stub;
205 	ftrace_pid_function = ftrace_stub;
206 }
207 
208 static void control_ops_disable_all(struct ftrace_ops *ops)
209 {
210 	int cpu;
211 
212 	for_each_possible_cpu(cpu)
213 		*per_cpu_ptr(ops->disabled, cpu) = 1;
214 }
215 
216 static int control_ops_alloc(struct ftrace_ops *ops)
217 {
218 	int __percpu *disabled;
219 
220 	disabled = alloc_percpu(int);
221 	if (!disabled)
222 		return -ENOMEM;
223 
224 	ops->disabled = disabled;
225 	control_ops_disable_all(ops);
226 	return 0;
227 }
228 
229 static void ftrace_sync(struct work_struct *work)
230 {
231 	/*
232 	 * This function is just a stub to implement a hard force
233 	 * of synchronize_sched(). This requires synchronizing
234 	 * tasks even in userspace and idle.
235 	 *
236 	 * Yes, function tracing is rude.
237 	 */
238 }
239 
240 static void ftrace_sync_ipi(void *data)
241 {
242 	/* Probably not needed, but do it anyway */
243 	smp_rmb();
244 }
245 
246 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
247 static void update_function_graph_func(void);
248 #else
249 static inline void update_function_graph_func(void) { }
250 #endif
251 
252 static void update_ftrace_function(void)
253 {
254 	ftrace_func_t func;
255 
256 	/*
257 	 * Prepare the ftrace_ops that the arch callback will use.
258 	 * If there's only one ftrace_ops registered, the ftrace_ops_list
259 	 * will point to the ops we want.
260 	 */
261 	set_function_trace_op = ftrace_ops_list;
262 
263 	/* If there's no ftrace_ops registered, just call the stub function */
264 	if (ftrace_ops_list == &ftrace_list_end) {
265 		func = ftrace_stub;
266 
267 	/*
268 	 * If we are at the end of the list and this ops is
269 	 * recursion safe and not dynamic and the arch supports passing ops,
270 	 * then have the mcount trampoline call the function directly.
271 	 */
272 	} else if (ftrace_ops_list->next == &ftrace_list_end) {
273 		func = ftrace_ops_get_func(ftrace_ops_list);
274 
275 	} else {
276 		/* Just use the default ftrace_ops */
277 		set_function_trace_op = &ftrace_list_end;
278 		func = ftrace_ops_list_func;
279 	}
280 
281 	update_function_graph_func();
282 
283 	/* If there's no change, then do nothing more here */
284 	if (ftrace_trace_function == func)
285 		return;
286 
287 	/*
288 	 * If we are using the list function, it doesn't care
289 	 * about the function_trace_ops.
290 	 */
291 	if (func == ftrace_ops_list_func) {
292 		ftrace_trace_function = func;
293 		/*
294 		 * Don't even bother setting function_trace_ops,
295 		 * it would be racy to do so anyway.
296 		 */
297 		return;
298 	}
299 
300 #ifndef CONFIG_DYNAMIC_FTRACE
301 	/*
302 	 * For static tracing, we need to be a bit more careful.
303 	 * The function change takes effect immediately. Thus,
304 	 * we need to coordinate the setting of the function_trace_op
305 	 * with the setting of the ftrace_trace_function.
306 	 *
307 	 * Set the function to the list ops, which will call the
308 	 * function we want, albeit indirectly, but it handles the
309 	 * ftrace_ops and doesn't depend on function_trace_op.
310 	 */
311 	ftrace_trace_function = ftrace_ops_list_func;
312 	/*
313 	 * Make sure all CPUs see this. Yes this is slow, but static
314 	 * tracing is slow and nasty to have enabled.
315 	 */
316 	schedule_on_each_cpu(ftrace_sync);
317 	/* Now all cpus are using the list ops. */
318 	function_trace_op = set_function_trace_op;
319 	/* Make sure the function_trace_op is visible on all CPUs */
320 	smp_wmb();
321 	/* Nasty way to force a rmb on all cpus */
322 	smp_call_function(ftrace_sync_ipi, NULL, 1);
323 	/* OK, we are all set to update the ftrace_trace_function now! */
324 #endif /* !CONFIG_DYNAMIC_FTRACE */
325 
326 	ftrace_trace_function = func;
327 }
328 
329 int using_ftrace_ops_list_func(void)
330 {
331 	return ftrace_trace_function == ftrace_ops_list_func;
332 }
333 
334 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
335 {
336 	ops->next = *list;
337 	/*
338 	 * We are entering ops into the list but another
339 	 * CPU might be walking that list. We need to make sure
340 	 * the ops->next pointer is valid before another CPU sees
341 	 * the ops pointer included into the list.
342 	 */
343 	rcu_assign_pointer(*list, ops);
344 }
345 
346 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
347 {
348 	struct ftrace_ops **p;
349 
350 	/*
351 	 * If we are removing the last function, then simply point
352 	 * to the ftrace_stub.
353 	 */
354 	if (*list == ops && ops->next == &ftrace_list_end) {
355 		*list = &ftrace_list_end;
356 		return 0;
357 	}
358 
359 	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
360 		if (*p == ops)
361 			break;
362 
363 	if (*p != ops)
364 		return -1;
365 
366 	*p = (*p)->next;
367 	return 0;
368 }
369 
370 static void add_ftrace_list_ops(struct ftrace_ops **list,
371 				struct ftrace_ops *main_ops,
372 				struct ftrace_ops *ops)
373 {
374 	int first = *list == &ftrace_list_end;
375 	add_ftrace_ops(list, ops);
376 	if (first)
377 		add_ftrace_ops(&ftrace_ops_list, main_ops);
378 }
379 
380 static int remove_ftrace_list_ops(struct ftrace_ops **list,
381 				  struct ftrace_ops *main_ops,
382 				  struct ftrace_ops *ops)
383 {
384 	int ret = remove_ftrace_ops(list, ops);
385 	if (!ret && *list == &ftrace_list_end)
386 		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
387 	return ret;
388 }
389 
390 static void ftrace_update_trampoline(struct ftrace_ops *ops);
391 
392 static int __register_ftrace_function(struct ftrace_ops *ops)
393 {
394 	if (ops->flags & FTRACE_OPS_FL_DELETED)
395 		return -EINVAL;
396 
397 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
398 		return -EBUSY;
399 
400 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
401 	/*
402 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
403 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
404 	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
405 	 */
406 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
407 	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
408 		return -EINVAL;
409 
410 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
411 		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
412 #endif
413 
414 	if (!core_kernel_data((unsigned long)ops))
415 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
416 
417 	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
418 		if (control_ops_alloc(ops))
419 			return -ENOMEM;
420 		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
421 		/* The control_ops needs the trampoline update */
422 		ops = &control_ops;
423 	} else
424 		add_ftrace_ops(&ftrace_ops_list, ops);
425 
426 	ftrace_update_trampoline(ops);
427 
428 	if (ftrace_enabled)
429 		update_ftrace_function();
430 
431 	return 0;
432 }
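
/*
 * For illustration, a minimal sketch of what a caller typically sets up
 * before its ops reaches __register_ftrace_function() through the public
 * register_ftrace_function() wrapper defined later in this file; the
 * callback and ops names here are hypothetical:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		trace_printk("hit %pS\n", (void *)ip);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */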
433 
434 static int __unregister_ftrace_function(struct ftrace_ops *ops)
435 {
436 	int ret;
437 
438 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
439 		return -EBUSY;
440 
441 	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
442 		ret = remove_ftrace_list_ops(&ftrace_control_list,
443 					     &control_ops, ops);
444 	} else
445 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
446 
447 	if (ret < 0)
448 		return ret;
449 
450 	if (ftrace_enabled)
451 		update_ftrace_function();
452 
453 	return 0;
454 }
455 
456 static void ftrace_update_pid_func(void)
457 {
458 	/* Only do something if we are tracing something */
459 	if (ftrace_trace_function == ftrace_stub)
460 		return;
461 
462 	update_ftrace_function();
463 }
464 
465 #ifdef CONFIG_FUNCTION_PROFILER
466 struct ftrace_profile {
467 	struct hlist_node		node;
468 	unsigned long			ip;
469 	unsigned long			counter;
470 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
471 	unsigned long long		time;
472 	unsigned long long		time_squared;
473 #endif
474 };
475 
476 struct ftrace_profile_page {
477 	struct ftrace_profile_page	*next;
478 	unsigned long			index;
479 	struct ftrace_profile		records[];
480 };
481 
482 struct ftrace_profile_stat {
483 	atomic_t			disabled;
484 	struct hlist_head		*hash;
485 	struct ftrace_profile_page	*pages;
486 	struct ftrace_profile_page	*start;
487 	struct tracer_stat		stat;
488 };
489 
490 #define PROFILE_RECORDS_SIZE						\
491 	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
492 
493 #define PROFILES_PER_PAGE					\
494 	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
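
/*
 * Rough sizing, assuming 4K pages and 64-bit pointers: with the graph
 * tracer enabled a struct ftrace_profile is about 48 bytes and the page
 * header is 16 bytes, so PROFILES_PER_PAGE comes out to roughly
 * (4096 - 16) / 48 = 85 records per page.
 */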
495 
496 static int ftrace_profile_enabled __read_mostly;
497 
498 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
499 static DEFINE_MUTEX(ftrace_profile_lock);
500 
501 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
502 
503 #define FTRACE_PROFILE_HASH_BITS 10
504 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
505 
506 static void *
507 function_stat_next(void *v, int idx)
508 {
509 	struct ftrace_profile *rec = v;
510 	struct ftrace_profile_page *pg;
511 
512 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
513 
514  again:
515 	if (idx != 0)
516 		rec++;
517 
518 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
519 		pg = pg->next;
520 		if (!pg)
521 			return NULL;
522 		rec = &pg->records[0];
523 		if (!rec->counter)
524 			goto again;
525 	}
526 
527 	return rec;
528 }
529 
530 static void *function_stat_start(struct tracer_stat *trace)
531 {
532 	struct ftrace_profile_stat *stat =
533 		container_of(trace, struct ftrace_profile_stat, stat);
534 
535 	if (!stat || !stat->start)
536 		return NULL;
537 
538 	return function_stat_next(&stat->start->records[0], 0);
539 }
540 
541 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
542 /* function graph compares on total time */
543 static int function_stat_cmp(void *p1, void *p2)
544 {
545 	struct ftrace_profile *a = p1;
546 	struct ftrace_profile *b = p2;
547 
548 	if (a->time < b->time)
549 		return -1;
550 	if (a->time > b->time)
551 		return 1;
552 	else
553 		return 0;
554 }
555 #else
556 /* without function graph tracing, compare against hit counts */
557 static int function_stat_cmp(void *p1, void *p2)
558 {
559 	struct ftrace_profile *a = p1;
560 	struct ftrace_profile *b = p2;
561 
562 	if (a->counter < b->counter)
563 		return -1;
564 	if (a->counter > b->counter)
565 		return 1;
566 	else
567 		return 0;
568 }
569 #endif
570 
571 static int function_stat_headers(struct seq_file *m)
572 {
573 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
574 	seq_puts(m, "  Function                               "
575 		 "Hit    Time            Avg             s^2\n"
576 		    "  --------                               "
577 		 "---    ----            ---             ---\n");
578 #else
579 	seq_puts(m, "  Function                               Hit\n"
580 		    "  --------                               ---\n");
581 #endif
582 	return 0;
583 }
584 
585 static int function_stat_show(struct seq_file *m, void *v)
586 {
587 	struct ftrace_profile *rec = v;
588 	char str[KSYM_SYMBOL_LEN];
589 	int ret = 0;
590 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
591 	static struct trace_seq s;
592 	unsigned long long avg;
593 	unsigned long long stddev;
594 #endif
595 	mutex_lock(&ftrace_profile_lock);
596 
597 	/* we raced with function_profile_reset() */
598 	if (unlikely(rec->counter == 0)) {
599 		ret = -EBUSY;
600 		goto out;
601 	}
602 
603 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
604 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
605 
606 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
607 	seq_puts(m, "    ");
608 	avg = rec->time;
609 	do_div(avg, rec->counter);
610 
611 	/* Sample variance (s^2) */
612 	if (rec->counter <= 1)
613 		stddev = 0;
614 	else {
615 		/*
616 		 * Apply Welford's method:
617 		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
618 		 */
619 		stddev = rec->counter * rec->time_squared -
620 			 rec->time * rec->time;
621 
622 		/*
623 		 * Divide by only 1000 for the ns^2 -> us^2 conversion;
624 		 * trace_print_graph_duration() will divide by 1000 again.
625 		 */
626 		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
627 	}
628 
629 	trace_seq_init(&s);
630 	trace_print_graph_duration(rec->time, &s);
631 	trace_seq_puts(&s, "    ");
632 	trace_print_graph_duration(avg, &s);
633 	trace_seq_puts(&s, "    ");
634 	trace_print_graph_duration(stddev, &s);
635 	trace_print_seq(m, &s);
636 #endif
637 	seq_putc(m, '\n');
638 out:
639 	mutex_unlock(&ftrace_profile_lock);
640 
641 	return ret;
642 }
643 
644 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
645 {
646 	struct ftrace_profile_page *pg;
647 
648 	pg = stat->pages = stat->start;
649 
650 	while (pg) {
651 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
652 		pg->index = 0;
653 		pg = pg->next;
654 	}
655 
656 	memset(stat->hash, 0,
657 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
658 }
659 
660 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
661 {
662 	struct ftrace_profile_page *pg;
663 	int functions;
664 	int pages;
665 	int i;
666 
667 	/* If we already allocated, do nothing */
668 	if (stat->pages)
669 		return 0;
670 
671 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
672 	if (!stat->pages)
673 		return -ENOMEM;
674 
675 #ifdef CONFIG_DYNAMIC_FTRACE
676 	functions = ftrace_update_tot_cnt;
677 #else
678 	/*
679 	 * We do not know the number of functions that exist because
680 	 * dynamic tracing is what counts them. From past experience,
681 	 * we have around 20K functions. That should be more than enough.
682 	 * It is highly unlikely we will execute every function in
683 	 * the kernel.
684 	 */
685 	functions = 20000;
686 #endif
687 
688 	pg = stat->start = stat->pages;
689 
690 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
691 
692 	for (i = 1; i < pages; i++) {
693 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
694 		if (!pg->next)
695 			goto out_free;
696 		pg = pg->next;
697 	}
698 
699 	return 0;
700 
701  out_free:
702 	pg = stat->start;
703 	while (pg) {
704 		unsigned long tmp = (unsigned long)pg;
705 
706 		pg = pg->next;
707 		free_page(tmp);
708 	}
709 
710 	stat->pages = NULL;
711 	stat->start = NULL;
712 
713 	return -ENOMEM;
714 }
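
/*
 * Back-of-the-envelope cost, under the same 4K-page/48-byte-record
 * assumption noted above: the 20000-function fallback estimate needs
 * DIV_ROUND_UP(20000, 85) = 236 pages, i.e. a bit under 1 MB of
 * profile records per possible CPU.
 */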
715 
716 static int ftrace_profile_init_cpu(int cpu)
717 {
718 	struct ftrace_profile_stat *stat;
719 	int size;
720 
721 	stat = &per_cpu(ftrace_profile_stats, cpu);
722 
723 	if (stat->hash) {
724 		/* If the profile is already created, simply reset it */
725 		ftrace_profile_reset(stat);
726 		return 0;
727 	}
728 
729 	/*
730 	 * We are profiling all functions, but usually only a few thousand
731 	 * functions are hit. We'll make a hash of 1024 items.
732 	 */
733 	size = FTRACE_PROFILE_HASH_SIZE;
734 
735 	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
736 
737 	if (!stat->hash)
738 		return -ENOMEM;
739 
740 	/* Preallocate the function profiling pages */
741 	if (ftrace_profile_pages_init(stat) < 0) {
742 		kfree(stat->hash);
743 		stat->hash = NULL;
744 		return -ENOMEM;
745 	}
746 
747 	return 0;
748 }
749 
750 static int ftrace_profile_init(void)
751 {
752 	int cpu;
753 	int ret = 0;
754 
755 	for_each_possible_cpu(cpu) {
756 		ret = ftrace_profile_init_cpu(cpu);
757 		if (ret)
758 			break;
759 	}
760 
761 	return ret;
762 }
763 
764 /* interrupts must be disabled */
765 static struct ftrace_profile *
766 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
767 {
768 	struct ftrace_profile *rec;
769 	struct hlist_head *hhd;
770 	unsigned long key;
771 
772 	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
773 	hhd = &stat->hash[key];
774 
775 	if (hlist_empty(hhd))
776 		return NULL;
777 
778 	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
779 		if (rec->ip == ip)
780 			return rec;
781 	}
782 
783 	return NULL;
784 }
785 
786 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
787 			       struct ftrace_profile *rec)
788 {
789 	unsigned long key;
790 
791 	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
792 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
793 }
794 
795 /*
796  * The memory is already allocated; this simply finds a new record to use.
797  */
798 static struct ftrace_profile *
799 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
800 {
801 	struct ftrace_profile *rec = NULL;
802 
803 	/* prevent recursion (from NMIs) */
804 	if (atomic_inc_return(&stat->disabled) != 1)
805 		goto out;
806 
807 	/*
808 	 * Try to find the function again since an NMI
809 	 * could have added it
810 	 */
811 	rec = ftrace_find_profiled_func(stat, ip);
812 	if (rec)
813 		goto out;
814 
815 	if (stat->pages->index == PROFILES_PER_PAGE) {
816 		if (!stat->pages->next)
817 			goto out;
818 		stat->pages = stat->pages->next;
819 	}
820 
821 	rec = &stat->pages->records[stat->pages->index++];
822 	rec->ip = ip;
823 	ftrace_add_profile(stat, rec);
824 
825  out:
826 	atomic_dec(&stat->disabled);
827 
828 	return rec;
829 }
830 
831 static void
832 function_profile_call(unsigned long ip, unsigned long parent_ip,
833 		      struct ftrace_ops *ops, struct pt_regs *regs)
834 {
835 	struct ftrace_profile_stat *stat;
836 	struct ftrace_profile *rec;
837 	unsigned long flags;
838 
839 	if (!ftrace_profile_enabled)
840 		return;
841 
842 	local_irq_save(flags);
843 
844 	stat = this_cpu_ptr(&ftrace_profile_stats);
845 	if (!stat->hash || !ftrace_profile_enabled)
846 		goto out;
847 
848 	rec = ftrace_find_profiled_func(stat, ip);
849 	if (!rec) {
850 		rec = ftrace_profile_alloc(stat, ip);
851 		if (!rec)
852 			goto out;
853 	}
854 
855 	rec->counter++;
856  out:
857 	local_irq_restore(flags);
858 }
859 
860 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
861 static int profile_graph_entry(struct ftrace_graph_ent *trace)
862 {
863 	function_profile_call(trace->func, 0, NULL, NULL);
864 	return 1;
865 }
866 
867 static void profile_graph_return(struct ftrace_graph_ret *trace)
868 {
869 	struct ftrace_profile_stat *stat;
870 	unsigned long long calltime;
871 	struct ftrace_profile *rec;
872 	unsigned long flags;
873 
874 	local_irq_save(flags);
875 	stat = this_cpu_ptr(&ftrace_profile_stats);
876 	if (!stat->hash || !ftrace_profile_enabled)
877 		goto out;
878 
879 	/* If the calltime was zero'd ignore it */
880 	if (!trace->calltime)
881 		goto out;
882 
883 	calltime = trace->rettime - trace->calltime;
884 
885 	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
886 		int index;
887 
888 		index = trace->depth;
889 
890 		/* Append this call time to the parent time to subtract */
891 		if (index)
892 			current->ret_stack[index - 1].subtime += calltime;
893 
894 		if (current->ret_stack[index].subtime < calltime)
895 			calltime -= current->ret_stack[index].subtime;
896 		else
897 			calltime = 0;
898 	}
899 
900 	rec = ftrace_find_profiled_func(stat, trace->func);
901 	if (rec) {
902 		rec->time += calltime;
903 		rec->time_squared += calltime * calltime;
904 	}
905 
906  out:
907 	local_irq_restore(flags);
908 }
909 
910 static int register_ftrace_profiler(void)
911 {
912 	return register_ftrace_graph(&profile_graph_return,
913 				     &profile_graph_entry);
914 }
915 
916 static void unregister_ftrace_profiler(void)
917 {
918 	unregister_ftrace_graph();
919 }
920 #else
921 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
922 	.func		= function_profile_call,
923 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
924 	INIT_OPS_HASH(ftrace_profile_ops)
925 };
926 
927 static int register_ftrace_profiler(void)
928 {
929 	return register_ftrace_function(&ftrace_profile_ops);
930 }
931 
932 static void unregister_ftrace_profiler(void)
933 {
934 	unregister_ftrace_function(&ftrace_profile_ops);
935 }
936 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
937 
938 static ssize_t
939 ftrace_profile_write(struct file *filp, const char __user *ubuf,
940 		     size_t cnt, loff_t *ppos)
941 {
942 	unsigned long val;
943 	int ret;
944 
945 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
946 	if (ret)
947 		return ret;
948 
949 	val = !!val;
950 
951 	mutex_lock(&ftrace_profile_lock);
952 	if (ftrace_profile_enabled ^ val) {
953 		if (val) {
954 			ret = ftrace_profile_init();
955 			if (ret < 0) {
956 				cnt = ret;
957 				goto out;
958 			}
959 
960 			ret = register_ftrace_profiler();
961 			if (ret < 0) {
962 				cnt = ret;
963 				goto out;
964 			}
965 			ftrace_profile_enabled = 1;
966 		} else {
967 			ftrace_profile_enabled = 0;
968 			/*
969 			 * unregister_ftrace_profiler calls stop_machine
970 			 * so this acts like a synchronize_sched().
971 			 */
972 			unregister_ftrace_profiler();
973 		}
974 	}
975  out:
976 	mutex_unlock(&ftrace_profile_lock);
977 
978 	*ppos += cnt;
979 
980 	return cnt;
981 }
982 
983 static ssize_t
984 ftrace_profile_read(struct file *filp, char __user *ubuf,
985 		     size_t cnt, loff_t *ppos)
986 {
987 	char buf[64];		/* big enough to hold a number */
988 	int r;
989 
990 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
991 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
992 }
993 
994 static const struct file_operations ftrace_profile_fops = {
995 	.open		= tracing_open_generic,
996 	.read		= ftrace_profile_read,
997 	.write		= ftrace_profile_write,
998 	.llseek		= default_llseek,
999 };
1000 
1001 /* used to initialize the real stat files */
1002 static struct tracer_stat function_stats __initdata = {
1003 	.name		= "functions",
1004 	.stat_start	= function_stat_start,
1005 	.stat_next	= function_stat_next,
1006 	.stat_cmp	= function_stat_cmp,
1007 	.stat_headers	= function_stat_headers,
1008 	.stat_show	= function_stat_show
1009 };
1010 
1011 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1012 {
1013 	struct ftrace_profile_stat *stat;
1014 	struct dentry *entry;
1015 	char *name;
1016 	int ret;
1017 	int cpu;
1018 
1019 	for_each_possible_cpu(cpu) {
1020 		stat = &per_cpu(ftrace_profile_stats, cpu);
1021 
1022 		/* allocate enough for function name + cpu number */
1023 		name = kmalloc(32, GFP_KERNEL);
1024 		if (!name) {
1025 			/*
1026 			 * The files created are permanent; if something fails
1027 			 * later, we still do not free this memory.
1028 			 */
1029 			WARN(1,
1030 			     "Could not allocate stat file for cpu %d\n",
1031 			     cpu);
1032 			return;
1033 		}
1034 		stat->stat = function_stats;
1035 		snprintf(name, 32, "function%d", cpu);
1036 		stat->stat.name = name;
1037 		ret = register_stat_tracer(&stat->stat);
1038 		if (ret) {
1039 			WARN(1,
1040 			     "Could not register function stat for cpu %d\n",
1041 			     cpu);
1042 			kfree(name);
1043 			return;
1044 		}
1045 	}
1046 
1047 	entry = debugfs_create_file("function_profile_enabled", 0644,
1048 				    d_tracer, NULL, &ftrace_profile_fops);
1049 	if (!entry)
1050 		pr_warning("Could not create debugfs "
1051 			   "'function_profile_enabled' entry\n");
1052 }
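
/*
 * The resulting interface, assuming debugfs is mounted in its usual
 * location: writing to the control file toggles the profiler, and the
 * per-cpu stat files registered above expose the results, e.g.
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 */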
1053 
1054 #else /* CONFIG_FUNCTION_PROFILER */
1055 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1056 {
1057 }
1058 #endif /* CONFIG_FUNCTION_PROFILER */
1059 
1060 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1061 
1062 #ifdef CONFIG_DYNAMIC_FTRACE
1063 
1064 static struct ftrace_ops *removed_ops;
1065 
1066 /*
1067  * Set when doing a global update, like enabling all recs or disabling them.
1068  * It is not set when just updating a single ftrace_ops.
1069  */
1070 static bool update_all_ops;
1071 
1072 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1073 # error Dynamic ftrace depends on MCOUNT_RECORD
1074 #endif
1075 
1076 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1077 
1078 struct ftrace_func_probe {
1079 	struct hlist_node	node;
1080 	struct ftrace_probe_ops	*ops;
1081 	unsigned long		flags;
1082 	unsigned long		ip;
1083 	void			*data;
1084 	struct list_head	free_list;
1085 };
1086 
1087 struct ftrace_func_entry {
1088 	struct hlist_node hlist;
1089 	unsigned long ip;
1090 };
1091 
1092 struct ftrace_hash {
1093 	unsigned long		size_bits;
1094 	struct hlist_head	*buckets;
1095 	unsigned long		count;
1096 	struct rcu_head		rcu;
1097 };
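
/*
 * For a concrete sense of scale: a hash built with FTRACE_HASH_DEFAULT_BITS
 * has size_bits = 10, so 'buckets' points at an array of 1 << 10 = 1024
 * hlist_heads, and 'count' tracks how many ftrace_func_entry records are
 * currently chained across those buckets.
 */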
1098 
1099 /*
1100  * We make these constant because no one should touch them,
1101  * but they are used as the default "empty hash", to avoid allocating
1102  * it all the time. These are in a read only section such that if
1103  * anyone does try to modify it, it will cause an exception.
1104  */
1105 static const struct hlist_head empty_buckets[1];
1106 static const struct ftrace_hash empty_hash = {
1107 	.buckets = (struct hlist_head *)empty_buckets,
1108 };
1109 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1110 
1111 static struct ftrace_ops global_ops = {
1112 	.func				= ftrace_stub,
1113 	.local_hash.notrace_hash	= EMPTY_HASH,
1114 	.local_hash.filter_hash		= EMPTY_HASH,
1115 	INIT_OPS_HASH(global_ops)
1116 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
1117 					  FTRACE_OPS_FL_INITIALIZED,
1118 };
1119 
1120 /*
1121  * This is used by __kernel_text_address() to return true if the
1122  * address is on a dynamically allocated trampoline that would
1123  * not return true for either core_kernel_text() or
1124  * is_module_text_address().
1125  */
1126 bool is_ftrace_trampoline(unsigned long addr)
1127 {
1128 	struct ftrace_ops *op;
1129 	bool ret = false;
1130 
1131 	/*
1132 	 * Some of the ops may be dynamically allocated;
1133 	 * they are freed after a synchronize_sched().
1134 	 */
1135 	preempt_disable_notrace();
1136 
1137 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1138 		/*
1139 		 * This is to check for dynamically allocated trampolines.
1140 		 * Trampolines that are in kernel text will have
1141 		 * core_kernel_text() return true.
1142 		 */
1143 		if (op->trampoline && op->trampoline_size)
1144 			if (addr >= op->trampoline &&
1145 			    addr < op->trampoline + op->trampoline_size) {
1146 				ret = true;
1147 				goto out;
1148 			}
1149 	} while_for_each_ftrace_op(op);
1150 
1151  out:
1152 	preempt_enable_notrace();
1153 
1154 	return ret;
1155 }
1156 
1157 struct ftrace_page {
1158 	struct ftrace_page	*next;
1159 	struct dyn_ftrace	*records;
1160 	int			index;
1161 	int			size;
1162 };
1163 
1164 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1165 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1166 
1167 /* estimate from running different kernels */
1168 #define NR_TO_INIT		10000
1169 
1170 static struct ftrace_page	*ftrace_pages_start;
1171 static struct ftrace_page	*ftrace_pages;
1172 
1173 static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
1174 {
1175 	return !hash || !hash->count;
1176 }
1177 
1178 static struct ftrace_func_entry *
1179 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1180 {
1181 	unsigned long key;
1182 	struct ftrace_func_entry *entry;
1183 	struct hlist_head *hhd;
1184 
1185 	if (ftrace_hash_empty(hash))
1186 		return NULL;
1187 
1188 	if (hash->size_bits > 0)
1189 		key = hash_long(ip, hash->size_bits);
1190 	else
1191 		key = 0;
1192 
1193 	hhd = &hash->buckets[key];
1194 
1195 	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1196 		if (entry->ip == ip)
1197 			return entry;
1198 	}
1199 	return NULL;
1200 }
1201 
1202 static void __add_hash_entry(struct ftrace_hash *hash,
1203 			     struct ftrace_func_entry *entry)
1204 {
1205 	struct hlist_head *hhd;
1206 	unsigned long key;
1207 
1208 	if (hash->size_bits)
1209 		key = hash_long(entry->ip, hash->size_bits);
1210 	else
1211 		key = 0;
1212 
1213 	hhd = &hash->buckets[key];
1214 	hlist_add_head(&entry->hlist, hhd);
1215 	hash->count++;
1216 }
1217 
1218 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1219 {
1220 	struct ftrace_func_entry *entry;
1221 
1222 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1223 	if (!entry)
1224 		return -ENOMEM;
1225 
1226 	entry->ip = ip;
1227 	__add_hash_entry(hash, entry);
1228 
1229 	return 0;
1230 }
1231 
1232 static void
1233 free_hash_entry(struct ftrace_hash *hash,
1234 		  struct ftrace_func_entry *entry)
1235 {
1236 	hlist_del(&entry->hlist);
1237 	kfree(entry);
1238 	hash->count--;
1239 }
1240 
1241 static void
1242 remove_hash_entry(struct ftrace_hash *hash,
1243 		  struct ftrace_func_entry *entry)
1244 {
1245 	hlist_del(&entry->hlist);
1246 	hash->count--;
1247 }
1248 
1249 static void ftrace_hash_clear(struct ftrace_hash *hash)
1250 {
1251 	struct hlist_head *hhd;
1252 	struct hlist_node *tn;
1253 	struct ftrace_func_entry *entry;
1254 	int size = 1 << hash->size_bits;
1255 	int i;
1256 
1257 	if (!hash->count)
1258 		return;
1259 
1260 	for (i = 0; i < size; i++) {
1261 		hhd = &hash->buckets[i];
1262 		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1263 			free_hash_entry(hash, entry);
1264 	}
1265 	FTRACE_WARN_ON(hash->count);
1266 }
1267 
1268 static void free_ftrace_hash(struct ftrace_hash *hash)
1269 {
1270 	if (!hash || hash == EMPTY_HASH)
1271 		return;
1272 	ftrace_hash_clear(hash);
1273 	kfree(hash->buckets);
1274 	kfree(hash);
1275 }
1276 
1277 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1278 {
1279 	struct ftrace_hash *hash;
1280 
1281 	hash = container_of(rcu, struct ftrace_hash, rcu);
1282 	free_ftrace_hash(hash);
1283 }
1284 
1285 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1286 {
1287 	if (!hash || hash == EMPTY_HASH)
1288 		return;
1289 	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1290 }
1291 
1292 void ftrace_free_filter(struct ftrace_ops *ops)
1293 {
1294 	ftrace_ops_init(ops);
1295 	free_ftrace_hash(ops->func_hash->filter_hash);
1296 	free_ftrace_hash(ops->func_hash->notrace_hash);
1297 }
1298 
1299 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1300 {
1301 	struct ftrace_hash *hash;
1302 	int size;
1303 
1304 	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1305 	if (!hash)
1306 		return NULL;
1307 
1308 	size = 1 << size_bits;
1309 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1310 
1311 	if (!hash->buckets) {
1312 		kfree(hash);
1313 		return NULL;
1314 	}
1315 
1316 	hash->size_bits = size_bits;
1317 
1318 	return hash;
1319 }
1320 
1321 static struct ftrace_hash *
1322 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1323 {
1324 	struct ftrace_func_entry *entry;
1325 	struct ftrace_hash *new_hash;
1326 	int size;
1327 	int ret;
1328 	int i;
1329 
1330 	new_hash = alloc_ftrace_hash(size_bits);
1331 	if (!new_hash)
1332 		return NULL;
1333 
1334 	/* Empty hash? */
1335 	if (ftrace_hash_empty(hash))
1336 		return new_hash;
1337 
1338 	size = 1 << hash->size_bits;
1339 	for (i = 0; i < size; i++) {
1340 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1341 			ret = add_hash_entry(new_hash, entry->ip);
1342 			if (ret < 0)
1343 				goto free_hash;
1344 		}
1345 	}
1346 
1347 	FTRACE_WARN_ON(new_hash->count != hash->count);
1348 
1349 	return new_hash;
1350 
1351  free_hash:
1352 	free_ftrace_hash(new_hash);
1353 	return NULL;
1354 }
1355 
1356 static void
1357 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1358 static void
1359 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1360 
1361 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1362 				       struct ftrace_hash *new_hash);
1363 
1364 static int
1365 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1366 		 struct ftrace_hash **dst, struct ftrace_hash *src)
1367 {
1368 	struct ftrace_func_entry *entry;
1369 	struct hlist_node *tn;
1370 	struct hlist_head *hhd;
1371 	struct ftrace_hash *new_hash;
1372 	int size = src->count;
1373 	int bits = 0;
1374 	int ret;
1375 	int i;
1376 
1377 	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
1378 	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1379 		return -EINVAL;
1380 
1381 	/*
1382 	 * If the new source is empty, just free dst and assign it
1383 	 * the empty_hash.
1384 	 */
1385 	if (!src->count) {
1386 		new_hash = EMPTY_HASH;
1387 		goto update;
1388 	}
1389 
1390 	/*
1391 	 * Make the hash size about 1/2 the # found
1392 	 */
1393 	for (size /= 2; size; size >>= 1)
1394 		bits++;
1395 
1396 	/* Don't allocate too much */
1397 	if (bits > FTRACE_HASH_MAX_BITS)
1398 		bits = FTRACE_HASH_MAX_BITS;
1399 
1400 	new_hash = alloc_ftrace_hash(bits);
1401 	if (!new_hash)
1402 		return -ENOMEM;
1403 
1404 	size = 1 << src->size_bits;
1405 	for (i = 0; i < size; i++) {
1406 		hhd = &src->buckets[i];
1407 		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1408 			remove_hash_entry(src, entry);
1409 			__add_hash_entry(new_hash, entry);
1410 		}
1411 	}
1412 
1413 update:
1414 	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1415 	if (enable) {
1416 		/* IPMODIFY should be updated only when filter_hash updating */
1417 		ret = ftrace_hash_ipmodify_update(ops, new_hash);
1418 		if (ret < 0) {
1419 			free_ftrace_hash(new_hash);
1420 			return ret;
1421 		}
1422 	}
1423 
1424 	/*
1425 	 * Remove the current set, update the hash and add
1426 	 * them back.
1427 	 */
1428 	ftrace_hash_rec_disable_modify(ops, enable);
1429 
1430 	rcu_assign_pointer(*dst, new_hash);
1431 
1432 	ftrace_hash_rec_enable_modify(ops, enable);
1433 
1434 	return 0;
1435 }
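
/*
 * Worked example of the sizing loop above: moving a source hash with
 * 1000 entries starts from size = 500 and shifts it down to zero in
 * nine steps, so bits = 9 and the new hash gets 1 << 9 = 512 buckets,
 * i.e. roughly half as many buckets as entries.
 */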
1436 
1437 static bool hash_contains_ip(unsigned long ip,
1438 			     struct ftrace_ops_hash *hash)
1439 {
1440 	/*
1441 	 * The function record is a match if it exists in the filter
1442 	 * hash and not in the notrace hash. Note, an empty hash is
1443 	 * considered a match for the filter hash, but an empty
1444 	 * notrace hash is considered not in the notrace hash.
1445 	 */
1446 	return (ftrace_hash_empty(hash->filter_hash) ||
1447 		ftrace_lookup_ip(hash->filter_hash, ip)) &&
1448 		(ftrace_hash_empty(hash->notrace_hash) ||
1449 		 !ftrace_lookup_ip(hash->notrace_hash, ip));
1450 }
1451 
1452 /*
1453  * Test the hashes for this ops to see if we want to call
1454  * the ops->func or not.
1455  *
1456  * It's a match if the ip is in the ops->filter_hash or
1457  * the filter_hash does not exist or is empty,
1458  *  AND
1459  * the ip is not in the ops->notrace_hash.
1460  *
1461  * This needs to be called with preemption disabled as
1462  * the hashes are freed with call_rcu_sched().
1463  */
1464 static int
1465 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1466 {
1467 	struct ftrace_ops_hash hash;
1468 	int ret;
1469 
1470 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1471 	/*
1472 	 * There's a small race when adding ops that the ftrace handler
1473 	 * that wants regs, may be called without them. We can not
1474 	 * allow that handler to be called if regs is NULL.
1475 	 */
1476 	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1477 		return 0;
1478 #endif
1479 
1480 	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1481 	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
1482 
1483 	if (hash_contains_ip(ip, &hash))
1484 		ret = 1;
1485 	else
1486 		ret = 0;
1487 
1488 	return ret;
1489 }
1490 
1491 /*
1492  * This is a double for. Do not use 'break' to break out of the loop,
1493  * you must use a goto.
1494  */
1495 #define do_for_each_ftrace_rec(pg, rec)					\
1496 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1497 		int _____i;						\
1498 		for (_____i = 0; _____i < pg->index; _____i++) {	\
1499 			rec = &pg->records[_____i];
1500 
1501 #define while_for_each_ftrace_rec()		\
1502 		}				\
1503 	}
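
/*
 * A minimal usage sketch; because the construct above expands to two
 * nested for loops, an early exit must use goto rather than break
 * (the record test here is hypothetical):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *    found:
 *	...
 */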
1504 
1505 
1506 static int ftrace_cmp_recs(const void *a, const void *b)
1507 {
1508 	const struct dyn_ftrace *key = a;
1509 	const struct dyn_ftrace *rec = b;
1510 
1511 	if (key->flags < rec->ip)
1512 		return -1;
1513 	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1514 		return 1;
1515 	return 0;
1516 }
1517 
1518 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1519 {
1520 	struct ftrace_page *pg;
1521 	struct dyn_ftrace *rec;
1522 	struct dyn_ftrace key;
1523 
1524 	key.ip = start;
1525 	key.flags = end;	/* overload flags, as it is unsigned long */
1526 
1527 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1528 		if (end < pg->records[0].ip ||
1529 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1530 			continue;
1531 		rec = bsearch(&key, pg->records, pg->index,
1532 			      sizeof(struct dyn_ftrace),
1533 			      ftrace_cmp_recs);
1534 		if (rec)
1535 			return rec->ip;
1536 	}
1537 
1538 	return 0;
1539 }
1540 
1541 /**
1542  * ftrace_location - return true if the ip given is a traced location
1543  * @ip: the instruction pointer to check
1544  *
1545  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1546  * That is, the instruction that is either a NOP or call to
1547  * the function tracer. It checks the ftrace internal tables to
1548  * determine if the address belongs or not.
1549  */
1550 unsigned long ftrace_location(unsigned long ip)
1551 {
1552 	return ftrace_location_range(ip, ip);
1553 }
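
/*
 * A hypothetical caller, checking whether an address it is about to
 * patch is one of the mcount/fentry sites ftrace manages (kprobes does
 * a check of this kind before deciding how to arm a probe):
 *
 *	if (ftrace_location(addr))
 *		pr_debug("%pS is an ftrace-managed call site\n",
 *			 (void *)addr);
 */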
1554 
1555 /**
1556  * ftrace_text_reserved - return true if range contains an ftrace location
1557  * @start: start of range to search
1558  * @end: end of range to search (inclusive). @end points to the last byte to check.
1559  *
1560  * Returns 1 if the range from @start to @end contains an ftrace location.
1561  * That is, the instruction that is either a NOP or call to
1562  * the function tracer. It checks the ftrace internal tables to
1563  * determine if the address belongs or not.
1564  */
1565 int ftrace_text_reserved(const void *start, const void *end)
1566 {
1567 	unsigned long ret;
1568 
1569 	ret = ftrace_location_range((unsigned long)start,
1570 				    (unsigned long)end);
1571 
1572 	return (int)!!ret;
1573 }
1574 
1575 /* Test if ops registered to this rec needs regs */
1576 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1577 {
1578 	struct ftrace_ops *ops;
1579 	bool keep_regs = false;
1580 
1581 	for (ops = ftrace_ops_list;
1582 	     ops != &ftrace_list_end; ops = ops->next) {
1583 		/* pass rec in as regs to have non-NULL val */
1584 		if (ftrace_ops_test(ops, rec->ip, rec)) {
1585 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1586 				keep_regs = true;
1587 				break;
1588 			}
1589 		}
1590 	}
1591 
1592 	return  keep_regs;
1593 }
1594 
1595 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1596 				     int filter_hash,
1597 				     bool inc)
1598 {
1599 	struct ftrace_hash *hash;
1600 	struct ftrace_hash *other_hash;
1601 	struct ftrace_page *pg;
1602 	struct dyn_ftrace *rec;
1603 	int count = 0;
1604 	int all = 0;
1605 
1606 	/* Only update if the ops has been registered */
1607 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1608 		return;
1609 
1610 	/*
1611 	 * In the filter_hash case:
1612 	 *   If the count is zero, we update all records.
1613 	 *   Otherwise we just update the items in the hash.
1614 	 *
1615 	 * In the notrace_hash case:
1616 	 *   We enable the update in the hash.
1617 	 *   As disabling notrace means enabling the tracing,
1618 	 *   and enabling notrace means disabling, the inc variable
1619 	 *   gets inverted.
1620 	 */
1621 	if (filter_hash) {
1622 		hash = ops->func_hash->filter_hash;
1623 		other_hash = ops->func_hash->notrace_hash;
1624 		if (ftrace_hash_empty(hash))
1625 			all = 1;
1626 	} else {
1627 		inc = !inc;
1628 		hash = ops->func_hash->notrace_hash;
1629 		other_hash = ops->func_hash->filter_hash;
1630 		/*
1631 		 * If the notrace hash has no items,
1632 		 * then there's nothing to do.
1633 		 */
1634 		if (ftrace_hash_empty(hash))
1635 			return;
1636 	}
1637 
1638 	do_for_each_ftrace_rec(pg, rec) {
1639 		int in_other_hash = 0;
1640 		int in_hash = 0;
1641 		int match = 0;
1642 
1643 		if (all) {
1644 			/*
1645 			 * Only the filter_hash affects all records.
1646 			 * Update if the record is not in the notrace hash.
1647 			 */
1648 			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1649 				match = 1;
1650 		} else {
1651 			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1652 			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1653 
1654 			/*
1655 			 * If filter_hash is set, we want to match all functions
1656 			 * that are in the hash but not in the other hash.
1657 			 *
1658 			 * If filter_hash is not set, then we are decrementing.
1659 			 * That means we match anything that is in the hash
1660 			 * and also in the other_hash. That is, we need to turn
1661 			 * off functions in the other hash because they are disabled
1662 			 * by this hash.
1663 			 */
1664 			if (filter_hash && in_hash && !in_other_hash)
1665 				match = 1;
1666 			else if (!filter_hash && in_hash &&
1667 				 (in_other_hash || ftrace_hash_empty(other_hash)))
1668 				match = 1;
1669 		}
1670 		if (!match)
1671 			continue;
1672 
1673 		if (inc) {
1674 			rec->flags++;
1675 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1676 				return;
1677 
1678 			/*
1679 			 * If there's only a single callback registered to a
1680 			 * function, and the ops has a trampoline registered
1681 			 * for it, then we can call it directly.
1682 			 */
1683 			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1684 				rec->flags |= FTRACE_FL_TRAMP;
1685 			else
1686 				/*
1687 				 * If we are adding another function callback
1688 				 * to this function, and the previous had a
1689 				 * custom trampoline in use, then we need to go
1690 				 * back to the default trampoline.
1691 				 */
1692 				rec->flags &= ~FTRACE_FL_TRAMP;
1693 
1694 			/*
1695 			 * If any ops wants regs saved for this function
1696 			 * then all ops will get saved regs.
1697 			 */
1698 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1699 				rec->flags |= FTRACE_FL_REGS;
1700 		} else {
1701 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1702 				return;
1703 			rec->flags--;
1704 
1705 			/*
1706 			 * If the rec had REGS enabled and the ops that is
1707 			 * being removed had REGS set, then see if there is
1708 			 * still any ops for this record that wants regs.
1709 			 * If not, we can stop recording them.
1710 			 */
1711 			if (ftrace_rec_count(rec) > 0 &&
1712 			    rec->flags & FTRACE_FL_REGS &&
1713 			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1714 				if (!test_rec_ops_needs_regs(rec))
1715 					rec->flags &= ~FTRACE_FL_REGS;
1716 			}
1717 
1718 			/*
1719 			 * If the rec had TRAMP enabled, then it needs to
1720 			 * be cleared. As TRAMP can only be enabled if
1721 			 * there is only a single ops attached to it.
1722 			 * In other words, always disable it on decrementing.
1723 			 * In the future, we may set it if rec count is
1724 			 * decremented to one, and the ops that is left
1725 			 * has a trampoline.
1726 			 */
1727 			rec->flags &= ~FTRACE_FL_TRAMP;
1728 
1729 			/*
1730 			 * flags will be cleared in ftrace_check_record()
1731 			 * if rec count is zero.
1732 			 */
1733 		}
1734 		count++;
1735 		/* Shortcut, if we handled all records, we are done. */
1736 		if (!all && count == hash->count)
1737 			return;
1738 	} while_for_each_ftrace_rec();
1739 }
1740 
1741 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1742 				    int filter_hash)
1743 {
1744 	__ftrace_hash_rec_update(ops, filter_hash, 0);
1745 }
1746 
1747 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1748 				   int filter_hash)
1749 {
1750 	__ftrace_hash_rec_update(ops, filter_hash, 1);
1751 }
1752 
1753 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1754 					  int filter_hash, int inc)
1755 {
1756 	struct ftrace_ops *op;
1757 
1758 	__ftrace_hash_rec_update(ops, filter_hash, inc);
1759 
1760 	if (ops->func_hash != &global_ops.local_hash)
1761 		return;
1762 
1763 	/*
1764 	 * If the ops shares the global_ops hash, then we need to update
1765 	 * all ops that are enabled and use this hash.
1766 	 */
1767 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1768 		/* Already done */
1769 		if (op == ops)
1770 			continue;
1771 		if (op->func_hash == &global_ops.local_hash)
1772 			__ftrace_hash_rec_update(op, filter_hash, inc);
1773 	} while_for_each_ftrace_op(op);
1774 }
1775 
1776 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1777 					   int filter_hash)
1778 {
1779 	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1780 }
1781 
1782 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1783 					  int filter_hash)
1784 {
1785 	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1786 }
1787 
1788 /*
1789  * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1790  * or no-needed to update, -EBUSY if it detects a conflict of the flag
1791  * or no update is needed, -EBUSY if it detects a conflict of the flag
1792  * on an ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1793  * Note that old_hash and new_hash have the following meanings:
1794  *  - If the hash is EMPTY_HASH, it hits nothing
1795  *  - Anything else hits the recs which match the hash entries.
1796  */
1797 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1798 					 struct ftrace_hash *old_hash,
1799 					 struct ftrace_hash *new_hash)
1800 {
1801 	struct ftrace_page *pg;
1802 	struct dyn_ftrace *rec, *end = NULL;
1803 	int in_old, in_new;
1804 
1805 	/* Only update if the ops has been registered */
1806 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1807 		return 0;
1808 
1809 	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1810 		return 0;
1811 
1812 	/*
1813 	 * Since IPMODIFY is a very address-sensitive action, we do not
1814 	 * allow an ftrace_ops to apply a new hash that matches all functions.
1815 	 */
1816 	if (!new_hash || !old_hash)
1817 		return -EINVAL;
1818 
1819 	/* Update rec->flags */
1820 	do_for_each_ftrace_rec(pg, rec) {
1821 		/* We need to update only differences of filter_hash */
1822 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1823 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1824 		if (in_old == in_new)
1825 			continue;
1826 
1827 		if (in_new) {
1828 			/* New entries must ensure no others are using it */
1829 			if (rec->flags & FTRACE_FL_IPMODIFY)
1830 				goto rollback;
1831 			rec->flags |= FTRACE_FL_IPMODIFY;
1832 		} else /* Removed entry */
1833 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1834 	} while_for_each_ftrace_rec();
1835 
1836 	return 0;
1837 
1838 rollback:
1839 	end = rec;
1840 
1841 	/* Roll back what we did above */
1842 	do_for_each_ftrace_rec(pg, rec) {
1843 		if (rec == end)
1844 			goto err_out;
1845 
1846 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1847 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1848 		if (in_old == in_new)
1849 			continue;
1850 
1851 		if (in_new)
1852 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1853 		else
1854 			rec->flags |= FTRACE_FL_IPMODIFY;
1855 	} while_for_each_ftrace_rec();
1856 
1857 err_out:
1858 	return -EBUSY;
1859 }
1860 
1861 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1862 {
1863 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1864 
1865 	if (ftrace_hash_empty(hash))
1866 		hash = NULL;
1867 
1868 	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1869 }
1870 
1871 /* Disabling always succeeds */
1872 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1873 {
1874 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1875 
1876 	if (ftrace_hash_empty(hash))
1877 		hash = NULL;
1878 
1879 	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1880 }
1881 
1882 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1883 				       struct ftrace_hash *new_hash)
1884 {
1885 	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1886 
1887 	if (ftrace_hash_empty(old_hash))
1888 		old_hash = NULL;
1889 
1890 	if (ftrace_hash_empty(new_hash))
1891 		new_hash = NULL;
1892 
1893 	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1894 }
1895 
1896 static void print_ip_ins(const char *fmt, unsigned char *p)
1897 {
1898 	int i;
1899 
1900 	printk(KERN_CONT "%s", fmt);
1901 
1902 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1903 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1904 }
1905 
1906 static struct ftrace_ops *
1907 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1908 
1909 /**
1910  * ftrace_bug - report and shutdown function tracer
1911  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1912  * @rec: The record that failed
1913  *
1914  * The arch code that enables or disables the function tracing
1915  * can call ftrace_bug() when it has detected a problem in
1916  * modifying the code. @failed should be one of either:
1917  * modifying the code. @failed should be one of:
1918  * EFAULT - if the problem happens on reading the @ip address
1919  * EINVAL - if what is read at @ip is not what was expected
1920  * EPERM - if the problem happens on writing to the @ip address
1921 void ftrace_bug(int failed, struct dyn_ftrace *rec)
1922 {
1923 	unsigned long ip = rec ? rec->ip : 0;
1924 
1925 	switch (failed) {
1926 	case -EFAULT:
1927 		FTRACE_WARN_ON_ONCE(1);
1928 		pr_info("ftrace faulted on modifying ");
1929 		print_ip_sym(ip);
1930 		break;
1931 	case -EINVAL:
1932 		FTRACE_WARN_ON_ONCE(1);
1933 		pr_info("ftrace failed to modify ");
1934 		print_ip_sym(ip);
1935 		print_ip_ins(" actual: ", (unsigned char *)ip);
1936 		pr_cont("\n");
1937 		break;
1938 	case -EPERM:
1939 		FTRACE_WARN_ON_ONCE(1);
1940 		pr_info("ftrace faulted on writing ");
1941 		print_ip_sym(ip);
1942 		break;
1943 	default:
1944 		FTRACE_WARN_ON_ONCE(1);
1945 		pr_info("ftrace faulted on unknown error ");
1946 		print_ip_sym(ip);
1947 	}
1948 	if (rec) {
1949 		struct ftrace_ops *ops = NULL;
1950 
1951 		pr_info("ftrace record flags: %lx\n", rec->flags);
1952 		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
1953 			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
1954 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
1955 			ops = ftrace_find_tramp_ops_any(rec);
1956 			if (ops)
1957 				pr_cont("\ttramp: %pS",
1958 					(void *)ops->trampoline);
1959 			else
1960 				pr_cont("\ttramp: ERROR!");
1961 
1962 		}
1963 		ip = ftrace_get_addr_curr(rec);
1964 		pr_cont(" expected tramp: %lx\n", ip);
1965 	}
1966 }
1967 
1968 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1969 {
1970 	unsigned long flag = 0UL;
1971 
1972 	/*
1973 	 * If we are updating calls:
1974 	 *
1975 	 *   If the record has a ref count, then we need to enable it
1976 	 *   because someone is using it.
1977 	 *
1978 	 *   Otherwise we make sure its disabled.
1979 	 *
1980 	 * If we are disabling calls, then disable all records that
1981 	 * are enabled.
1982 	 */
1983 	if (enable && ftrace_rec_count(rec))
1984 		flag = FTRACE_FL_ENABLED;
1985 
1986 	/*
1987 	 * If enabling and the REGS flag does not match the REGS_EN, or
1988 	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
1989 	 * this record. Set flags to fail the compare against ENABLED.
1990 	 */
1991 	if (flag) {
1992 		if (!(rec->flags & FTRACE_FL_REGS) !=
1993 		    !(rec->flags & FTRACE_FL_REGS_EN))
1994 			flag |= FTRACE_FL_REGS;
1995 
1996 		if (!(rec->flags & FTRACE_FL_TRAMP) !=
1997 		    !(rec->flags & FTRACE_FL_TRAMP_EN))
1998 			flag |= FTRACE_FL_TRAMP;
1999 	}
2000 
2001 	/* If the state of this record hasn't changed, then do nothing */
2002 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2003 		return FTRACE_UPDATE_IGNORE;
2004 
2005 	if (flag) {
2006 		/* Save off if rec is being enabled (for return value) */
2007 		flag ^= rec->flags & FTRACE_FL_ENABLED;
2008 
2009 		if (update) {
2010 			rec->flags |= FTRACE_FL_ENABLED;
2011 			if (flag & FTRACE_FL_REGS) {
2012 				if (rec->flags & FTRACE_FL_REGS)
2013 					rec->flags |= FTRACE_FL_REGS_EN;
2014 				else
2015 					rec->flags &= ~FTRACE_FL_REGS_EN;
2016 			}
2017 			if (flag & FTRACE_FL_TRAMP) {
2018 				if (rec->flags & FTRACE_FL_TRAMP)
2019 					rec->flags |= FTRACE_FL_TRAMP_EN;
2020 				else
2021 					rec->flags &= ~FTRACE_FL_TRAMP_EN;
2022 			}
2023 		}
2024 
2025 		/*
2026 		 * If this record is being updated from a nop, then
2027 		 *   return UPDATE_MAKE_CALL.
2028 		 * Otherwise,
2029 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2030 		 *   from the save regs, to a non-save regs function or
2031 		 *   vice versa, or from a trampoline call.
2032 		 */
2033 		if (flag & FTRACE_FL_ENABLED)
2034 			return FTRACE_UPDATE_MAKE_CALL;
2035 
2036 		return FTRACE_UPDATE_MODIFY_CALL;
2037 	}
2038 
2039 	if (update) {
2040 		/* If there's no more users, clear all flags */
2041 		if (!ftrace_rec_count(rec))
2042 			rec->flags = 0;
2043 		else
2044 			/* Just disable the record (keep REGS state) */
2045 			rec->flags &= ~FTRACE_FL_ENABLED;
2046 	}
2047 
2048 	return FTRACE_UPDATE_MAKE_NOP;
2049 }
2050 
2051 /**
2052  * ftrace_update_record - set a record that is now tracing or not
2053  * @rec: the record to update
2054  * @enable: set to 1 if the record is tracing, zero to force disable
2055  *
2056  * The records that represent all functions that can be traced need
2057  * to be updated when tracing has been enabled.
2058  */
2059 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2060 {
2061 	return ftrace_check_record(rec, enable, 1);
2062 }
2063 
2064 /**
2065  * ftrace_test_record - check if the record has been enabled or not
2066  * @rec: the record to test
2067  * @enable: set to 1 to check if enabled, 0 if it is disabled
2068  *
2069  * The arch code may need to test if a record is already set to
2070  * tracing to determine how to modify the function code that it
2071  * represents.
2072  */
2073 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2074 {
2075 	return ftrace_check_record(rec, enable, 0);
2076 }
2077 
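/*
 * Illustrative sketch (not part of this file): arch update code can use
 * ftrace_test_record() to see what a call site should become without
 * committing the state change, and commit it later via
 * ftrace_update_record().  The helpers arch_prepare_call() and
 * arch_prepare_nop() below are hypothetical placeholders for the arch's
 * own patch preparation.
 *
 *	static int arch_add_update(struct dyn_ftrace *rec, int enable)
 *	{
 *		unsigned long ftrace_addr = ftrace_get_addr_new(rec);
 *
 *		switch (ftrace_test_record(rec, enable)) {
 *		case FTRACE_UPDATE_IGNORE:
 *			return 0;
 *		case FTRACE_UPDATE_MODIFY_CALL:
 *		case FTRACE_UPDATE_MAKE_CALL:
 *			return arch_prepare_call(rec, ftrace_addr);
 *		case FTRACE_UPDATE_MAKE_NOP:
 *			return arch_prepare_nop(rec);
 *		}
 *		return -EINVAL;
 *	}
 */
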
2078 static struct ftrace_ops *
2079 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2080 {
2081 	struct ftrace_ops *op;
2082 	unsigned long ip = rec->ip;
2083 
2084 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2085 
2086 		if (!op->trampoline)
2087 			continue;
2088 
2089 		if (hash_contains_ip(ip, op->func_hash))
2090 			return op;
2091 	} while_for_each_ftrace_op(op);
2092 
2093 	return NULL;
2094 }
2095 
2096 static struct ftrace_ops *
2097 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2098 {
2099 	struct ftrace_ops *op;
2100 	unsigned long ip = rec->ip;
2101 
2102 	/*
2103 	 * Need to check removed ops first.
2104 	 * If they are being removed, and this rec has a tramp,
2105 	 * and this rec is in the ops list, then it would be the
2106 	 * one with the tramp.
2107 	 */
2108 	if (removed_ops) {
2109 		if (hash_contains_ip(ip, &removed_ops->old_hash))
2110 			return removed_ops;
2111 	}
2112 
2113 	/*
2114 	 * Need to find the current trampoline for a rec.
2115 	 * Now, a trampoline is only attached to a rec if there
2116 	 * was a single 'ops' attached to it. But this can be called
2117 	 * when we are adding another op to the rec or removing the
2118 	 * current one. Thus, if the op is being added, we can
2119 	 * ignore it because it hasn't attached itself to the rec
2120 	 * yet.
2121 	 *
2122 	 * If an ops is being modified (hooking to different functions)
2123 	 * then we don't care about the new functions that are being
2124 	 * added, just the old ones (that are probably being removed).
2125 	 *
2126 	 * If we are adding an ops to a function that is already using
2127 	 * a trampoline, the trampoline needs to be removed (trampolines
2128 	 * are only for a single connected ops), so an ops that is not
2129 	 * being modified also needs to be checked.
2130 	 */
2131 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2132 
2133 		if (!op->trampoline)
2134 			continue;
2135 
2136 		/*
2137 		 * If the ops is being added, it hasn't gotten to
2138 		 * the point to be removed from this tree yet.
2139 		 */
2140 		if (op->flags & FTRACE_OPS_FL_ADDING)
2141 			continue;
2142 
2143 
2144 		/*
2145 		 * If the ops is being modified and is in the old
2146 		 * hash, then it is probably being removed from this
2147 		 * function.
2148 		 */
2149 		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2150 		    hash_contains_ip(ip, &op->old_hash))
2151 			return op;
2152 		/*
2153 		 * If the ops is not being added or modified, and it's
2154 		 * in its normal filter hash, then this must be the one
2155 		 * we want!
2156 		 */
2157 		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2158 		    hash_contains_ip(ip, op->func_hash))
2159 			return op;
2160 
2161 	} while_for_each_ftrace_op(op);
2162 
2163 	return NULL;
2164 }
2165 
2166 static struct ftrace_ops *
2167 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2168 {
2169 	struct ftrace_ops *op;
2170 	unsigned long ip = rec->ip;
2171 
2172 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2173 		/* pass rec in as regs to have non-NULL val */
2174 		if (hash_contains_ip(ip, op->func_hash))
2175 			return op;
2176 	} while_for_each_ftrace_op(op);
2177 
2178 	return NULL;
2179 }
2180 
2181 /**
2182  * ftrace_get_addr_new - Get the call address to set to
2183  * @rec:  The ftrace record descriptor
2184  *
2185  * If the record has the FTRACE_FL_REGS set, that means that it
2186  * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2187  * is not set, then it wants to convert to the normal callback.
2188  *
2189  * Returns the address of the trampoline to set to
2190  */
2191 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2192 {
2193 	struct ftrace_ops *ops;
2194 
2195 	/* Trampolines take precedence over regs */
2196 	if (rec->flags & FTRACE_FL_TRAMP) {
2197 		ops = ftrace_find_tramp_ops_new(rec);
2198 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2199 			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2200 				(void *)rec->ip, (void *)rec->ip, rec->flags);
2201 			/* Ftrace is shutting down, return anything */
2202 			return (unsigned long)FTRACE_ADDR;
2203 		}
2204 		return ops->trampoline;
2205 	}
2206 
2207 	if (rec->flags & FTRACE_FL_REGS)
2208 		return (unsigned long)FTRACE_REGS_ADDR;
2209 	else
2210 		return (unsigned long)FTRACE_ADDR;
2211 }
2212 
2213 /**
2214  * ftrace_get_addr_curr - Get the call address that is already there
2215  * @rec:  The ftrace record descriptor
2216  *
2217  * The FTRACE_FL_REGS_EN is set when the record already points to
2218  * a function that saves all the regs. Basically the '_EN' version
2219  * represents the current state of the function.
2220  *
2221  * Returns the address of the trampoline that is currently being called
2222  */
2223 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2224 {
2225 	struct ftrace_ops *ops;
2226 
2227 	/* Trampolines take precedence over regs */
2228 	if (rec->flags & FTRACE_FL_TRAMP_EN) {
2229 		ops = ftrace_find_tramp_ops_curr(rec);
2230 		if (FTRACE_WARN_ON(!ops)) {
2231 			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2232 				(void *)rec->ip, (void *)rec->ip);
2233 			/* Ftrace is shutting down, return anything */
2234 			return (unsigned long)FTRACE_ADDR;
2235 		}
2236 		return ops->trampoline;
2237 	}
2238 
2239 	if (rec->flags & FTRACE_FL_REGS_EN)
2240 		return (unsigned long)FTRACE_REGS_ADDR;
2241 	else
2242 		return (unsigned long)FTRACE_ADDR;
2243 }
2244 
2245 static int
2246 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2247 {
2248 	unsigned long ftrace_old_addr;
2249 	unsigned long ftrace_addr;
2250 	int ret;
2251 
2252 	ftrace_addr = ftrace_get_addr_new(rec);
2253 
2254 	/* This needs to be done before we call ftrace_update_record */
2255 	ftrace_old_addr = ftrace_get_addr_curr(rec);
2256 
2257 	ret = ftrace_update_record(rec, enable);
2258 
2259 	switch (ret) {
2260 	case FTRACE_UPDATE_IGNORE:
2261 		return 0;
2262 
2263 	case FTRACE_UPDATE_MAKE_CALL:
2264 		return ftrace_make_call(rec, ftrace_addr);
2265 
2266 	case FTRACE_UPDATE_MAKE_NOP:
2267 		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2268 
2269 	case FTRACE_UPDATE_MODIFY_CALL:
2270 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2271 	}
2272 
2273 	return -1; /* unknown ftrace bug */
2274 }
2275 
2276 void __weak ftrace_replace_code(int enable)
2277 {
2278 	struct dyn_ftrace *rec;
2279 	struct ftrace_page *pg;
2280 	int failed;
2281 
2282 	if (unlikely(ftrace_disabled))
2283 		return;
2284 
2285 	do_for_each_ftrace_rec(pg, rec) {
2286 		failed = __ftrace_replace_code(rec, enable);
2287 		if (failed) {
2288 			ftrace_bug(failed, rec);
2289 			/* Stop processing */
2290 			return;
2291 		}
2292 	} while_for_each_ftrace_rec();
2293 }
2294 
2295 struct ftrace_rec_iter {
2296 	struct ftrace_page	*pg;
2297 	int			index;
2298 };
2299 
2300 /**
2301  * ftrace_rec_iter_start - start up iterating over traced functions
2302  *
2303  * Returns an iterator handle that is used to iterate over all
2304  * the records that represent address locations where functions
2305  * are traced.
2306  *
2307  * May return NULL if no records are available.
2308  */
2309 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2310 {
2311 	/*
2312 	 * We only use a single iterator.
2313 	 * Protected by the ftrace_lock mutex.
2314 	 */
2315 	static struct ftrace_rec_iter ftrace_rec_iter;
2316 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2317 
2318 	iter->pg = ftrace_pages_start;
2319 	iter->index = 0;
2320 
2321 	/* Could have empty pages */
2322 	while (iter->pg && !iter->pg->index)
2323 		iter->pg = iter->pg->next;
2324 
2325 	if (!iter->pg)
2326 		return NULL;
2327 
2328 	return iter;
2329 }
2330 
2331 /**
2332  * ftrace_rec_iter_next - get the next record to process.
2333  * @iter: The handle to the iterator.
2334  *
2335  * Returns the next iterator after the given iterator @iter.
2336  */
2337 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2338 {
2339 	iter->index++;
2340 
2341 	if (iter->index >= iter->pg->index) {
2342 		iter->pg = iter->pg->next;
2343 		iter->index = 0;
2344 
2345 		/* Could have empty pages */
2346 		while (iter->pg && !iter->pg->index)
2347 			iter->pg = iter->pg->next;
2348 	}
2349 
2350 	if (!iter->pg)
2351 		return NULL;
2352 
2353 	return iter;
2354 }
2355 
2356 /**
2357  * ftrace_rec_iter_record - get the record at the iterator location
2358  * @iter: The current iterator location
2359  *
2360  * Returns the record that the current @iter is at.
2361  */
2362 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2363 {
2364 	return &iter->pg->records[iter->index];
2365 }
2366 
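/*
 * Illustrative usage (a sketch, not part of this file): arch code that
 * drives its own batched text modification walks every record with the
 * iterator API above.  arch_patch_site() is a hypothetical helper.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		arch_patch_site(rec->ip);
 *	}
 */
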
2367 static int
2368 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2369 {
2370 	int ret;
2371 
2372 	if (unlikely(ftrace_disabled))
2373 		return 0;
2374 
2375 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2376 	if (ret) {
2377 		ftrace_bug(ret, rec);
2378 		return 0;
2379 	}
2380 	return 1;
2381 }
2382 
2383 /*
2384  * archs can override this function if they must do something
2385  * before the modifying code is performed.
2386  */
2387 int __weak ftrace_arch_code_modify_prepare(void)
2388 {
2389 	return 0;
2390 }
2391 
2392 /*
2393  * archs can override this function if they must do something
2394  * after the modifying code is performed.
2395  */
2396 int __weak ftrace_arch_code_modify_post_process(void)
2397 {
2398 	return 0;
2399 }
2400 
2401 void ftrace_modify_all_code(int command)
2402 {
2403 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2404 	int err = 0;
2405 
2406 	/*
2407 	 * If the ftrace_caller calls a ftrace_ops func directly,
2408 	 * we need to make sure that it only traces functions it
2409 	 * expects to trace. When doing the switch of functions,
2410 	 * we need to update to the ftrace_ops_list_func first
2411 	 * before the transition between old and new calls are set,
2412 	 * as the ftrace_ops_list_func will check the ops hashes
2413 	 * to make sure the ops are having the right functions
2414 	 * traced.
2415 	 */
2416 	if (update) {
2417 		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2418 		if (FTRACE_WARN_ON(err))
2419 			return;
2420 	}
2421 
2422 	if (command & FTRACE_UPDATE_CALLS)
2423 		ftrace_replace_code(1);
2424 	else if (command & FTRACE_DISABLE_CALLS)
2425 		ftrace_replace_code(0);
2426 
2427 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2428 		function_trace_op = set_function_trace_op;
2429 		smp_wmb();
2430 		/* If irqs are disabled, we are in stop machine */
2431 		if (!irqs_disabled())
2432 			smp_call_function(ftrace_sync_ipi, NULL, 1);
2433 		err = ftrace_update_ftrace_func(ftrace_trace_function);
2434 		if (FTRACE_WARN_ON(err))
2435 			return;
2436 	}
2437 
2438 	if (command & FTRACE_START_FUNC_RET)
2439 		err = ftrace_enable_ftrace_graph_caller();
2440 	else if (command & FTRACE_STOP_FUNC_RET)
2441 		err = ftrace_disable_ftrace_graph_caller();
2442 	FTRACE_WARN_ON(err);
2443 }
2444 
2445 static int __ftrace_modify_code(void *data)
2446 {
2447 	int *command = data;
2448 
2449 	ftrace_modify_all_code(*command);
2450 
2451 	return 0;
2452 }
2453 
2454 /**
2455  * ftrace_run_stop_machine - go back to the stop machine method
2456  * @command: The command to tell ftrace what to do
2457  *
2458  * If an arch needs to fall back to the stop machine method, then
2459  * it can call this function.
2460  */
2461 void ftrace_run_stop_machine(int command)
2462 {
2463 	stop_machine(__ftrace_modify_code, &command, NULL);
2464 }
2465 
2466 /**
2467  * arch_ftrace_update_code - modify the code to trace or not trace
2468  * @command: The command that needs to be done
2469  *
2470  * Archs can override this function if they do not need to
2471  * run stop_machine() to modify code.
2472  */
2473 void __weak arch_ftrace_update_code(int command)
2474 {
2475 	ftrace_run_stop_machine(command);
2476 }
2477 
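/*
 * A minimal sketch of an arch override (illustrative only): an
 * architecture whose text-patching scheme is already safe against
 * concurrent execution can avoid stop_machine() and run the update
 * directly.
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 */
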
2478 static void ftrace_run_update_code(int command)
2479 {
2480 	int ret;
2481 
2482 	ret = ftrace_arch_code_modify_prepare();
2483 	FTRACE_WARN_ON(ret);
2484 	if (ret)
2485 		return;
2486 
2487 	/*
2488 	 * By default we use stop_machine() to modify the code.
2489 	 * But archs can do whatever they want as long as it
2490 	 * is safe. The stop_machine() method is the safest, but also
2491 	 * produces the most overhead.
2492 	 */
2493 	arch_ftrace_update_code(command);
2494 
2495 	ret = ftrace_arch_code_modify_post_process();
2496 	FTRACE_WARN_ON(ret);
2497 }
2498 
2499 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2500 				   struct ftrace_hash *old_hash)
2501 {
2502 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2503 	ops->old_hash.filter_hash = old_hash;
2504 	ftrace_run_update_code(command);
2505 	ops->old_hash.filter_hash = NULL;
2506 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2507 }
2508 
2509 static ftrace_func_t saved_ftrace_func;
2510 static int ftrace_start_up;
2511 
2512 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2513 {
2514 }
2515 
2516 static void control_ops_free(struct ftrace_ops *ops)
2517 {
2518 	free_percpu(ops->disabled);
2519 }
2520 
2521 static void ftrace_startup_enable(int command)
2522 {
2523 	if (saved_ftrace_func != ftrace_trace_function) {
2524 		saved_ftrace_func = ftrace_trace_function;
2525 		command |= FTRACE_UPDATE_TRACE_FUNC;
2526 	}
2527 
2528 	if (!command || !ftrace_enabled)
2529 		return;
2530 
2531 	ftrace_run_update_code(command);
2532 }
2533 
2534 static void ftrace_startup_all(int command)
2535 {
2536 	update_all_ops = true;
2537 	ftrace_startup_enable(command);
2538 	update_all_ops = false;
2539 }
2540 
2541 static int ftrace_startup(struct ftrace_ops *ops, int command)
2542 {
2543 	int ret;
2544 
2545 	if (unlikely(ftrace_disabled))
2546 		return -ENODEV;
2547 
2548 	ret = __register_ftrace_function(ops);
2549 	if (ret)
2550 		return ret;
2551 
2552 	ftrace_start_up++;
2553 	command |= FTRACE_UPDATE_CALLS;
2554 
2555 	/*
2556 	 * Note that ftrace probes use this to start up
2557 	 * and modify the functions they will probe. But we still
2558 	 * set the ADDING flag for modification, as probes
2559 	 * do not have trampolines. If they add them in the
2560 	 * future, then the probes will need to distinguish
2561 	 * between adding and updating probes.
2562 	 */
2563 	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2564 
2565 	ret = ftrace_hash_ipmodify_enable(ops);
2566 	if (ret < 0) {
2567 		/* Rollback registration process */
2568 		__unregister_ftrace_function(ops);
2569 		ftrace_start_up--;
2570 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2571 		return ret;
2572 	}
2573 
2574 	ftrace_hash_rec_enable(ops, 1);
2575 
2576 	ftrace_startup_enable(command);
2577 
2578 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
2579 
2580 	return 0;
2581 }
2582 
2583 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2584 {
2585 	int ret;
2586 
2587 	if (unlikely(ftrace_disabled))
2588 		return -ENODEV;
2589 
2590 	ret = __unregister_ftrace_function(ops);
2591 	if (ret)
2592 		return ret;
2593 
2594 	ftrace_start_up--;
2595 	/*
2596 	 * Just warn in case of unbalance; no need to kill ftrace, it's not
2597 	 * critical, but the ftrace_call callers may never be nopped again after
2598 	 * further ftrace uses.
2599 	 */
2600 	WARN_ON_ONCE(ftrace_start_up < 0);
2601 
2602 	/* Disabling ipmodify never fails */
2603 	ftrace_hash_ipmodify_disable(ops);
2604 	ftrace_hash_rec_disable(ops, 1);
2605 
2606 	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2607 
2608 	command |= FTRACE_UPDATE_CALLS;
2609 
2610 	if (saved_ftrace_func != ftrace_trace_function) {
2611 		saved_ftrace_func = ftrace_trace_function;
2612 		command |= FTRACE_UPDATE_TRACE_FUNC;
2613 	}
2614 
2615 	if (!command || !ftrace_enabled) {
2616 		/*
2617 		 * If these are control ops, they still need their
2618 		 * per_cpu field freed. Since function tracing is
2619 		 * not currently active, we can just free them
2620 		 * without synchronizing all CPUs.
2621 		 */
2622 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
2623 			control_ops_free(ops);
2624 		return 0;
2625 	}
2626 
2627 	/*
2628 	 * If the ops uses a trampoline, then it needs to be
2629 	 * tested first on update.
2630 	 */
2631 	ops->flags |= FTRACE_OPS_FL_REMOVING;
2632 	removed_ops = ops;
2633 
2634 	/* The trampoline logic checks the old hashes */
2635 	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2636 	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2637 
2638 	ftrace_run_update_code(command);
2639 
2640 	/*
2641 	 * If there are no more ops registered with ftrace, run a
2642 	 * sanity check to make sure all rec flags are cleared.
2643 	 */
2644 	if (ftrace_ops_list == &ftrace_list_end) {
2645 		struct ftrace_page *pg;
2646 		struct dyn_ftrace *rec;
2647 
2648 		do_for_each_ftrace_rec(pg, rec) {
2649 			if (FTRACE_WARN_ON_ONCE(rec->flags))
2650 				pr_warn("  %pS flags:%lx\n",
2651 					(void *)rec->ip, rec->flags);
2652 		} while_for_each_ftrace_rec();
2653 	}
2654 
2655 	ops->old_hash.filter_hash = NULL;
2656 	ops->old_hash.notrace_hash = NULL;
2657 
2658 	removed_ops = NULL;
2659 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2660 
2661 	/*
2662 	 * Dynamic ops may be freed; we must make sure that all
2663 	 * callers are done before leaving this function.
2664 	 * The same goes for freeing the per_cpu data of the control
2665 	 * ops.
2666 	 *
2667 	 * Again, normal synchronize_sched() is not good enough.
2668 	 * We need to do a hard force of sched synchronization.
2669 	 * This is because we use preempt_disable() to do RCU, but
2670 	 * the function tracers can be called where RCU is not watching
2671 	 * (like before user_exit()). We can not rely on the RCU
2672 	 * infrastructure to do the synchronization, thus we must do it
2673 	 * ourselves.
2674 	 */
2675 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2676 		schedule_on_each_cpu(ftrace_sync);
2677 
2678 		arch_ftrace_trampoline_free(ops);
2679 
2680 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
2681 			control_ops_free(ops);
2682 	}
2683 
2684 	return 0;
2685 }
2686 
2687 static void ftrace_startup_sysctl(void)
2688 {
2689 	if (unlikely(ftrace_disabled))
2690 		return;
2691 
2692 	/* Force update next time */
2693 	saved_ftrace_func = NULL;
2694 	/* ftrace_start_up is true if we want ftrace running */
2695 	if (ftrace_start_up)
2696 		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2697 }
2698 
2699 static void ftrace_shutdown_sysctl(void)
2700 {
2701 	if (unlikely(ftrace_disabled))
2702 		return;
2703 
2704 	/* ftrace_start_up is true if ftrace is running */
2705 	if (ftrace_start_up)
2706 		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2707 }
2708 
2709 static cycle_t		ftrace_update_time;
2710 unsigned long		ftrace_update_tot_cnt;
2711 
2712 static inline int ops_traces_mod(struct ftrace_ops *ops)
2713 {
2714 	/*
2715 	 * An empty filter_hash defaults to tracing the module.
2716 	 * But the notrace hash requires a test of individual module functions.
2717 	 */
2718 	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2719 		ftrace_hash_empty(ops->func_hash->notrace_hash);
2720 }
2721 
2722 /*
2723  * Check if the current ops references the record.
2724  *
2725  * If the ops traces all functions, then it was already accounted for.
2726  * If the ops does not trace the current record function, skip it.
2727  * If the ops ignores the function via notrace filter, skip it.
2728  */
2729 static inline bool
2730 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2731 {
2732 	/* If ops isn't enabled, ignore it */
2733 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2734 		return 0;
2735 
2736 	/* If ops traces all mods, we already accounted for it */
2737 	if (ops_traces_mod(ops))
2738 		return 0;
2739 
2740 	/* The function must be in the filter */
2741 	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2742 	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2743 		return 0;
2744 
2745 	/* If in notrace hash, we ignore it too */
2746 	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2747 		return 0;
2748 
2749 	return 1;
2750 }
2751 
2752 static int referenced_filters(struct dyn_ftrace *rec)
2753 {
2754 	struct ftrace_ops *ops;
2755 	int cnt = 0;
2756 
2757 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2758 		if (ops_references_rec(ops, rec))
2759 			cnt++;
2760 	}
2761 
2762 	return cnt;
2763 }
2764 
2765 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2766 {
2767 	struct ftrace_page *pg;
2768 	struct dyn_ftrace *p;
2769 	cycle_t start, stop;
2770 	unsigned long update_cnt = 0;
2771 	unsigned long ref = 0;
2772 	bool test = false;
2773 	int i;
2774 
2775 	/*
2776 	 * When adding a module, we need to check if tracers are
2777 	 * currently enabled and if they are set to trace all functions.
2778 	 * If they are, we need to enable the module functions as well
2779 	 * as update the reference counts for those function records.
2780 	 */
2781 	if (mod) {
2782 		struct ftrace_ops *ops;
2783 
2784 		for (ops = ftrace_ops_list;
2785 		     ops != &ftrace_list_end; ops = ops->next) {
2786 			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2787 				if (ops_traces_mod(ops))
2788 					ref++;
2789 				else
2790 					test = true;
2791 			}
2792 		}
2793 	}
2794 
2795 	start = ftrace_now(raw_smp_processor_id());
2796 
2797 	for (pg = new_pgs; pg; pg = pg->next) {
2798 
2799 		for (i = 0; i < pg->index; i++) {
2800 			int cnt = ref;
2801 
2802 			/* If something went wrong, bail without enabling anything */
2803 			if (unlikely(ftrace_disabled))
2804 				return -1;
2805 
2806 			p = &pg->records[i];
2807 			if (test)
2808 				cnt += referenced_filters(p);
2809 			p->flags = cnt;
2810 
2811 			/*
2812 			 * Do the initial record conversion from mcount jump
2813 			 * to the NOP instructions.
2814 			 */
2815 			if (!ftrace_code_disable(mod, p))
2816 				break;
2817 
2818 			update_cnt++;
2819 
2820 			/*
2821 			 * If tracing is enabled, go ahead and enable the record.
2822 			 *
2823 			 * The reason not to enable the record immediately is the
2824 			 * inherent check of ftrace_make_nop/ftrace_make_call for
2825 			 * correct previous instructions.  Doing the NOP conversion
2826 			 * first puts the module into the correct state, thus
2827 			 * passing the ftrace_make_call check.
2828 			 */
2829 			if (ftrace_start_up && cnt) {
2830 				int failed = __ftrace_replace_code(p, 1);
2831 				if (failed)
2832 					ftrace_bug(failed, p);
2833 			}
2834 		}
2835 	}
2836 
2837 	stop = ftrace_now(raw_smp_processor_id());
2838 	ftrace_update_time = stop - start;
2839 	ftrace_update_tot_cnt += update_cnt;
2840 
2841 	return 0;
2842 }
2843 
2844 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2845 {
2846 	int order;
2847 	int cnt;
2848 
2849 	if (WARN_ON(!count))
2850 		return -EINVAL;
2851 
2852 	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2853 
2854 	/*
2855 	 * We want to fill as much as possible. No more than a page
2856 	 * may be empty.
2857 	 */
2858 	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2859 		order--;
2860 
2861  again:
2862 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2863 
2864 	if (!pg->records) {
2865 		/* if we can't allocate this size, try something smaller */
2866 		if (!order)
2867 			return -ENOMEM;
2868 		order >>= 1;
2869 		goto again;
2870 	}
2871 
2872 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2873 	pg->size = cnt;
2874 
2875 	if (cnt > count)
2876 		cnt = count;
2877 
2878 	return cnt;
2879 }
2880 
2881 static struct ftrace_page *
2882 ftrace_allocate_pages(unsigned long num_to_init)
2883 {
2884 	struct ftrace_page *start_pg;
2885 	struct ftrace_page *pg;
2886 	int order;
2887 	int cnt;
2888 
2889 	if (!num_to_init)
2890 		return NULL;
2891 
2892 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2893 	if (!pg)
2894 		return NULL;
2895 
2896 	/*
2897 	 * Try to allocate as much as possible in one contiguous
2898 	 * location that fills in all of the space. We want to
2899 	 * waste as little space as possible.
2900 	 */
2901 	for (;;) {
2902 		cnt = ftrace_allocate_records(pg, num_to_init);
2903 		if (cnt < 0)
2904 			goto free_pages;
2905 
2906 		num_to_init -= cnt;
2907 		if (!num_to_init)
2908 			break;
2909 
2910 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2911 		if (!pg->next)
2912 			goto free_pages;
2913 
2914 		pg = pg->next;
2915 	}
2916 
2917 	return start_pg;
2918 
2919  free_pages:
2920 	pg = start_pg;
2921 	while (pg) {
2922 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2923 		free_pages((unsigned long)pg->records, order);
2924 		start_pg = pg->next;
2925 		kfree(pg);
2926 		pg = start_pg;
2927 	}
2928 	pr_info("ftrace: FAILED to allocate memory for functions\n");
2929 	return NULL;
2930 }
2931 
2932 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2933 
2934 struct ftrace_iterator {
2935 	loff_t				pos;
2936 	loff_t				func_pos;
2937 	struct ftrace_page		*pg;
2938 	struct dyn_ftrace		*func;
2939 	struct ftrace_func_probe	*probe;
2940 	struct trace_parser		parser;
2941 	struct ftrace_hash		*hash;
2942 	struct ftrace_ops		*ops;
2943 	int				hidx;
2944 	int				idx;
2945 	unsigned			flags;
2946 };
2947 
2948 static void *
2949 t_hash_next(struct seq_file *m, loff_t *pos)
2950 {
2951 	struct ftrace_iterator *iter = m->private;
2952 	struct hlist_node *hnd = NULL;
2953 	struct hlist_head *hhd;
2954 
2955 	(*pos)++;
2956 	iter->pos = *pos;
2957 
2958 	if (iter->probe)
2959 		hnd = &iter->probe->node;
2960  retry:
2961 	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2962 		return NULL;
2963 
2964 	hhd = &ftrace_func_hash[iter->hidx];
2965 
2966 	if (hlist_empty(hhd)) {
2967 		iter->hidx++;
2968 		hnd = NULL;
2969 		goto retry;
2970 	}
2971 
2972 	if (!hnd)
2973 		hnd = hhd->first;
2974 	else {
2975 		hnd = hnd->next;
2976 		if (!hnd) {
2977 			iter->hidx++;
2978 			goto retry;
2979 		}
2980 	}
2981 
2982 	if (WARN_ON_ONCE(!hnd))
2983 		return NULL;
2984 
2985 	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2986 
2987 	return iter;
2988 }
2989 
2990 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2991 {
2992 	struct ftrace_iterator *iter = m->private;
2993 	void *p = NULL;
2994 	loff_t l;
2995 
2996 	if (!(iter->flags & FTRACE_ITER_DO_HASH))
2997 		return NULL;
2998 
2999 	if (iter->func_pos > *pos)
3000 		return NULL;
3001 
3002 	iter->hidx = 0;
3003 	for (l = 0; l <= (*pos - iter->func_pos); ) {
3004 		p = t_hash_next(m, &l);
3005 		if (!p)
3006 			break;
3007 	}
3008 	if (!p)
3009 		return NULL;
3010 
3011 	/* Only set this if we have an item */
3012 	iter->flags |= FTRACE_ITER_HASH;
3013 
3014 	return iter;
3015 }
3016 
3017 static int
3018 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
3019 {
3020 	struct ftrace_func_probe *rec;
3021 
3022 	rec = iter->probe;
3023 	if (WARN_ON_ONCE(!rec))
3024 		return -EIO;
3025 
3026 	if (rec->ops->print)
3027 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
3028 
3029 	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
3030 
3031 	if (rec->data)
3032 		seq_printf(m, ":%p", rec->data);
3033 	seq_putc(m, '\n');
3034 
3035 	return 0;
3036 }
3037 
3038 static void *
3039 t_next(struct seq_file *m, void *v, loff_t *pos)
3040 {
3041 	struct ftrace_iterator *iter = m->private;
3042 	struct ftrace_ops *ops = iter->ops;
3043 	struct dyn_ftrace *rec = NULL;
3044 
3045 	if (unlikely(ftrace_disabled))
3046 		return NULL;
3047 
3048 	if (iter->flags & FTRACE_ITER_HASH)
3049 		return t_hash_next(m, pos);
3050 
3051 	(*pos)++;
3052 	iter->pos = iter->func_pos = *pos;
3053 
3054 	if (iter->flags & FTRACE_ITER_PRINTALL)
3055 		return t_hash_start(m, pos);
3056 
3057  retry:
3058 	if (iter->idx >= iter->pg->index) {
3059 		if (iter->pg->next) {
3060 			iter->pg = iter->pg->next;
3061 			iter->idx = 0;
3062 			goto retry;
3063 		}
3064 	} else {
3065 		rec = &iter->pg->records[iter->idx++];
3066 		if (((iter->flags & FTRACE_ITER_FILTER) &&
3067 		     !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
3068 
3069 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
3070 		     !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
3071 
3072 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
3073 		     !(rec->flags & FTRACE_FL_ENABLED))) {
3074 
3075 			rec = NULL;
3076 			goto retry;
3077 		}
3078 	}
3079 
3080 	if (!rec)
3081 		return t_hash_start(m, pos);
3082 
3083 	iter->func = rec;
3084 
3085 	return iter;
3086 }
3087 
3088 static void reset_iter_read(struct ftrace_iterator *iter)
3089 {
3090 	iter->pos = 0;
3091 	iter->func_pos = 0;
3092 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
3093 }
3094 
3095 static void *t_start(struct seq_file *m, loff_t *pos)
3096 {
3097 	struct ftrace_iterator *iter = m->private;
3098 	struct ftrace_ops *ops = iter->ops;
3099 	void *p = NULL;
3100 	loff_t l;
3101 
3102 	mutex_lock(&ftrace_lock);
3103 
3104 	if (unlikely(ftrace_disabled))
3105 		return NULL;
3106 
3107 	/*
3108 	 * If an lseek was done, then reset and start from the beginning.
3109 	 */
3110 	if (*pos < iter->pos)
3111 		reset_iter_read(iter);
3112 
3113 	/*
3114 	 * For set_ftrace_filter reading, if we have the filter
3115 	 * off, we can short cut and just print out that all
3116 	 * functions are enabled.
3117 	 */
3118 	if ((iter->flags & FTRACE_ITER_FILTER &&
3119 	     ftrace_hash_empty(ops->func_hash->filter_hash)) ||
3120 	    (iter->flags & FTRACE_ITER_NOTRACE &&
3121 	     ftrace_hash_empty(ops->func_hash->notrace_hash))) {
3122 		if (*pos > 0)
3123 			return t_hash_start(m, pos);
3124 		iter->flags |= FTRACE_ITER_PRINTALL;
3125 		/* reset in case of seek/pread */
3126 		iter->flags &= ~FTRACE_ITER_HASH;
3127 		return iter;
3128 	}
3129 
3130 	if (iter->flags & FTRACE_ITER_HASH)
3131 		return t_hash_start(m, pos);
3132 
3133 	/*
3134 	 * Unfortunately, we need to restart at ftrace_pages_start
3135 	 * every time we let go of the ftrace_lock. This is because
3136 	 * those pointers can change without the lock.
3137 	 */
3138 	iter->pg = ftrace_pages_start;
3139 	iter->idx = 0;
3140 	for (l = 0; l <= *pos; ) {
3141 		p = t_next(m, p, &l);
3142 		if (!p)
3143 			break;
3144 	}
3145 
3146 	if (!p)
3147 		return t_hash_start(m, pos);
3148 
3149 	return iter;
3150 }
3151 
3152 static void t_stop(struct seq_file *m, void *p)
3153 {
3154 	mutex_unlock(&ftrace_lock);
3155 }
3156 
3157 void * __weak
3158 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3159 {
3160 	return NULL;
3161 }
3162 
3163 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3164 				struct dyn_ftrace *rec)
3165 {
3166 	void *ptr;
3167 
3168 	ptr = arch_ftrace_trampoline_func(ops, rec);
3169 	if (ptr)
3170 		seq_printf(m, " ->%pS", ptr);
3171 }
3172 
3173 static int t_show(struct seq_file *m, void *v)
3174 {
3175 	struct ftrace_iterator *iter = m->private;
3176 	struct dyn_ftrace *rec;
3177 
3178 	if (iter->flags & FTRACE_ITER_HASH)
3179 		return t_hash_show(m, iter);
3180 
3181 	if (iter->flags & FTRACE_ITER_PRINTALL) {
3182 		if (iter->flags & FTRACE_ITER_NOTRACE)
3183 			seq_puts(m, "#### no functions disabled ####\n");
3184 		else
3185 			seq_puts(m, "#### all functions enabled ####\n");
3186 		return 0;
3187 	}
3188 
3189 	rec = iter->func;
3190 
3191 	if (!rec)
3192 		return 0;
3193 
3194 	seq_printf(m, "%ps", (void *)rec->ip);
3195 	if (iter->flags & FTRACE_ITER_ENABLED) {
3196 		struct ftrace_ops *ops = NULL;
3197 
3198 		seq_printf(m, " (%ld)%s%s",
3199 			   ftrace_rec_count(rec),
3200 			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3201 			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
3202 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3203 			ops = ftrace_find_tramp_ops_any(rec);
3204 			if (ops)
3205 				seq_printf(m, "\ttramp: %pS",
3206 					   (void *)ops->trampoline);
3207 			else
3208 				seq_puts(m, "\ttramp: ERROR!");
3209 
3210 		}
3211 		add_trampoline_func(m, ops, rec);
3212 	}
3213 
3214 	seq_putc(m, '\n');
3215 
3216 	return 0;
3217 }
3218 
3219 static const struct seq_operations show_ftrace_seq_ops = {
3220 	.start = t_start,
3221 	.next = t_next,
3222 	.stop = t_stop,
3223 	.show = t_show,
3224 };
3225 
3226 static int
3227 ftrace_avail_open(struct inode *inode, struct file *file)
3228 {
3229 	struct ftrace_iterator *iter;
3230 
3231 	if (unlikely(ftrace_disabled))
3232 		return -ENODEV;
3233 
3234 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3235 	if (iter) {
3236 		iter->pg = ftrace_pages_start;
3237 		iter->ops = &global_ops;
3238 	}
3239 
3240 	return iter ? 0 : -ENOMEM;
3241 }
3242 
3243 static int
3244 ftrace_enabled_open(struct inode *inode, struct file *file)
3245 {
3246 	struct ftrace_iterator *iter;
3247 
3248 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3249 	if (iter) {
3250 		iter->pg = ftrace_pages_start;
3251 		iter->flags = FTRACE_ITER_ENABLED;
3252 		iter->ops = &global_ops;
3253 	}
3254 
3255 	return iter ? 0 : -ENOMEM;
3256 }
3257 
3258 /**
3259  * ftrace_regex_open - initialize function tracer filter files
3260  * @ops: The ftrace_ops that hold the hash filters
3261  * @flag: The type of filter to process
3262  * @inode: The inode, usually passed in to your open routine
3263  * @file: The file, usually passed in to your open routine
3264  *
3265  * ftrace_regex_open() initializes the filter files for the
3266  * @ops. Depending on @flag it may process the filter hash or
3267  * the notrace hash of @ops. With this called from the open
3268  * routine, you can use ftrace_filter_write() for the write
3269  * routine if @flag has FTRACE_ITER_FILTER set, or
3270  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3271  * tracing_lseek() should be used as the lseek routine, and
3272  * release must call ftrace_regex_release().
3273  */
3274 int
3275 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3276 		  struct inode *inode, struct file *file)
3277 {
3278 	struct ftrace_iterator *iter;
3279 	struct ftrace_hash *hash;
3280 	int ret = 0;
3281 
3282 	ftrace_ops_init(ops);
3283 
3284 	if (unlikely(ftrace_disabled))
3285 		return -ENODEV;
3286 
3287 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3288 	if (!iter)
3289 		return -ENOMEM;
3290 
3291 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3292 		kfree(iter);
3293 		return -ENOMEM;
3294 	}
3295 
3296 	iter->ops = ops;
3297 	iter->flags = flag;
3298 
3299 	mutex_lock(&ops->func_hash->regex_lock);
3300 
3301 	if (flag & FTRACE_ITER_NOTRACE)
3302 		hash = ops->func_hash->notrace_hash;
3303 	else
3304 		hash = ops->func_hash->filter_hash;
3305 
3306 	if (file->f_mode & FMODE_WRITE) {
3307 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3308 
3309 		if (file->f_flags & O_TRUNC)
3310 			iter->hash = alloc_ftrace_hash(size_bits);
3311 		else
3312 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3313 
3314 		if (!iter->hash) {
3315 			trace_parser_put(&iter->parser);
3316 			kfree(iter);
3317 			ret = -ENOMEM;
3318 			goto out_unlock;
3319 		}
3320 	}
3321 
3322 	if (file->f_mode & FMODE_READ) {
3323 		iter->pg = ftrace_pages_start;
3324 
3325 		ret = seq_open(file, &show_ftrace_seq_ops);
3326 		if (!ret) {
3327 			struct seq_file *m = file->private_data;
3328 			m->private = iter;
3329 		} else {
3330 			/* Failed */
3331 			free_ftrace_hash(iter->hash);
3332 			trace_parser_put(&iter->parser);
3333 			kfree(iter);
3334 		}
3335 	} else
3336 		file->private_data = iter;
3337 
3338  out_unlock:
3339 	mutex_unlock(&ops->func_hash->regex_lock);
3340 
3341 	return ret;
3342 }
3343 
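/*
 * Illustrative wiring (a sketch of what the description above implies;
 * the names my_ops, my_filter_open and my_filter_fops are hypothetical,
 * while the helper routines are the real ones named in the comment):
 *
 *	static int my_filter_open(struct inode *inode, struct file *file)
 *	{
 *		return ftrace_regex_open(&my_ops, FTRACE_ITER_FILTER,
 *					 inode, file);
 *	}
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open		= my_filter_open,
 *		.read		= seq_read,
 *		.write		= ftrace_filter_write,
 *		.llseek		= tracing_lseek,
 *		.release	= ftrace_regex_release,
 *	};
 */
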
3344 static int
3345 ftrace_filter_open(struct inode *inode, struct file *file)
3346 {
3347 	struct ftrace_ops *ops = inode->i_private;
3348 
3349 	return ftrace_regex_open(ops,
3350 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3351 			inode, file);
3352 }
3353 
3354 static int
3355 ftrace_notrace_open(struct inode *inode, struct file *file)
3356 {
3357 	struct ftrace_ops *ops = inode->i_private;
3358 
3359 	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3360 				 inode, file);
3361 }
3362 
3363 static int ftrace_match(char *str, char *regex, int len, int type)
3364 {
3365 	int matched = 0;
3366 	int slen;
3367 
3368 	switch (type) {
3369 	case MATCH_FULL:
3370 		if (strcmp(str, regex) == 0)
3371 			matched = 1;
3372 		break;
3373 	case MATCH_FRONT_ONLY:
3374 		if (strncmp(str, regex, len) == 0)
3375 			matched = 1;
3376 		break;
3377 	case MATCH_MIDDLE_ONLY:
3378 		if (strstr(str, regex))
3379 			matched = 1;
3380 		break;
3381 	case MATCH_END_ONLY:
3382 		slen = strlen(str);
3383 		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
3384 			matched = 1;
3385 		break;
3386 	}
3387 
3388 	return matched;
3389 }
3390 
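/*
 * Worked example (illustrative): filter_parse_regex() classifies the
 * user's glob and strips the '*'s, then ftrace_match() applies it:
 *
 *	"schedule"   -> MATCH_FULL,        search = "schedule"
 *	"sched_*"    -> MATCH_FRONT_ONLY,  search = "sched_"
 *	"*_fair"     -> MATCH_END_ONLY,    search = "_fair"
 *	"*balance*"  -> MATCH_MIDDLE_ONLY, search = "balance"
 *
 * so ftrace_match("load_balance", "balance", 7, MATCH_MIDDLE_ONLY)
 * returns 1.
 */
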
3391 static int
3392 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
3393 {
3394 	struct ftrace_func_entry *entry;
3395 	int ret = 0;
3396 
3397 	entry = ftrace_lookup_ip(hash, rec->ip);
3398 	if (not) {
3399 		/* Do nothing if it doesn't exist */
3400 		if (!entry)
3401 			return 0;
3402 
3403 		free_hash_entry(hash, entry);
3404 	} else {
3405 		/* Do nothing if it exists */
3406 		if (entry)
3407 			return 0;
3408 
3409 		ret = add_hash_entry(hash, rec->ip);
3410 	}
3411 	return ret;
3412 }
3413 
3414 static int
3415 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
3416 		    char *regex, int len, int type)
3417 {
3418 	char str[KSYM_SYMBOL_LEN];
3419 	char *modname;
3420 
3421 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3422 
3423 	if (mod) {
3424 		/* module lookup requires matching the module */
3425 		if (!modname || strcmp(modname, mod))
3426 			return 0;
3427 
3428 		/* blank search means to match all funcs in the mod */
3429 		if (!len)
3430 			return 1;
3431 	}
3432 
3433 	return ftrace_match(str, regex, len, type);
3434 }
3435 
3436 static int
3437 match_records(struct ftrace_hash *hash, char *buff,
3438 	      int len, char *mod, int not)
3439 {
3440 	unsigned search_len = 0;
3441 	struct ftrace_page *pg;
3442 	struct dyn_ftrace *rec;
3443 	int type = MATCH_FULL;
3444 	char *search = buff;
3445 	int found = 0;
3446 	int ret;
3447 
3448 	if (len) {
3449 		type = filter_parse_regex(buff, len, &search, &not);
3450 		search_len = strlen(search);
3451 	}
3452 
3453 	mutex_lock(&ftrace_lock);
3454 
3455 	if (unlikely(ftrace_disabled))
3456 		goto out_unlock;
3457 
3458 	do_for_each_ftrace_rec(pg, rec) {
3459 		if (ftrace_match_record(rec, mod, search, search_len, type)) {
3460 			ret = enter_record(hash, rec, not);
3461 			if (ret < 0) {
3462 				found = ret;
3463 				goto out_unlock;
3464 			}
3465 			found = 1;
3466 		}
3467 	} while_for_each_ftrace_rec();
3468  out_unlock:
3469 	mutex_unlock(&ftrace_lock);
3470 
3471 	return found;
3472 }
3473 
3474 static int
3475 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3476 {
3477 	return match_records(hash, buff, len, NULL, 0);
3478 }
3479 
3480 static int
3481 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
3482 {
3483 	int not = 0;
3484 
3485 	/* blank or '*' mean the same */
3486 	if (strcmp(buff, "*") == 0)
3487 		buff[0] = 0;
3488 
3489 	/* handle the case of 'don't filter this module' */
3490 	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
3491 		buff[0] = 0;
3492 		not = 1;
3493 	}
3494 
3495 	return match_records(hash, buff, strlen(buff), mod, not);
3496 }
3497 
3498 /*
3499  * We register the module command as a template to show others how
3500  * to register a command as well.
3501  */
3502 
3503 static int
3504 ftrace_mod_callback(struct ftrace_hash *hash,
3505 		    char *func, char *cmd, char *param, int enable)
3506 {
3507 	char *mod;
3508 	int ret = -EINVAL;
3509 
3510 	/*
3511 	 * cmd == 'mod' because we only registered this func
3512 	 * for the 'mod' ftrace_func_command.
3513 	 * But if you register one func with multiple commands,
3514 	 * you can tell which command was used by the cmd
3515 	 * parameter.
3516 	 */
3517 
3518 	/* we must have a module name */
3519 	if (!param)
3520 		return ret;
3521 
3522 	mod = strsep(&param, ":");
3523 	if (!strlen(mod))
3524 		return ret;
3525 
3526 	ret = ftrace_match_module_records(hash, func, mod);
3527 	if (!ret)
3528 		ret = -EINVAL;
3529 	if (ret < 0)
3530 		return ret;
3531 
3532 	return 0;
3533 }
3534 
3535 static struct ftrace_func_command ftrace_mod_cmd = {
3536 	.name			= "mod",
3537 	.func			= ftrace_mod_callback,
3538 };
3539 
3540 static int __init ftrace_mod_cmd_init(void)
3541 {
3542 	return register_ftrace_command(&ftrace_mod_cmd);
3543 }
3544 core_initcall(ftrace_mod_cmd_init);
3545 
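/*
 * A hedged sketch of following the template above (the names "mycmd",
 * my_cmd_callback and my_cmd are hypothetical).  The callback receives
 * the hash being edited, the function pattern, the command name and any
 * ":<param>" suffix, e.g. from writing "func_name:mycmd:param" to
 * set_ftrace_filter.
 *
 *	static int my_cmd_callback(struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	static int __init my_cmd_init(void)
 *	{
 *		return register_ftrace_command(&my_cmd);
 *	}
 *	core_initcall(my_cmd_init);
 */
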
3546 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3547 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
3548 {
3549 	struct ftrace_func_probe *entry;
3550 	struct hlist_head *hhd;
3551 	unsigned long key;
3552 
3553 	key = hash_long(ip, FTRACE_HASH_BITS);
3554 
3555 	hhd = &ftrace_func_hash[key];
3556 
3557 	if (hlist_empty(hhd))
3558 		return;
3559 
3560 	/*
3561 	 * Disable preemption for these calls to prevent an RCU grace
3562 	 * period. This syncs the hash iteration and freeing of items
3563 	 * on the hash. rcu_read_lock is too dangerous here.
3564 	 */
3565 	preempt_disable_notrace();
3566 	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3567 		if (entry->ip == ip)
3568 			entry->ops->func(ip, parent_ip, &entry->data);
3569 	}
3570 	preempt_enable_notrace();
3571 }
3572 
3573 static struct ftrace_ops trace_probe_ops __read_mostly =
3574 {
3575 	.func		= function_trace_probe_call,
3576 	.flags		= FTRACE_OPS_FL_INITIALIZED,
3577 	INIT_OPS_HASH(trace_probe_ops)
3578 };
3579 
3580 static int ftrace_probe_registered;
3581 
3582 static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
3583 {
3584 	int ret;
3585 	int i;
3586 
3587 	if (ftrace_probe_registered) {
3588 		/* still need to update the function call sites */
3589 		if (ftrace_enabled)
3590 			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3591 					       old_hash);
3592 		return;
3593 	}
3594 
3595 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3596 		struct hlist_head *hhd = &ftrace_func_hash[i];
3597 		if (hhd->first)
3598 			break;
3599 	}
3600 	/* Nothing registered? */
3601 	if (i == FTRACE_FUNC_HASHSIZE)
3602 		return;
3603 
3604 	ret = ftrace_startup(&trace_probe_ops, 0);
3605 
3606 	ftrace_probe_registered = 1;
3607 }
3608 
3609 static void __disable_ftrace_function_probe(void)
3610 {
3611 	int i;
3612 
3613 	if (!ftrace_probe_registered)
3614 		return;
3615 
3616 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3617 		struct hlist_head *hhd = &ftrace_func_hash[i];
3618 		if (hhd->first)
3619 			return;
3620 	}
3621 
3622 	/* no more funcs left */
3623 	ftrace_shutdown(&trace_probe_ops, 0);
3624 
3625 	ftrace_probe_registered = 0;
3626 }
3627 
3628 
3629 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3630 {
3631 	if (entry->ops->free)
3632 		entry->ops->free(entry->ops, entry->ip, &entry->data);
3633 	kfree(entry);
3634 }
3635 
3636 int
3637 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3638 			      void *data)
3639 {
3640 	struct ftrace_func_probe *entry;
3641 	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3642 	struct ftrace_hash *old_hash = *orig_hash;
3643 	struct ftrace_hash *hash;
3644 	struct ftrace_page *pg;
3645 	struct dyn_ftrace *rec;
3646 	int type, len, not;
3647 	unsigned long key;
3648 	int count = 0;
3649 	char *search;
3650 	int ret;
3651 
3652 	type = filter_parse_regex(glob, strlen(glob), &search, &not);
3653 	len = strlen(search);
3654 
3655 	/* we do not support '!' for function probes */
3656 	if (WARN_ON(not))
3657 		return -EINVAL;
3658 
3659 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3660 
3661 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
3662 	if (!hash) {
3663 		count = -ENOMEM;
3664 		goto out;
3665 	}
3666 
3667 	if (unlikely(ftrace_disabled)) {
3668 		count = -ENODEV;
3669 		goto out;
3670 	}
3671 
3672 	mutex_lock(&ftrace_lock);
3673 
3674 	do_for_each_ftrace_rec(pg, rec) {
3675 
3676 		if (!ftrace_match_record(rec, NULL, search, len, type))
3677 			continue;
3678 
3679 		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3680 		if (!entry) {
3681 			/* If we did not process any, then return error */
3682 			if (!count)
3683 				count = -ENOMEM;
3684 			goto out_unlock;
3685 		}
3686 
3687 		count++;
3688 
3689 		entry->data = data;
3690 
3691 		/*
3692 		 * The caller might want to do something special
3693 		 * for each function we find. We call the callback
3694 		 * to give the caller an opportunity to do so.
3695 		 */
3696 		if (ops->init) {
3697 			if (ops->init(ops, rec->ip, &entry->data) < 0) {
3698 				/* caller does not like this func */
3699 				kfree(entry);
3700 				continue;
3701 			}
3702 		}
3703 
3704 		ret = enter_record(hash, rec, 0);
3705 		if (ret < 0) {
3706 			kfree(entry);
3707 			count = ret;
3708 			goto out_unlock;
3709 		}
3710 
3711 		entry->ops = ops;
3712 		entry->ip = rec->ip;
3713 
3714 		key = hash_long(entry->ip, FTRACE_HASH_BITS);
3715 		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3716 
3717 	} while_for_each_ftrace_rec();
3718 
3719 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3720 
3721 	__enable_ftrace_function_probe(old_hash);
3722 
3723 	if (!ret)
3724 		free_ftrace_hash_rcu(old_hash);
3725 	else
3726 		count = ret;
3727 
3728  out_unlock:
3729 	mutex_unlock(&ftrace_lock);
3730  out:
3731 	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3732 	free_ftrace_hash(hash);
3733 
3734 	return count;
3735 }
3736 
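/*
 * Usage sketch (illustrative; the names my_probe_func and my_probe_ops
 * are hypothetical).  The probe callback is invoked from
 * function_trace_probe_call() above for every hit on a matched function:
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *
 * A positive return value is the number of functions the probe was
 * attached to; a negative value is an error.
 */
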
3737 enum {
3738 	PROBE_TEST_FUNC		= 1,
3739 	PROBE_TEST_DATA		= 2
3740 };
3741 
3742 static void
3743 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3744 				  void *data, int flags)
3745 {
3746 	struct ftrace_func_entry *rec_entry;
3747 	struct ftrace_func_probe *entry;
3748 	struct ftrace_func_probe *p;
3749 	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3750 	struct ftrace_hash *old_hash = *orig_hash;
3751 	struct list_head free_list;
3752 	struct ftrace_hash *hash;
3753 	struct hlist_node *tmp;
3754 	char str[KSYM_SYMBOL_LEN];
3755 	int type = MATCH_FULL;
3756 	int i, len = 0;
3757 	char *search;
3758 	int ret;
3759 
3760 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3761 		glob = NULL;
3762 	else if (glob) {
3763 		int not;
3764 
3765 		type = filter_parse_regex(glob, strlen(glob), &search, &not);
3766 		len = strlen(search);
3767 
3768 		/* we do not support '!' for function probes */
3769 		if (WARN_ON(not))
3770 			return;
3771 	}
3772 
3773 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
3774 
3775 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3776 	if (!hash)
3777 		/* Hmm, should report this somehow */
3778 		goto out_unlock;
3779 
3780 	INIT_LIST_HEAD(&free_list);
3781 
3782 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3783 		struct hlist_head *hhd = &ftrace_func_hash[i];
3784 
3785 		hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3786 
3787 			/* break up if statements for readability */
3788 			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3789 				continue;
3790 
3791 			if ((flags & PROBE_TEST_DATA) && entry->data != data)
3792 				continue;
3793 
3794 			/* do this last, since it is the most expensive */
3795 			if (glob) {
3796 				kallsyms_lookup(entry->ip, NULL, NULL,
3797 						NULL, str);
3798 				if (!ftrace_match(str, glob, len, type))
3799 					continue;
3800 			}
3801 
3802 			rec_entry = ftrace_lookup_ip(hash, entry->ip);
3803 			/* It is possible more than one entry had this ip */
3804 			if (rec_entry)
3805 				free_hash_entry(hash, rec_entry);
3806 
3807 			hlist_del_rcu(&entry->node);
3808 			list_add(&entry->free_list, &free_list);
3809 		}
3810 	}
3811 	mutex_lock(&ftrace_lock);
3812 	__disable_ftrace_function_probe();
3813 	/*
3814 	 * Remove after the disable is called. Otherwise, if the last
3815 	 * probe is removed, a null hash means *all enabled*.
3816 	 */
3817 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3818 	synchronize_sched();
3819 	if (!ret)
3820 		free_ftrace_hash_rcu(old_hash);
3821 
3822 	list_for_each_entry_safe(entry, p, &free_list, free_list) {
3823 		list_del(&entry->free_list);
3824 		ftrace_free_entry(entry);
3825 	}
3826 	mutex_unlock(&ftrace_lock);
3827 
3828  out_unlock:
3829 	mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
3830 	free_ftrace_hash(hash);
3831 }
3832 
3833 void
3834 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3835 				void *data)
3836 {
3837 	__unregister_ftrace_function_probe(glob, ops, data,
3838 					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
3839 }
3840 
3841 void
3842 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3843 {
3844 	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3845 }
3846 
3847 void unregister_ftrace_function_probe_all(char *glob)
3848 {
3849 	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3850 }
3851 
3852 static LIST_HEAD(ftrace_commands);
3853 static DEFINE_MUTEX(ftrace_cmd_mutex);
3854 
3855 /*
3856  * Currently we only register ftrace commands from __init, so mark this
3857  * __init too.
3858  */
3859 __init int register_ftrace_command(struct ftrace_func_command *cmd)
3860 {
3861 	struct ftrace_func_command *p;
3862 	int ret = 0;
3863 
3864 	mutex_lock(&ftrace_cmd_mutex);
3865 	list_for_each_entry(p, &ftrace_commands, list) {
3866 		if (strcmp(cmd->name, p->name) == 0) {
3867 			ret = -EBUSY;
3868 			goto out_unlock;
3869 		}
3870 	}
3871 	list_add(&cmd->list, &ftrace_commands);
3872  out_unlock:
3873 	mutex_unlock(&ftrace_cmd_mutex);
3874 
3875 	return ret;
3876 }
3877 
3878 /*
3879  * Currently we only unregister ftrace commands from __init, so mark
3880  * this __init too.
3881  */
3882 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3883 {
3884 	struct ftrace_func_command *p, *n;
3885 	int ret = -ENODEV;
3886 
3887 	mutex_lock(&ftrace_cmd_mutex);
3888 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3889 		if (strcmp(cmd->name, p->name) == 0) {
3890 			ret = 0;
3891 			list_del_init(&p->list);
3892 			goto out_unlock;
3893 		}
3894 	}
3895  out_unlock:
3896 	mutex_unlock(&ftrace_cmd_mutex);
3897 
3898 	return ret;
3899 }
3900 
3901 static int ftrace_process_regex(struct ftrace_hash *hash,
3902 				char *buff, int len, int enable)
3903 {
3904 	char *func, *command, *next = buff;
3905 	struct ftrace_func_command *p;
3906 	int ret = -EINVAL;
3907 
3908 	func = strsep(&next, ":");
3909 
3910 	if (!next) {
3911 		ret = ftrace_match_records(hash, func, len);
3912 		if (!ret)
3913 			ret = -EINVAL;
3914 		if (ret < 0)
3915 			return ret;
3916 		return 0;
3917 	}
3918 
3919 	/* command found */
3920 
3921 	command = strsep(&next, ":");
3922 
3923 	mutex_lock(&ftrace_cmd_mutex);
3924 	list_for_each_entry(p, &ftrace_commands, list) {
3925 		if (strcmp(p->name, command) == 0) {
3926 			ret = p->func(hash, func, command, next, enable);
3927 			goto out_unlock;
3928 		}
3929 	}
3930  out_unlock:
3931 	mutex_unlock(&ftrace_cmd_mutex);
3932 
3933 	return ret;
3934 }
3935 
3936 static ssize_t
3937 ftrace_regex_write(struct file *file, const char __user *ubuf,
3938 		   size_t cnt, loff_t *ppos, int enable)
3939 {
3940 	struct ftrace_iterator *iter;
3941 	struct trace_parser *parser;
3942 	ssize_t ret, read;
3943 
3944 	if (!cnt)
3945 		return 0;
3946 
3947 	if (file->f_mode & FMODE_READ) {
3948 		struct seq_file *m = file->private_data;
3949 		iter = m->private;
3950 	} else
3951 		iter = file->private_data;
3952 
3953 	if (unlikely(ftrace_disabled))
3954 		return -ENODEV;
3955 
3956 	/* iter->hash is a local copy, so we don't need regex_lock */
3957 
3958 	parser = &iter->parser;
3959 	read = trace_get_user(parser, ubuf, cnt, ppos);
3960 
3961 	if (read >= 0 && trace_parser_loaded(parser) &&
3962 	    !trace_parser_cont(parser)) {
3963 		ret = ftrace_process_regex(iter->hash, parser->buffer,
3964 					   parser->idx, enable);
3965 		trace_parser_clear(parser);
3966 		if (ret < 0)
3967 			goto out;
3968 	}
3969 
3970 	ret = read;
3971  out:
3972 	return ret;
3973 }
3974 
3975 ssize_t
3976 ftrace_filter_write(struct file *file, const char __user *ubuf,
3977 		    size_t cnt, loff_t *ppos)
3978 {
3979 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3980 }
3981 
3982 ssize_t
3983 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3984 		     size_t cnt, loff_t *ppos)
3985 {
3986 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3987 }
3988 
3989 static int
3990 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3991 {
3992 	struct ftrace_func_entry *entry;
3993 
3994 	if (!ftrace_location(ip))
3995 		return -EINVAL;
3996 
3997 	if (remove) {
3998 		entry = ftrace_lookup_ip(hash, ip);
3999 		if (!entry)
4000 			return -ENOENT;
4001 		free_hash_entry(hash, entry);
4002 		return 0;
4003 	}
4004 
4005 	return add_hash_entry(hash, ip);
4006 }
4007 
4008 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4009 				   struct ftrace_hash *old_hash)
4010 {
4011 	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
4012 		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4013 }
4014 
4015 static int
4016 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4017 		unsigned long ip, int remove, int reset, int enable)
4018 {
4019 	struct ftrace_hash **orig_hash;
4020 	struct ftrace_hash *old_hash;
4021 	struct ftrace_hash *hash;
4022 	int ret;
4023 
4024 	if (unlikely(ftrace_disabled))
4025 		return -ENODEV;
4026 
4027 	mutex_lock(&ops->func_hash->regex_lock);
4028 
4029 	if (enable)
4030 		orig_hash = &ops->func_hash->filter_hash;
4031 	else
4032 		orig_hash = &ops->func_hash->notrace_hash;
4033 
4034 	if (reset)
4035 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4036 	else
4037 		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4038 
4039 	if (!hash) {
4040 		ret = -ENOMEM;
4041 		goto out_regex_unlock;
4042 	}
4043 
4044 	if (buf && !ftrace_match_records(hash, buf, len)) {
4045 		ret = -EINVAL;
4046 		goto out_regex_unlock;
4047 	}
4048 	if (ip) {
4049 		ret = ftrace_match_addr(hash, ip, remove);
4050 		if (ret < 0)
4051 			goto out_regex_unlock;
4052 	}
4053 
4054 	mutex_lock(&ftrace_lock);
4055 	old_hash = *orig_hash;
4056 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4057 	if (!ret) {
4058 		ftrace_ops_update_code(ops, old_hash);
4059 		free_ftrace_hash_rcu(old_hash);
4060 	}
4061 	mutex_unlock(&ftrace_lock);
4062 
4063  out_regex_unlock:
4064 	mutex_unlock(&ops->func_hash->regex_lock);
4065 
4066 	free_ftrace_hash(hash);
4067 	return ret;
4068 }
4069 
4070 static int
4071 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4072 		int reset, int enable)
4073 {
4074 	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4075 }
4076 
4077 /**
4078  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4079  * @ops - the ops to set the filter with
4080  * @ip - the address to add to or remove from the filter.
4081  * @remove - non zero to remove the ip from the filter
4082  * @reset - non zero to reset all filters before applying this filter.
4083  *
4084  * Filters denote which functions should be enabled when tracing is enabled.
4085  * If @ip is NULL, it fails to update the filter.
4086  */
4087 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4088 			 int remove, int reset)
4089 {
4090 	ftrace_ops_init(ops);
4091 	return ftrace_set_addr(ops, ip, remove, reset, 1);
4092 }
4093 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
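/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that already knows a function's address, for instance from
 * kallsyms_lookup_name(), could restrict a hypothetical "my_ops" to that
 * single function before registering it:
 *
 *	unsigned long addr = kallsyms_lookup_name("schedule");
 *
 *	if (addr)
 *		ftrace_set_filter_ip(&my_ops, addr, 0, 0);
 */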
4094 
4095 static int
4096 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4097 		 int reset, int enable)
4098 {
4099 	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4100 }
4101 
4102 /**
4103  * ftrace_set_filter - set a function to filter on in ftrace
4104  * @ops - the ops to set the filter with
4105  * @buf - the string that holds the function filter text.
4106  * @len - the length of the string.
4107  * @reset - non zero to reset all filters before applying this filter.
4108  *
4109  * Filters denote which functions should be enabled when tracing is enabled.
4110  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4111  */
4112 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
4113 		       int len, int reset)
4114 {
4115 	ftrace_ops_init(ops);
4116 	return ftrace_set_regex(ops, buf, len, reset, 1);
4117 }
4118 EXPORT_SYMBOL_GPL(ftrace_set_filter);
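/*
 * Example (illustrative sketch, not part of the original file): limit a
 * hypothetical "my_ops" to functions matching a glob before it is
 * registered; with @reset non zero any previous filter is dropped first:
 *
 *	char filter[] = "vfs_*";
 *
 *	ftrace_set_filter(&my_ops, filter, strlen(filter), 1);
 */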
4119 
4120 /**
4121  * ftrace_set_notrace - set a function to not trace in ftrace
4122  * @ops - the ops to set the notrace filter with
4123  * @buf - the string that holds the function notrace text.
4124  * @len - the length of the string.
4125  * @reset - non zero to reset all filters before applying this filter.
4126  *
4127  * Notrace Filters denote which functions should not be enabled when tracing
4128  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4129  * for tracing.
4130  */
4131 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
4132 			int len, int reset)
4133 {
4134 	ftrace_ops_init(ops);
4135 	return ftrace_set_regex(ops, buf, len, reset, 0);
4136 }
4137 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
4138 /**
4139  * ftrace_set_global_filter - set a function to filter on with global tracers
4140  * @buf - the string that holds the function filter text.
4141  * @len - the length of the string.
4142  * @reset - non zero to reset all filters before applying this filter.
4143  *
4144  * Filters denote which functions should be enabled when tracing is enabled.
4145  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4146  */
4147 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4148 {
4149 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
4150 }
4151 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4152 
4153 /**
4154  * ftrace_set_global_notrace - set a function to not trace with global tracers
4155  * @buf - the string that holds the function notrace text.
4156  * @len - the length of the string.
4157  * @reset - non zero to reset all filters before applying this filter.
4158  *
4159  * Notrace Filters denote which functions should not be enabled when tracing
4160  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4161  * for tracing.
4162  */
4163 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4164 {
4165 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
4166 }
4167 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4168 
4169 /*
4170  * command line interface to allow users to set filters on boot up.
4171  */
4172 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
4173 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4174 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4175 
4176 /* Used by the function selftest to skip the test when a filter is set */
4177 bool ftrace_filter_param __initdata;
4178 
4179 static int __init set_ftrace_notrace(char *str)
4180 {
4181 	ftrace_filter_param = true;
4182 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4183 	return 1;
4184 }
4185 __setup("ftrace_notrace=", set_ftrace_notrace);
4186 
4187 static int __init set_ftrace_filter(char *str)
4188 {
4189 	ftrace_filter_param = true;
4190 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4191 	return 1;
4192 }
4193 __setup("ftrace_filter=", set_ftrace_filter);
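/*
 * Usage note (illustrative, not part of the original file): the boot
 * parameters parsed above take comma separated globs, e.g.
 *
 *	ftrace_filter=kmem_cache_*,vfs_read ftrace_notrace=rcu_*
 *
 * The strings are only copied here; they are applied later by
 * set_ftrace_early_filters().
 */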
4194 
4195 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4196 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4197 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4198 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
4199 
4200 static unsigned long save_global_trampoline;
4201 static unsigned long save_global_flags;
4202 
4203 static int __init set_graph_function(char *str)
4204 {
4205 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4206 	return 1;
4207 }
4208 __setup("ftrace_graph_filter=", set_graph_function);
4209 
4210 static int __init set_graph_notrace_function(char *str)
4211 {
4212 	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4213 	return 1;
4214 }
4215 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
4216 
4217 static void __init set_ftrace_early_graph(char *buf, int enable)
4218 {
4219 	int ret;
4220 	char *func;
4221 	unsigned long *table = ftrace_graph_funcs;
4222 	int *count = &ftrace_graph_count;
4223 
4224 	if (!enable) {
4225 		table = ftrace_graph_notrace_funcs;
4226 		count = &ftrace_graph_notrace_count;
4227 	}
4228 
4229 	while (buf) {
4230 		func = strsep(&buf, ",");
4231 		/* we allow only one expression at a time */
4232 		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
4233 		if (ret)
4234 			printk(KERN_DEBUG "ftrace: function %s not "
4235 					  "traceable\n", func);
4236 	}
4237 }
4238 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4239 
4240 void __init
4241 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4242 {
4243 	char *func;
4244 
4245 	ftrace_ops_init(ops);
4246 
4247 	while (buf) {
4248 		func = strsep(&buf, ",");
4249 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
4250 	}
4251 }
4252 
4253 static void __init set_ftrace_early_filters(void)
4254 {
4255 	if (ftrace_filter_buf[0])
4256 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4257 	if (ftrace_notrace_buf[0])
4258 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4259 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4260 	if (ftrace_graph_buf[0])
4261 		set_ftrace_early_graph(ftrace_graph_buf, 1);
4262 	if (ftrace_graph_notrace_buf[0])
4263 		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4264 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4265 }
4266 
4267 int ftrace_regex_release(struct inode *inode, struct file *file)
4268 {
4269 	struct seq_file *m = (struct seq_file *)file->private_data;
4270 	struct ftrace_iterator *iter;
4271 	struct ftrace_hash **orig_hash;
4272 	struct ftrace_hash *old_hash;
4273 	struct trace_parser *parser;
4274 	int filter_hash;
4275 	int ret;
4276 
4277 	if (file->f_mode & FMODE_READ) {
4278 		iter = m->private;
4279 		seq_release(inode, file);
4280 	} else
4281 		iter = file->private_data;
4282 
4283 	parser = &iter->parser;
4284 	if (trace_parser_loaded(parser)) {
4285 		parser->buffer[parser->idx] = 0;
4286 		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4287 	}
4288 
4289 	trace_parser_put(parser);
4290 
4291 	mutex_lock(&iter->ops->func_hash->regex_lock);
4292 
4293 	if (file->f_mode & FMODE_WRITE) {
4294 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4295 
4296 		if (filter_hash)
4297 			orig_hash = &iter->ops->func_hash->filter_hash;
4298 		else
4299 			orig_hash = &iter->ops->func_hash->notrace_hash;
4300 
4301 		mutex_lock(&ftrace_lock);
4302 		old_hash = *orig_hash;
4303 		ret = ftrace_hash_move(iter->ops, filter_hash,
4304 				       orig_hash, iter->hash);
4305 		if (!ret) {
4306 			ftrace_ops_update_code(iter->ops, old_hash);
4307 			free_ftrace_hash_rcu(old_hash);
4308 		}
4309 		mutex_unlock(&ftrace_lock);
4310 	}
4311 
4312 	mutex_unlock(&iter->ops->func_hash->regex_lock);
4313 	free_ftrace_hash(iter->hash);
4314 	kfree(iter);
4315 
4316 	return 0;
4317 }
4318 
4319 static const struct file_operations ftrace_avail_fops = {
4320 	.open = ftrace_avail_open,
4321 	.read = seq_read,
4322 	.llseek = seq_lseek,
4323 	.release = seq_release_private,
4324 };
4325 
4326 static const struct file_operations ftrace_enabled_fops = {
4327 	.open = ftrace_enabled_open,
4328 	.read = seq_read,
4329 	.llseek = seq_lseek,
4330 	.release = seq_release_private,
4331 };
4332 
4333 static const struct file_operations ftrace_filter_fops = {
4334 	.open = ftrace_filter_open,
4335 	.read = seq_read,
4336 	.write = ftrace_filter_write,
4337 	.llseek = tracing_lseek,
4338 	.release = ftrace_regex_release,
4339 };
4340 
4341 static const struct file_operations ftrace_notrace_fops = {
4342 	.open = ftrace_notrace_open,
4343 	.read = seq_read,
4344 	.write = ftrace_notrace_write,
4345 	.llseek = tracing_lseek,
4346 	.release = ftrace_regex_release,
4347 };
4348 
4349 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4350 
4351 static DEFINE_MUTEX(graph_lock);
4352 
4353 int ftrace_graph_count;
4354 int ftrace_graph_notrace_count;
4355 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4356 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
4357 
4358 struct ftrace_graph_data {
4359 	unsigned long *table;
4360 	size_t size;
4361 	int *count;
4362 	const struct seq_operations *seq_ops;
4363 };
4364 
4365 static void *
4366 __g_next(struct seq_file *m, loff_t *pos)
4367 {
4368 	struct ftrace_graph_data *fgd = m->private;
4369 
4370 	if (*pos >= *fgd->count)
4371 		return NULL;
4372 	return &fgd->table[*pos];
4373 }
4374 
4375 static void *
4376 g_next(struct seq_file *m, void *v, loff_t *pos)
4377 {
4378 	(*pos)++;
4379 	return __g_next(m, pos);
4380 }
4381 
4382 static void *g_start(struct seq_file *m, loff_t *pos)
4383 {
4384 	struct ftrace_graph_data *fgd = m->private;
4385 
4386 	mutex_lock(&graph_lock);
4387 
4388 	/* Nothing set; tell g_show to print that all functions are enabled */
4389 	if (!*fgd->count && !*pos)
4390 		return (void *)1;
4391 
4392 	return __g_next(m, pos);
4393 }
4394 
4395 static void g_stop(struct seq_file *m, void *p)
4396 {
4397 	mutex_unlock(&graph_lock);
4398 }
4399 
4400 static int g_show(struct seq_file *m, void *v)
4401 {
4402 	unsigned long *ptr = v;
4403 
4404 	if (!ptr)
4405 		return 0;
4406 
4407 	if (ptr == (unsigned long *)1) {
4408 		struct ftrace_graph_data *fgd = m->private;
4409 
4410 		if (fgd->table == ftrace_graph_funcs)
4411 			seq_puts(m, "#### all functions enabled ####\n");
4412 		else
4413 			seq_puts(m, "#### no functions disabled ####\n");
4414 		return 0;
4415 	}
4416 
4417 	seq_printf(m, "%ps\n", (void *)*ptr);
4418 
4419 	return 0;
4420 }
4421 
4422 static const struct seq_operations ftrace_graph_seq_ops = {
4423 	.start = g_start,
4424 	.next = g_next,
4425 	.stop = g_stop,
4426 	.show = g_show,
4427 };
4428 
4429 static int
4430 __ftrace_graph_open(struct inode *inode, struct file *file,
4431 		    struct ftrace_graph_data *fgd)
4432 {
4433 	int ret = 0;
4434 
4435 	mutex_lock(&graph_lock);
4436 	if ((file->f_mode & FMODE_WRITE) &&
4437 	    (file->f_flags & O_TRUNC)) {
4438 		*fgd->count = 0;
4439 		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
4440 	}
4441 	mutex_unlock(&graph_lock);
4442 
4443 	if (file->f_mode & FMODE_READ) {
4444 		ret = seq_open(file, fgd->seq_ops);
4445 		if (!ret) {
4446 			struct seq_file *m = file->private_data;
4447 			m->private = fgd;
4448 		}
4449 	} else
4450 		file->private_data = fgd;
4451 
4452 	return ret;
4453 }
4454 
4455 static int
4456 ftrace_graph_open(struct inode *inode, struct file *file)
4457 {
4458 	struct ftrace_graph_data *fgd;
4459 
4460 	if (unlikely(ftrace_disabled))
4461 		return -ENODEV;
4462 
4463 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4464 	if (fgd == NULL)
4465 		return -ENOMEM;
4466 
4467 	fgd->table = ftrace_graph_funcs;
4468 	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4469 	fgd->count = &ftrace_graph_count;
4470 	fgd->seq_ops = &ftrace_graph_seq_ops;
4471 
4472 	return __ftrace_graph_open(inode, file, fgd);
4473 }
4474 
4475 static int
4476 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4477 {
4478 	struct ftrace_graph_data *fgd;
4479 
4480 	if (unlikely(ftrace_disabled))
4481 		return -ENODEV;
4482 
4483 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4484 	if (fgd == NULL)
4485 		return -ENOMEM;
4486 
4487 	fgd->table = ftrace_graph_notrace_funcs;
4488 	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4489 	fgd->count = &ftrace_graph_notrace_count;
4490 	fgd->seq_ops = &ftrace_graph_seq_ops;
4491 
4492 	return __ftrace_graph_open(inode, file, fgd);
4493 }
4494 
4495 static int
4496 ftrace_graph_release(struct inode *inode, struct file *file)
4497 {
4498 	if (file->f_mode & FMODE_READ) {
4499 		struct seq_file *m = file->private_data;
4500 
4501 		kfree(m->private);
4502 		seq_release(inode, file);
4503 	} else {
4504 		kfree(file->private_data);
4505 	}
4506 
4507 	return 0;
4508 }
4509 
4510 static int
4511 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
4512 {
4513 	struct dyn_ftrace *rec;
4514 	struct ftrace_page *pg;
4515 	int search_len;
4516 	int fail = 1;
4517 	int type, not;
4518 	char *search;
4519 	bool exists;
4520 	int i;
4521 
4522 	/* decode regex */
4523 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
4524 	if (!not && *idx >= size)
4525 		return -EBUSY;
4526 
4527 	search_len = strlen(search);
4528 
4529 	mutex_lock(&ftrace_lock);
4530 
4531 	if (unlikely(ftrace_disabled)) {
4532 		mutex_unlock(&ftrace_lock);
4533 		return -ENODEV;
4534 	}
4535 
4536 	do_for_each_ftrace_rec(pg, rec) {
4537 
4538 		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4539 			/* if it is in the array */
4540 			exists = false;
4541 			for (i = 0; i < *idx; i++) {
4542 				if (array[i] == rec->ip) {
4543 					exists = true;
4544 					break;
4545 				}
4546 			}
4547 
4548 			if (!not) {
4549 				fail = 0;
4550 				if (!exists) {
4551 					array[(*idx)++] = rec->ip;
4552 					if (*idx >= size)
4553 						goto out;
4554 				}
4555 			} else {
4556 				if (exists) {
4557 					array[i] = array[--(*idx)];
4558 					array[*idx] = 0;
4559 					fail = 0;
4560 				}
4561 			}
4562 		}
4563 	} while_for_each_ftrace_rec();
4564 out:
4565 	mutex_unlock(&ftrace_lock);
4566 
4567 	if (fail)
4568 		return -EINVAL;
4569 
4570 	return 0;
4571 }
4572 
4573 static ssize_t
4574 ftrace_graph_write(struct file *file, const char __user *ubuf,
4575 		   size_t cnt, loff_t *ppos)
4576 {
4577 	struct trace_parser parser;
4578 	ssize_t read, ret = 0;
4579 	struct ftrace_graph_data *fgd = file->private_data;
4580 
4581 	if (!cnt)
4582 		return 0;
4583 
4584 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4585 		return -ENOMEM;
4586 
4587 	read = trace_get_user(&parser, ubuf, cnt, ppos);
4588 
4589 	if (read >= 0 && trace_parser_loaded((&parser))) {
4590 		parser.buffer[parser.idx] = 0;
4591 
4592 		mutex_lock(&graph_lock);
4593 
4594 		/* we allow only one expression at a time */
4595 		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4596 				      parser.buffer);
4597 
4598 		mutex_unlock(&graph_lock);
4599 	}
4600 
4601 	if (!ret)
4602 		ret = read;
4603 
4604 	trace_parser_put(&parser);
4605 
4606 	return ret;
4607 }
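/*
 * Usage note (illustrative, not part of the original file): this write
 * handler backs set_graph_function and set_graph_notrace, e.g.
 *
 *	# echo do_IRQ > set_graph_function
 *	# echo '*spin*' > set_graph_notrace
 *
 * Each write may contain only one expression, as enforced by
 * ftrace_set_func() above.
 */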
4608 
4609 static const struct file_operations ftrace_graph_fops = {
4610 	.open		= ftrace_graph_open,
4611 	.read		= seq_read,
4612 	.write		= ftrace_graph_write,
4613 	.llseek		= tracing_lseek,
4614 	.release	= ftrace_graph_release,
4615 };
4616 
4617 static const struct file_operations ftrace_graph_notrace_fops = {
4618 	.open		= ftrace_graph_notrace_open,
4619 	.read		= seq_read,
4620 	.write		= ftrace_graph_write,
4621 	.llseek		= tracing_lseek,
4622 	.release	= ftrace_graph_release,
4623 };
4624 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4625 
4626 void ftrace_create_filter_files(struct ftrace_ops *ops,
4627 				struct dentry *parent)
4628 {
4629 
4630 	trace_create_file("set_ftrace_filter", 0644, parent,
4631 			  ops, &ftrace_filter_fops);
4632 
4633 	trace_create_file("set_ftrace_notrace", 0644, parent,
4634 			  ops, &ftrace_notrace_fops);
4635 }
4636 
4637 /*
4638  * The name "destroy_filter_files" is really a misnomer. In the
4639  * future it may actually delete the files, but for now it is
4640  * really intended to make sure the ops passed in are disabled
4641  * and that when this function returns, the caller is free to
4642  * free the ops.
4643  *
4644  * The "destroy" name is only to match the "create" name that this
4645  * should be paired with.
4646  */
4647 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4648 {
4649 	mutex_lock(&ftrace_lock);
4650 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
4651 		ftrace_shutdown(ops, 0);
4652 	ops->flags |= FTRACE_OPS_FL_DELETED;
4653 	mutex_unlock(&ftrace_lock);
4654 }
4655 
4656 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4657 {
4658 
4659 	trace_create_file("available_filter_functions", 0444,
4660 			d_tracer, NULL, &ftrace_avail_fops);
4661 
4662 	trace_create_file("enabled_functions", 0444,
4663 			d_tracer, NULL, &ftrace_enabled_fops);
4664 
4665 	ftrace_create_filter_files(&global_ops, d_tracer);
4666 
4667 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4668 	trace_create_file("set_graph_function", 0444, d_tracer,
4669 				    NULL,
4670 				    &ftrace_graph_fops);
4671 	trace_create_file("set_graph_notrace", 0444, d_tracer,
4672 				    NULL,
4673 				    &ftrace_graph_notrace_fops);
4674 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4675 
4676 	return 0;
4677 }
4678 
4679 static int ftrace_cmp_ips(const void *a, const void *b)
4680 {
4681 	const unsigned long *ipa = a;
4682 	const unsigned long *ipb = b;
4683 
4684 	if (*ipa > *ipb)
4685 		return 1;
4686 	if (*ipa < *ipb)
4687 		return -1;
4688 	return 0;
4689 }
4690 
4691 static void ftrace_swap_ips(void *a, void *b, int size)
4692 {
4693 	unsigned long *ipa = a;
4694 	unsigned long *ipb = b;
4695 	unsigned long t;
4696 
4697 	t = *ipa;
4698 	*ipa = *ipb;
4699 	*ipb = t;
4700 }
4701 
4702 static int ftrace_process_locs(struct module *mod,
4703 			       unsigned long *start,
4704 			       unsigned long *end)
4705 {
4706 	struct ftrace_page *start_pg;
4707 	struct ftrace_page *pg;
4708 	struct dyn_ftrace *rec;
4709 	unsigned long count;
4710 	unsigned long *p;
4711 	unsigned long addr;
4712 	unsigned long flags = 0; /* Shut up gcc */
4713 	int ret = -ENOMEM;
4714 
4715 	count = end - start;
4716 
4717 	if (!count)
4718 		return 0;
4719 
4720 	sort(start, count, sizeof(*start),
4721 	     ftrace_cmp_ips, ftrace_swap_ips);
4722 
4723 	start_pg = ftrace_allocate_pages(count);
4724 	if (!start_pg)
4725 		return -ENOMEM;
4726 
4727 	mutex_lock(&ftrace_lock);
4728 
4729 	/*
4730 	 * The core kernel and each module need their own pages, as
4731 	 * modules will free them when they are removed.
4732 	 * Force a new page to be allocated for modules.
4733 	 */
4734 	if (!mod) {
4735 		WARN_ON(ftrace_pages || ftrace_pages_start);
4736 		/* First initialization */
4737 		ftrace_pages = ftrace_pages_start = start_pg;
4738 	} else {
4739 		if (!ftrace_pages)
4740 			goto out;
4741 
4742 		if (WARN_ON(ftrace_pages->next)) {
4743 			/* Hmm, we have free pages? */
4744 			while (ftrace_pages->next)
4745 				ftrace_pages = ftrace_pages->next;
4746 		}
4747 
4748 		ftrace_pages->next = start_pg;
4749 	}
4750 
4751 	p = start;
4752 	pg = start_pg;
4753 	while (p < end) {
4754 		addr = ftrace_call_adjust(*p++);
4755 		/*
4756 		 * Some architecture linkers will pad between
4757 		 * the different mcount_loc sections of different
4758 		 * object files to satisfy alignments.
4759 		 * Skip any NULL pointers.
4760 		 */
4761 		if (!addr)
4762 			continue;
4763 
4764 		if (pg->index == pg->size) {
4765 			/* We should have allocated enough */
4766 			if (WARN_ON(!pg->next))
4767 				break;
4768 			pg = pg->next;
4769 		}
4770 
4771 		rec = &pg->records[pg->index++];
4772 		rec->ip = addr;
4773 	}
4774 
4775 	/* We should have used all pages */
4776 	WARN_ON(pg->next);
4777 
4778 	/* Assign the last page to ftrace_pages */
4779 	ftrace_pages = pg;
4780 
4781 	/*
4782 	 * We only need to disable interrupts on start up
4783 	 * because we are modifying code that an interrupt
4784 	 * may execute, and the modification is not atomic.
4785 	 * But for modules, nothing runs the code we modify
4786 	 * until we are finished with it, and there's no
4787 	 * reason to cause large interrupt latencies while we do it.
4788 	 */
4789 	if (!mod)
4790 		local_irq_save(flags);
4791 	ftrace_update_code(mod, start_pg);
4792 	if (!mod)
4793 		local_irq_restore(flags);
4794 	ret = 0;
4795  out:
4796 	mutex_unlock(&ftrace_lock);
4797 
4798 	return ret;
4799 }
4800 
4801 #ifdef CONFIG_MODULES
4802 
4803 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4804 
4805 void ftrace_release_mod(struct module *mod)
4806 {
4807 	struct dyn_ftrace *rec;
4808 	struct ftrace_page **last_pg;
4809 	struct ftrace_page *pg;
4810 	int order;
4811 
4812 	mutex_lock(&ftrace_lock);
4813 
4814 	if (ftrace_disabled)
4815 		goto out_unlock;
4816 
4817 	/*
4818 	 * Each module has its own ftrace_pages, remove
4819 	 * them from the list.
4820 	 */
4821 	last_pg = &ftrace_pages_start;
4822 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4823 		rec = &pg->records[0];
4824 		if (within_module_core(rec->ip, mod)) {
4825 			/*
4826 			 * As core pages are first, the first
4827 			 * page should never be a module page.
4828 			 */
4829 			if (WARN_ON(pg == ftrace_pages_start))
4830 				goto out_unlock;
4831 
4832 			/* Check if we are deleting the last page */
4833 			if (pg == ftrace_pages)
4834 				ftrace_pages = next_to_ftrace_page(last_pg);
4835 
4836 			*last_pg = pg->next;
4837 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4838 			free_pages((unsigned long)pg->records, order);
4839 			kfree(pg);
4840 		} else
4841 			last_pg = &pg->next;
4842 	}
4843  out_unlock:
4844 	mutex_unlock(&ftrace_lock);
4845 }
4846 
4847 static void ftrace_init_module(struct module *mod,
4848 			       unsigned long *start, unsigned long *end)
4849 {
4850 	if (ftrace_disabled || start == end)
4851 		return;
4852 	ftrace_process_locs(mod, start, end);
4853 }
4854 
4855 void ftrace_module_init(struct module *mod)
4856 {
4857 	ftrace_init_module(mod, mod->ftrace_callsites,
4858 			   mod->ftrace_callsites +
4859 			   mod->num_ftrace_callsites);
4860 }
4861 
4862 static int ftrace_module_notify_exit(struct notifier_block *self,
4863 				     unsigned long val, void *data)
4864 {
4865 	struct module *mod = data;
4866 
4867 	if (val == MODULE_STATE_GOING)
4868 		ftrace_release_mod(mod);
4869 
4870 	return 0;
4871 }
4872 #else
4873 static int ftrace_module_notify_exit(struct notifier_block *self,
4874 				     unsigned long val, void *data)
4875 {
4876 	return 0;
4877 }
4878 #endif /* CONFIG_MODULES */
4879 
4880 struct notifier_block ftrace_module_exit_nb = {
4881 	.notifier_call = ftrace_module_notify_exit,
4882 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
4883 };
4884 
4885 void __init ftrace_init(void)
4886 {
4887 	extern unsigned long __start_mcount_loc[];
4888 	extern unsigned long __stop_mcount_loc[];
4889 	unsigned long count, flags;
4890 	int ret;
4891 
4892 	local_irq_save(flags);
4893 	ret = ftrace_dyn_arch_init();
4894 	local_irq_restore(flags);
4895 	if (ret)
4896 		goto failed;
4897 
4898 	count = __stop_mcount_loc - __start_mcount_loc;
4899 	if (!count) {
4900 		pr_info("ftrace: No functions to be traced?\n");
4901 		goto failed;
4902 	}
4903 
4904 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
4905 		count, count / ENTRIES_PER_PAGE + 1);
4906 
4907 	last_ftrace_enabled = ftrace_enabled = 1;
4908 
4909 	ret = ftrace_process_locs(NULL,
4910 				  __start_mcount_loc,
4911 				  __stop_mcount_loc);
4912 
4913 	ret = register_module_notifier(&ftrace_module_exit_nb);
4914 	if (ret)
4915 		pr_warning("Failed to register trace ftrace module exit notifier\n");
4916 
4917 	set_ftrace_early_filters();
4918 
4919 	return;
4920  failed:
4921 	ftrace_disabled = 1;
4922 }
4923 
4924 /* Do nothing if arch does not support this */
4925 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
4926 {
4927 }
4928 
4929 static void ftrace_update_trampoline(struct ftrace_ops *ops)
4930 {
4931 
4932 /*
4933  * Currently there's no safe way to free a trampoline when the kernel
4934  * is configured with PREEMPT. That is because a task could be preempted
4935  * when it jumped to the trampoline, it may be preempted for a long time
4936  * depending on the system load, and currently there's no way to know
4937  * when it will be off the trampoline. If the trampoline is freed
4938  * too early, when the task runs again, it will be executing on freed
4939  * memory and crash.
4940  */
4941 #ifdef CONFIG_PREEMPT
4942 	/* Currently, only non dynamic ops can have a trampoline */
4943 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
4944 		return;
4945 #endif
4946 
4947 	arch_ftrace_update_trampoline(ops);
4948 }
4949 
4950 #else
4951 
4952 static struct ftrace_ops global_ops = {
4953 	.func			= ftrace_stub,
4954 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4955 };
4956 
4957 static int __init ftrace_nodyn_init(void)
4958 {
4959 	ftrace_enabled = 1;
4960 	return 0;
4961 }
4962 core_initcall(ftrace_nodyn_init);
4963 
4964 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4965 static inline void ftrace_startup_enable(int command) { }
4966 static inline void ftrace_startup_all(int command) { }
4967 /* Keep as macros so we do not need to define the commands */
4968 # define ftrace_startup(ops, command)					\
4969 	({								\
4970 		int ___ret = __register_ftrace_function(ops);		\
4971 		if (!___ret)						\
4972 			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
4973 		___ret;							\
4974 	})
4975 # define ftrace_shutdown(ops, command)					\
4976 	({								\
4977 		int ___ret = __unregister_ftrace_function(ops);		\
4978 		if (!___ret)						\
4979 			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
4980 		___ret;							\
4981 	})
4982 
4983 # define ftrace_startup_sysctl()	do { } while (0)
4984 # define ftrace_shutdown_sysctl()	do { } while (0)
4985 
4986 static inline int
4987 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4988 {
4989 	return 1;
4990 }
4991 
4992 static void ftrace_update_trampoline(struct ftrace_ops *ops)
4993 {
4994 }
4995 
4996 #endif /* CONFIG_DYNAMIC_FTRACE */
4997 
4998 __init void ftrace_init_global_array_ops(struct trace_array *tr)
4999 {
5000 	tr->ops = &global_ops;
5001 	tr->ops->private = tr;
5002 }
5003 
5004 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5005 {
5006 	/* If we filter on pids, update to use the pid function */
5007 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
5008 		if (WARN_ON(tr->ops->func != ftrace_stub))
5009 			printk("ftrace ops had %pS for function\n",
5010 			       tr->ops->func);
5011 		/* Only the top level instance does pid tracing */
5012 		if (!list_empty(&ftrace_pids)) {
5013 			set_ftrace_pid_function(func);
5014 			func = ftrace_pid_func;
5015 		}
5016 	}
5017 	tr->ops->func = func;
5018 	tr->ops->private = tr;
5019 }
5020 
5021 void ftrace_reset_array_ops(struct trace_array *tr)
5022 {
5023 	tr->ops->func = ftrace_stub;
5024 }
5025 
5026 static void
5027 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
5028 			struct ftrace_ops *op, struct pt_regs *regs)
5029 {
5030 	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
5031 		return;
5032 
5033 	/*
5034 	 * Some of the ops may be dynamically allocated,
5035 	 * they must be freed after a synchronize_sched().
5036 	 */
5037 	preempt_disable_notrace();
5038 	trace_recursion_set(TRACE_CONTROL_BIT);
5039 
5040 	/*
5041 	 * Control funcs (perf) use RCU. Only trace if
5042 	 * RCU is currently active.
5043 	 */
5044 	if (!rcu_is_watching())
5045 		goto out;
5046 
5047 	do_for_each_ftrace_op(op, ftrace_control_list) {
5048 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
5049 		    !ftrace_function_local_disabled(op) &&
5050 		    ftrace_ops_test(op, ip, regs))
5051 			op->func(ip, parent_ip, op, regs);
5052 	} while_for_each_ftrace_op(op);
5053  out:
5054 	trace_recursion_clear(TRACE_CONTROL_BIT);
5055 	preempt_enable_notrace();
5056 }
5057 
5058 static struct ftrace_ops control_ops = {
5059 	.func	= ftrace_ops_control_func,
5060 	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
5061 	INIT_OPS_HASH(control_ops)
5062 };
5063 
5064 static inline void
5065 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5066 		       struct ftrace_ops *ignored, struct pt_regs *regs)
5067 {
5068 	struct ftrace_ops *op;
5069 	int bit;
5070 
5071 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5072 	if (bit < 0)
5073 		return;
5074 
5075 	/*
5076 	 * Some of the ops may be dynamically allocated,
5077 	 * they must be freed after a synchronize_sched().
5078 	 */
5079 	preempt_disable_notrace();
5080 	do_for_each_ftrace_op(op, ftrace_ops_list) {
5081 		if (ftrace_ops_test(op, ip, regs)) {
5082 			if (FTRACE_WARN_ON(!op->func)) {
5083 				pr_warn("op=%p %pS\n", op, op);
5084 				goto out;
5085 			}
5086 			op->func(ip, parent_ip, op, regs);
5087 		}
5088 	} while_for_each_ftrace_op(op);
5089 out:
5090 	preempt_enable_notrace();
5091 	trace_clear_recursion(bit);
5092 }
5093 
5094 /*
5095  * Some archs only support passing ip and parent_ip. Even though
5096  * the list function ignores the op parameter, we do not want any
5097  * C side effects, where a function is called without the caller
5098  * sending a third parameter.
5099  * Archs are expected to support both regs and ftrace_ops at the same time.
5100  * If they support ftrace_ops, it is assumed they support regs.
5101  * If callbacks want to use regs, they must either check for regs
5102  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
5103  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
5104  * An architecture can pass partial regs with ftrace_ops and still
5105  * set ARCH_SUPPORTS_FTRACE_OPS.
5106  */
5107 #if ARCH_SUPPORTS_FTRACE_OPS
5108 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5109 				 struct ftrace_ops *op, struct pt_regs *regs)
5110 {
5111 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
5112 }
5113 #else
5114 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
5115 {
5116 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
5117 }
5118 #endif
5119 
5120 /*
5121  * If there's only one function registered but it does not support
5122  * recursion, this function will be called by the mcount trampoline.
5123  * This function will handle recursion protection.
5124  */
5125 static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
5126 				   struct ftrace_ops *op, struct pt_regs *regs)
5127 {
5128 	int bit;
5129 
5130 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5131 	if (bit < 0)
5132 		return;
5133 
5134 	op->func(ip, parent_ip, op, regs);
5135 
5136 	trace_clear_recursion(bit);
5137 }
5138 
5139 /**
5140  * ftrace_ops_get_func - get the function a trampoline should call
5141  * @ops: the ops to get the function for
5142  *
5143  * Normally the mcount trampoline will call the ops->func, but there
5144  * are times that it should not. For example, if the ops does not
5145  * have its own recursion protection, then it should call the
5146  * ftrace_ops_recurs_func() instead.
5147  *
5148  * Returns the function that the trampoline should call for @ops.
5149  */
5150 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
5151 {
5152 	/*
5153 	 * If this is a dynamic ops or we force list func,
5154 	 * then it needs to call the list anyway.
5155 	 */
5156 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
5157 		return ftrace_ops_list_func;
5158 
5159 	/*
5160 	 * If the func handles its own recursion, call it directly.
5161 	 * Otherwise call the recursion protected function that
5162 	 * will call the ftrace ops function.
5163 	 */
5164 	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
5165 		return ftrace_ops_recurs_func;
5166 
5167 	return ops->func;
5168 }
5169 
5170 static void clear_ftrace_swapper(void)
5171 {
5172 	struct task_struct *p;
5173 	int cpu;
5174 
5175 	get_online_cpus();
5176 	for_each_online_cpu(cpu) {
5177 		p = idle_task(cpu);
5178 		clear_tsk_trace_trace(p);
5179 	}
5180 	put_online_cpus();
5181 }
5182 
5183 static void set_ftrace_swapper(void)
5184 {
5185 	struct task_struct *p;
5186 	int cpu;
5187 
5188 	get_online_cpus();
5189 	for_each_online_cpu(cpu) {
5190 		p = idle_task(cpu);
5191 		set_tsk_trace_trace(p);
5192 	}
5193 	put_online_cpus();
5194 }
5195 
5196 static void clear_ftrace_pid(struct pid *pid)
5197 {
5198 	struct task_struct *p;
5199 
5200 	rcu_read_lock();
5201 	do_each_pid_task(pid, PIDTYPE_PID, p) {
5202 		clear_tsk_trace_trace(p);
5203 	} while_each_pid_task(pid, PIDTYPE_PID, p);
5204 	rcu_read_unlock();
5205 
5206 	put_pid(pid);
5207 }
5208 
5209 static void set_ftrace_pid(struct pid *pid)
5210 {
5211 	struct task_struct *p;
5212 
5213 	rcu_read_lock();
5214 	do_each_pid_task(pid, PIDTYPE_PID, p) {
5215 		set_tsk_trace_trace(p);
5216 	} while_each_pid_task(pid, PIDTYPE_PID, p);
5217 	rcu_read_unlock();
5218 }
5219 
5220 static void clear_ftrace_pid_task(struct pid *pid)
5221 {
5222 	if (pid == ftrace_swapper_pid)
5223 		clear_ftrace_swapper();
5224 	else
5225 		clear_ftrace_pid(pid);
5226 }
5227 
5228 static void set_ftrace_pid_task(struct pid *pid)
5229 {
5230 	if (pid == ftrace_swapper_pid)
5231 		set_ftrace_swapper();
5232 	else
5233 		set_ftrace_pid(pid);
5234 }
5235 
5236 static int ftrace_pid_add(int p)
5237 {
5238 	struct pid *pid;
5239 	struct ftrace_pid *fpid;
5240 	int ret = -EINVAL;
5241 
5242 	mutex_lock(&ftrace_lock);
5243 
5244 	if (!p)
5245 		pid = ftrace_swapper_pid;
5246 	else
5247 		pid = find_get_pid(p);
5248 
5249 	if (!pid)
5250 		goto out;
5251 
5252 	ret = 0;
5253 
5254 	list_for_each_entry(fpid, &ftrace_pids, list)
5255 		if (fpid->pid == pid)
5256 			goto out_put;
5257 
5258 	ret = -ENOMEM;
5259 
5260 	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
5261 	if (!fpid)
5262 		goto out_put;
5263 
5264 	list_add(&fpid->list, &ftrace_pids);
5265 	fpid->pid = pid;
5266 
5267 	set_ftrace_pid_task(pid);
5268 
5269 	ftrace_update_pid_func();
5270 
5271 	ftrace_startup_all(0);
5272 
5273 	mutex_unlock(&ftrace_lock);
5274 	return 0;
5275 
5276 out_put:
5277 	if (pid != ftrace_swapper_pid)
5278 		put_pid(pid);
5279 
5280 out:
5281 	mutex_unlock(&ftrace_lock);
5282 	return ret;
5283 }
5284 
5285 static void ftrace_pid_reset(void)
5286 {
5287 	struct ftrace_pid *fpid, *safe;
5288 
5289 	mutex_lock(&ftrace_lock);
5290 	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
5291 		struct pid *pid = fpid->pid;
5292 
5293 		clear_ftrace_pid_task(pid);
5294 
5295 		list_del(&fpid->list);
5296 		kfree(fpid);
5297 	}
5298 
5299 	ftrace_update_pid_func();
5300 	ftrace_startup_all(0);
5301 
5302 	mutex_unlock(&ftrace_lock);
5303 }
5304 
5305 static void *fpid_start(struct seq_file *m, loff_t *pos)
5306 {
5307 	mutex_lock(&ftrace_lock);
5308 
5309 	if (list_empty(&ftrace_pids) && (!*pos))
5310 		return (void *) 1;
5311 
5312 	return seq_list_start(&ftrace_pids, *pos);
5313 }
5314 
5315 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5316 {
5317 	if (v == (void *)1)
5318 		return NULL;
5319 
5320 	return seq_list_next(v, &ftrace_pids, pos);
5321 }
5322 
5323 static void fpid_stop(struct seq_file *m, void *p)
5324 {
5325 	mutex_unlock(&ftrace_lock);
5326 }
5327 
5328 static int fpid_show(struct seq_file *m, void *v)
5329 {
5330 	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
5331 
5332 	if (v == (void *)1) {
5333 		seq_puts(m, "no pid\n");
5334 		return 0;
5335 	}
5336 
5337 	if (fpid->pid == ftrace_swapper_pid)
5338 		seq_puts(m, "swapper tasks\n");
5339 	else
5340 		seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5341 
5342 	return 0;
5343 }
5344 
5345 static const struct seq_operations ftrace_pid_sops = {
5346 	.start = fpid_start,
5347 	.next = fpid_next,
5348 	.stop = fpid_stop,
5349 	.show = fpid_show,
5350 };
5351 
5352 static int
5353 ftrace_pid_open(struct inode *inode, struct file *file)
5354 {
5355 	int ret = 0;
5356 
5357 	if ((file->f_mode & FMODE_WRITE) &&
5358 	    (file->f_flags & O_TRUNC))
5359 		ftrace_pid_reset();
5360 
5361 	if (file->f_mode & FMODE_READ)
5362 		ret = seq_open(file, &ftrace_pid_sops);
5363 
5364 	return ret;
5365 }
5366 
5367 static ssize_t
5368 ftrace_pid_write(struct file *filp, const char __user *ubuf,
5369 		   size_t cnt, loff_t *ppos)
5370 {
5371 	char buf[64], *tmp;
5372 	long val;
5373 	int ret;
5374 
5375 	if (cnt >= sizeof(buf))
5376 		return -EINVAL;
5377 
5378 	if (copy_from_user(&buf, ubuf, cnt))
5379 		return -EFAULT;
5380 
5381 	buf[cnt] = 0;
5382 
5383 	/*
5384 	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5385 	 * to clear the filter quietly.
5386 	 */
5387 	tmp = strstrip(buf);
5388 	if (strlen(tmp) == 0)
5389 		return 1;
5390 
5391 	ret = kstrtol(tmp, 10, &val);
5392 	if (ret < 0)
5393 		return ret;
5394 
5395 	ret = ftrace_pid_add(val);
5396 
5397 	return ret ? ret : cnt;
5398 }
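/*
 * Usage note (illustrative, not part of the original file):
 *
 *	# echo 1234 > set_ftrace_pid	trace only pid 1234
 *	# echo 0 > set_ftrace_pid	trace the idle (swapper) tasks
 *	# echo > set_ftrace_pid		clear the pid filter quietly
 *
 * Writing 0 maps to ftrace_swapper_pid in ftrace_pid_add() above.
 */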
5399 
5400 static int
5401 ftrace_pid_release(struct inode *inode, struct file *file)
5402 {
5403 	if (file->f_mode & FMODE_READ)
5404 		seq_release(inode, file);
5405 
5406 	return 0;
5407 }
5408 
5409 static const struct file_operations ftrace_pid_fops = {
5410 	.open		= ftrace_pid_open,
5411 	.write		= ftrace_pid_write,
5412 	.read		= seq_read,
5413 	.llseek		= tracing_lseek,
5414 	.release	= ftrace_pid_release,
5415 };
5416 
5417 static __init int ftrace_init_debugfs(void)
5418 {
5419 	struct dentry *d_tracer;
5420 
5421 	d_tracer = tracing_init_dentry();
5422 	if (!d_tracer)
5423 		return 0;
5424 
5425 	ftrace_init_dyn_debugfs(d_tracer);
5426 
5427 	trace_create_file("set_ftrace_pid", 0644, d_tracer,
5428 			    NULL, &ftrace_pid_fops);
5429 
5430 	ftrace_profile_debugfs(d_tracer);
5431 
5432 	return 0;
5433 }
5434 fs_initcall(ftrace_init_debugfs);
5435 
5436 /**
5437  * ftrace_kill - kill ftrace
5438  *
5439  * This function should be used by panic code. It stops ftrace
5440  * but in a not so nice way. If you need to simply kill ftrace
5441  * from a non-atomic section, use ftrace_kill.
5442  */
5443 void ftrace_kill(void)
5444 {
5445 	ftrace_disabled = 1;
5446 	ftrace_enabled = 0;
5447 	clear_ftrace_function();
5448 }
5449 
5450 /**
5451  * ftrace_is_dead - Test if ftrace is dead or not.
5452  */
5453 int ftrace_is_dead(void)
5454 {
5455 	return ftrace_disabled;
5456 }
5457 
5458 /**
5459  * register_ftrace_function - register a function for profiling
5460  * @ops - ops structure that holds the function for profiling.
5461  *
5462  * Register a function to be called by all functions in the
5463  * kernel.
5464  *
5465  * Note: @ops->func and all the functions it calls must be labeled
5466  *       with "notrace", otherwise it will go into a
5467  *       recursive loop.
5468  */
5469 int register_ftrace_function(struct ftrace_ops *ops)
5470 {
5471 	int ret = -1;
5472 
5473 	ftrace_ops_init(ops);
5474 
5475 	mutex_lock(&ftrace_lock);
5476 
5477 	ret = ftrace_startup(ops, 0);
5478 
5479 	mutex_unlock(&ftrace_lock);
5480 
5481 	return ret;
5482 }
5483 EXPORT_SYMBOL_GPL(register_ftrace_function);
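/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * caller supplies an ops whose ->func runs for every traced function.
 * The names "my_callback" and "my_ops" are hypothetical:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */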
5484 
5485 /**
5486  * unregister_ftrace_function - unregister a function for profiling.
5487  * @ops - ops structure that holds the function to unregister
5488  *
5489  * Unregister a function that was added to be called by ftrace profiling.
5490  */
5491 int unregister_ftrace_function(struct ftrace_ops *ops)
5492 {
5493 	int ret;
5494 
5495 	mutex_lock(&ftrace_lock);
5496 	ret = ftrace_shutdown(ops, 0);
5497 	mutex_unlock(&ftrace_lock);
5498 
5499 	return ret;
5500 }
5501 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
5502 
5503 int
5504 ftrace_enable_sysctl(struct ctl_table *table, int write,
5505 		     void __user *buffer, size_t *lenp,
5506 		     loff_t *ppos)
5507 {
5508 	int ret = -ENODEV;
5509 
5510 	mutex_lock(&ftrace_lock);
5511 
5512 	if (unlikely(ftrace_disabled))
5513 		goto out;
5514 
5515 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
5516 
5517 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
5518 		goto out;
5519 
5520 	last_ftrace_enabled = !!ftrace_enabled;
5521 
5522 	if (ftrace_enabled) {
5523 
5524 		ftrace_startup_sysctl();
5525 
5526 		/* we are starting ftrace again */
5527 		if (ftrace_ops_list != &ftrace_list_end)
5528 			update_ftrace_function();
5529 
5530 	} else {
5531 		/* stopping ftrace calls (just send to ftrace_stub) */
5532 		ftrace_trace_function = ftrace_stub;
5533 
5534 		ftrace_shutdown_sysctl();
5535 	}
5536 
5537  out:
5538 	mutex_unlock(&ftrace_lock);
5539 	return ret;
5540 }
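/*
 * Usage note (illustrative, not part of the original file): this handler
 * backs the kernel.ftrace_enabled sysctl, e.g.
 *
 *	# sysctl kernel.ftrace_enabled=0
 *	# echo 1 > /proc/sys/kernel/ftrace_enabled
 */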
5541 
5542 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5543 
5544 static struct ftrace_ops graph_ops = {
5545 	.func			= ftrace_stub,
5546 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
5547 				   FTRACE_OPS_FL_INITIALIZED |
5548 				   FTRACE_OPS_FL_STUB,
5549 #ifdef FTRACE_GRAPH_TRAMP_ADDR
5550 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
5551 	/* trampoline_size is only needed for dynamically allocated tramps */
5552 #endif
5553 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5554 };
5555 
5556 static int ftrace_graph_active;
5557 
5558 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5559 {
5560 	return 0;
5561 }
5562 
5563 /* The callbacks that hook a function */
5564 trace_func_graph_ret_t ftrace_graph_return =
5565 			(trace_func_graph_ret_t)ftrace_stub;
5566 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
5567 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
5568 
5569 /* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
5570 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5571 {
5572 	int i;
5573 	int ret = 0;
5574 	unsigned long flags;
5575 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5576 	struct task_struct *g, *t;
5577 
5578 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5579 		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5580 					* sizeof(struct ftrace_ret_stack),
5581 					GFP_KERNEL);
5582 		if (!ret_stack_list[i]) {
5583 			start = 0;
5584 			end = i;
5585 			ret = -ENOMEM;
5586 			goto free;
5587 		}
5588 	}
5589 
5590 	read_lock_irqsave(&tasklist_lock, flags);
5591 	do_each_thread(g, t) {
5592 		if (start == end) {
5593 			ret = -EAGAIN;
5594 			goto unlock;
5595 		}
5596 
5597 		if (t->ret_stack == NULL) {
5598 			atomic_set(&t->tracing_graph_pause, 0);
5599 			atomic_set(&t->trace_overrun, 0);
5600 			t->curr_ret_stack = -1;
5601 			/* Make sure the tasks see the -1 first: */
5602 			smp_wmb();
5603 			t->ret_stack = ret_stack_list[start++];
5604 		}
5605 	} while_each_thread(g, t);
5606 
5607 unlock:
5608 	read_unlock_irqrestore(&tasklist_lock, flags);
5609 free:
5610 	for (i = start; i < end; i++)
5611 		kfree(ret_stack_list[i]);
5612 	return ret;
5613 }
5614 
5615 static void
5616 ftrace_graph_probe_sched_switch(void *ignore,
5617 			struct task_struct *prev, struct task_struct *next)
5618 {
5619 	unsigned long long timestamp;
5620 	int index;
5621 
5622 	/*
5623 	 * Does the user want to count the time a function was asleep?
5624 	 * If so, do not update the time stamps.
5625 	 */
5626 	if (trace_flags & TRACE_ITER_SLEEP_TIME)
5627 		return;
5628 
5629 	timestamp = trace_clock_local();
5630 
5631 	prev->ftrace_timestamp = timestamp;
5632 
5633 	/* only process tasks that we timestamped */
5634 	if (!next->ftrace_timestamp)
5635 		return;
5636 
5637 	/*
5638 	 * Update all the counters in next to make up for the
5639 	 * time next was sleeping.
5640 	 */
5641 	timestamp -= next->ftrace_timestamp;
5642 
5643 	for (index = next->curr_ret_stack; index >= 0; index--)
5644 		next->ret_stack[index].calltime += timestamp;
5645 }
5646 
5647 /* Allocate a return stack for each task */
5648 static int start_graph_tracing(void)
5649 {
5650 	struct ftrace_ret_stack **ret_stack_list;
5651 	int ret, cpu;
5652 
5653 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5654 				sizeof(struct ftrace_ret_stack *),
5655 				GFP_KERNEL);
5656 
5657 	if (!ret_stack_list)
5658 		return -ENOMEM;
5659 
5660 	/* The cpu_boot init_task->ret_stack will never be freed */
5661 	for_each_online_cpu(cpu) {
5662 		if (!idle_task(cpu)->ret_stack)
5663 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5664 	}
5665 
5666 	do {
5667 		ret = alloc_retstack_tasklist(ret_stack_list);
5668 	} while (ret == -EAGAIN);
5669 
5670 	if (!ret) {
5671 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5672 		if (ret)
5673 			pr_info("ftrace_graph: Couldn't activate tracepoint"
5674 				" probe to kernel_sched_switch\n");
5675 	}
5676 
5677 	kfree(ret_stack_list);
5678 	return ret;
5679 }
5680 
5681 /*
5682  * Hibernation protection.
5683  * The state of the current task is too unstable during
5684  * suspend/restore to disk. We want to protect against that.
5685  */
5686 static int
5687 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5688 							void *unused)
5689 {
5690 	switch (state) {
5691 	case PM_HIBERNATION_PREPARE:
5692 		pause_graph_tracing();
5693 		break;
5694 
5695 	case PM_POST_HIBERNATION:
5696 		unpause_graph_tracing();
5697 		break;
5698 	}
5699 	return NOTIFY_DONE;
5700 }
5701 
5702 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5703 {
5704 	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5705 		return 0;
5706 	return __ftrace_graph_entry(trace);
5707 }
5708 
5709 /*
5710  * The function graph tracer should only trace the functions defined
5711  * by set_ftrace_filter and set_ftrace_notrace. If another function
5712  * tracer ops is registered, the graph tracer needs to test the
5713  * function against the global ops, rather than trace any function
5714  * that any other ftrace_ops has registered.
5715  */
5716 static void update_function_graph_func(void)
5717 {
5718 	struct ftrace_ops *op;
5719 	bool do_test = false;
5720 
5721 	/*
5722 	 * The graph and global ops share the same set of functions
5723 	 * to test. If any other ops is on the list, then
5724 	 * the graph tracing needs to test if it is the function
5725 	 * it should call.
5726 	 */
5727 	do_for_each_ftrace_op(op, ftrace_ops_list) {
5728 		if (op != &global_ops && op != &graph_ops &&
5729 		    op != &ftrace_list_end) {
5730 			do_test = true;
5731 			/* in double loop, break out with goto */
5732 			goto out;
5733 		}
5734 	} while_for_each_ftrace_op(op);
5735  out:
5736 	if (do_test)
5737 		ftrace_graph_entry = ftrace_graph_entry_test;
5738 	else
5739 		ftrace_graph_entry = __ftrace_graph_entry;
5740 }
5741 
5742 static struct notifier_block ftrace_suspend_notifier = {
5743 	.notifier_call = ftrace_suspend_notifier_call,
5744 };
5745 
5746 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5747 			trace_func_graph_ent_t entryfunc)
5748 {
5749 	int ret = 0;
5750 
5751 	mutex_lock(&ftrace_lock);
5752 
5753 	/* we currently allow only one tracer registered at a time */
5754 	if (ftrace_graph_active) {
5755 		ret = -EBUSY;
5756 		goto out;
5757 	}
5758 
5759 	register_pm_notifier(&ftrace_suspend_notifier);
5760 
5761 	ftrace_graph_active++;
5762 	ret = start_graph_tracing();
5763 	if (ret) {
5764 		ftrace_graph_active--;
5765 		goto out;
5766 	}
5767 
5768 	ftrace_graph_return = retfunc;
5769 
5770 	/*
5771 	 * Update the indirect function to the entryfunc, and the
5772 	 * function that gets called to the entry_test first. Then
5773 	 * call the update fgraph entry function to determine if
5774 	 * the entryfunc should be called directly or not.
5775 	 */
5776 	__ftrace_graph_entry = entryfunc;
5777 	ftrace_graph_entry = ftrace_graph_entry_test;
5778 	update_function_graph_func();
5779 
5780 	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
5781 out:
5782 	mutex_unlock(&ftrace_lock);
5783 	return ret;
5784 }
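/*
 * Example (illustrative sketch, not part of the original file): a graph
 * tracer registers one entry and one return handler; the names below are
 * hypothetical:
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 */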
5785 
5786 void unregister_ftrace_graph(void)
5787 {
5788 	mutex_lock(&ftrace_lock);
5789 
5790 	if (unlikely(!ftrace_graph_active))
5791 		goto out;
5792 
5793 	ftrace_graph_active--;
5794 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5795 	ftrace_graph_entry = ftrace_graph_entry_stub;
5796 	__ftrace_graph_entry = ftrace_graph_entry_stub;
5797 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
5798 	unregister_pm_notifier(&ftrace_suspend_notifier);
5799 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5800 
5801 #ifdef CONFIG_DYNAMIC_FTRACE
5802 	/*
5803 	 * Function graph does not allocate the trampoline, but
5804 	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
5805 	 * if one was used.
5806 	 */
5807 	global_ops.trampoline = save_global_trampoline;
5808 	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
5809 		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
5810 #endif
5811 
5812  out:
5813 	mutex_unlock(&ftrace_lock);
5814 }
5815 
5816 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5817 
5818 static void
5819 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5820 {
5821 	atomic_set(&t->tracing_graph_pause, 0);
5822 	atomic_set(&t->trace_overrun, 0);
5823 	t->ftrace_timestamp = 0;
5824 	/* make curr_ret_stack visible before we add the ret_stack */
5825 	smp_wmb();
5826 	t->ret_stack = ret_stack;
5827 }
5828 
5829 /*
5830  * Allocate a return stack for the idle task. May be the first
5831  * time through, or it may be done by CPU hotplug online.
5832  */
5833 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5834 {
5835 	t->curr_ret_stack = -1;
5836 	/*
5837 	 * The idle task has no parent, it either has its own
5838 	 * stack or no stack at all.
5839 	 */
5840 	if (t->ret_stack)
5841 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5842 
5843 	if (ftrace_graph_active) {
5844 		struct ftrace_ret_stack *ret_stack;
5845 
5846 		ret_stack = per_cpu(idle_ret_stack, cpu);
5847 		if (!ret_stack) {
5848 			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5849 					    * sizeof(struct ftrace_ret_stack),
5850 					    GFP_KERNEL);
5851 			if (!ret_stack)
5852 				return;
5853 			per_cpu(idle_ret_stack, cpu) = ret_stack;
5854 		}
5855 		graph_init_task(t, ret_stack);
5856 	}
5857 }
5858 
5859 /* Allocate a return stack for newly created task */
5860 void ftrace_graph_init_task(struct task_struct *t)
5861 {
5862 	/* Make sure we do not use the parent ret_stack */
5863 	t->ret_stack = NULL;
5864 	t->curr_ret_stack = -1;
5865 
5866 	if (ftrace_graph_active) {
5867 		struct ftrace_ret_stack *ret_stack;
5868 
5869 		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5870 				* sizeof(struct ftrace_ret_stack),
5871 				GFP_KERNEL);
5872 		if (!ret_stack)
5873 			return;
5874 		graph_init_task(t, ret_stack);
5875 	}
5876 }
5877 
5878 void ftrace_graph_exit_task(struct task_struct *t)
5879 {
5880 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
5881 
5882 	t->ret_stack = NULL;
5883 	/* NULL must become visible to IRQs before we free it: */
5884 	barrier();
5885 
5886 	kfree(ret_stack);
5887 }
5888 #endif
5889