xref: /openbmc/linux/kernel/trace/ftrace.c (revision 6f52b16c)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/sched/task.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/suspend.h>
22 #include <linux/tracefs.h>
23 #include <linux/hardirq.h>
24 #include <linux/kthread.h>
25 #include <linux/uaccess.h>
26 #include <linux/bsearch.h>
27 #include <linux/module.h>
28 #include <linux/ftrace.h>
29 #include <linux/sysctl.h>
30 #include <linux/slab.h>
31 #include <linux/ctype.h>
32 #include <linux/sort.h>
33 #include <linux/list.h>
34 #include <linux/hash.h>
35 #include <linux/rcupdate.h>
36 
37 #include <trace/events/sched.h>
38 
39 #include <asm/sections.h>
40 #include <asm/setup.h>
41 
42 #include "trace_output.h"
43 #include "trace_stat.h"
44 
45 #define FTRACE_WARN_ON(cond)			\
46 	({					\
47 		int ___r = cond;		\
48 		if (WARN_ON(___r))		\
49 			ftrace_kill();		\
50 		___r;				\
51 	})
52 
53 #define FTRACE_WARN_ON_ONCE(cond)		\
54 	({					\
55 		int ___r = cond;		\
56 		if (WARN_ON_ONCE(___r))		\
57 			ftrace_kill();		\
58 		___r;				\
59 	})
60 
61 /* hash bits for specific function selection */
62 #define FTRACE_HASH_BITS 7
63 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
64 #define FTRACE_HASH_DEFAULT_BITS 10
65 #define FTRACE_HASH_MAX_BITS 12
66 
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_OPS_HASH(opsname)	\
69 	.func_hash		= &opsname.local_hash,			\
70 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71 #define ASSIGN_OPS_HASH(opsname, val) \
72 	.func_hash		= val, \
73 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
74 #else
75 #define INIT_OPS_HASH(opsname)
76 #define ASSIGN_OPS_HASH(opsname, val)
77 #endif
78 
79 static struct ftrace_ops ftrace_list_end __read_mostly = {
80 	.func		= ftrace_stub,
81 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
82 	INIT_OPS_HASH(ftrace_list_end)
83 };
84 
85 /* ftrace_enabled is a method to turn ftrace on or off */
86 int ftrace_enabled __read_mostly;
87 static int last_ftrace_enabled;
88 
89 /* Current function tracing op */
90 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
91 /* What to set function_trace_op to */
92 static struct ftrace_ops *set_function_trace_op;
93 
94 static bool ftrace_pids_enabled(struct ftrace_ops *ops)
95 {
96 	struct trace_array *tr;
97 
98 	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
99 		return false;
100 
101 	tr = ops->private;
102 
103 	return tr->function_pids != NULL;
104 }
105 
106 static void ftrace_update_trampoline(struct ftrace_ops *ops);
107 
108 /*
109  * ftrace_disabled is set when an anomaly is discovered.
110  * ftrace_disabled is much stronger than ftrace_enabled.
111  */
112 static int ftrace_disabled __read_mostly;
113 
114 static DEFINE_MUTEX(ftrace_lock);
115 
116 static struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
117 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
118 static struct ftrace_ops global_ops;
119 
120 #if ARCH_SUPPORTS_FTRACE_OPS
121 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
122 				 struct ftrace_ops *op, struct pt_regs *regs);
123 #else
124 /* See comment below, where ftrace_ops_list_func is defined */
125 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
126 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
127 #endif
128 
129 /*
130  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
131  * can use rcu_dereference_raw_notrace() is that elements removed from this list
132  * are simply leaked, so there is no need to interact with a grace-period
133  * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
134  * concurrent insertions into the ftrace_global_list.
135  *
136  * Silly Alpha and silly pointer-speculation compiler optimizations!
137  */
138 #define do_for_each_ftrace_op(op, list)			\
139 	op = rcu_dereference_raw_notrace(list);			\
140 	do
141 
142 /*
143  * Optimized for just a single item in the list (as that is the normal case).
144  */
145 #define while_for_each_ftrace_op(op)				\
146 	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
147 	       unlikely((op) != &ftrace_list_end))
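/*
 * Illustrative sketch of how the pair above is used (mirroring callers such
 * as ftrace_update_pid_func() later in this file); do_something() is a
 * placeholder name:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_ENABLED)
 *			do_something(op);
 *	} while_for_each_ftrace_op(op);
 */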
148 
149 static inline void ftrace_ops_init(struct ftrace_ops *ops)
150 {
151 #ifdef CONFIG_DYNAMIC_FTRACE
152 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
153 		mutex_init(&ops->local_hash.regex_lock);
154 		ops->func_hash = &ops->local_hash;
155 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
156 	}
157 #endif
158 }
159 
160 /**
161  * ftrace_nr_registered_ops - return number of ops registered
162  *
163  * Returns the number of ftrace_ops registered and tracing functions
164  */
165 int ftrace_nr_registered_ops(void)
166 {
167 	struct ftrace_ops *ops;
168 	int cnt = 0;
169 
170 	mutex_lock(&ftrace_lock);
171 
172 	for (ops = rcu_dereference_protected(ftrace_ops_list,
173 					     lockdep_is_held(&ftrace_lock));
174 	     ops != &ftrace_list_end;
175 	     ops = rcu_dereference_protected(ops->next,
176 					     lockdep_is_held(&ftrace_lock)))
177 		cnt++;
178 
179 	mutex_unlock(&ftrace_lock);
180 
181 	return cnt;
182 }
183 
184 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
185 			    struct ftrace_ops *op, struct pt_regs *regs)
186 {
187 	struct trace_array *tr = op->private;
188 
189 	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
190 		return;
191 
192 	op->saved_func(ip, parent_ip, op, regs);
193 }
194 
195 /**
196  * clear_ftrace_function - reset the ftrace function
197  *
198  * This NULLs the ftrace function and in essence stops
199  * tracing. There may be lag before the change takes effect.
200  */
201 void clear_ftrace_function(void)
202 {
203 	ftrace_trace_function = ftrace_stub;
204 }
205 
206 static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
207 {
208 	int cpu;
209 
210 	for_each_possible_cpu(cpu)
211 		*per_cpu_ptr(ops->disabled, cpu) = 1;
212 }
213 
214 static int per_cpu_ops_alloc(struct ftrace_ops *ops)
215 {
216 	int __percpu *disabled;
217 
218 	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
219 		return -EINVAL;
220 
221 	disabled = alloc_percpu(int);
222 	if (!disabled)
223 		return -ENOMEM;
224 
225 	ops->disabled = disabled;
226 	per_cpu_ops_disable_all(ops);
227 	return 0;
228 }
229 
230 static void ftrace_sync(struct work_struct *work)
231 {
232 	/*
233 	 * This function is just a stub to implement a hard force
234 	 * of synchronize_sched(). This requires synchronizing
235 	 * tasks even in userspace and idle.
236 	 *
237 	 * Yes, function tracing is rude.
238 	 */
239 }
240 
241 static void ftrace_sync_ipi(void *data)
242 {
243 	/* Probably not needed, but do it anyway */
244 	smp_rmb();
245 }
246 
247 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
248 static void update_function_graph_func(void);
249 
250 /* Both enabled by default (can be cleared by function_graph tracer flags) */
251 static bool fgraph_sleep_time = true;
252 static bool fgraph_graph_time = true;
253 
254 #else
255 static inline void update_function_graph_func(void) { }
256 #endif
257 
258 
259 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
260 {
261 	/*
262 	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
263 	 * then it needs to call the list anyway.
264 	 */
265 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
266 			  FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
267 		return ftrace_ops_list_func;
268 
269 	return ftrace_ops_get_func(ops);
270 }
271 
272 static void update_ftrace_function(void)
273 {
274 	ftrace_func_t func;
275 
276 	/*
277 	 * Prepare the ftrace_ops that the arch callback will use.
278 	 * If there's only one ftrace_ops registered, the ftrace_ops_list
279 	 * will point to the ops we want.
280 	 */
281 	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
282 						lockdep_is_held(&ftrace_lock));
283 
284 	/* If there's no ftrace_ops registered, just call the stub function */
285 	if (set_function_trace_op == &ftrace_list_end) {
286 		func = ftrace_stub;
287 
288 	/*
289 	 * If we are at the end of the list and this ops is
290 	 * recursion safe and not dynamic and the arch supports passing ops,
291 	 * then have the mcount trampoline call the function directly.
292 	 */
293 	} else if (rcu_dereference_protected(ftrace_ops_list->next,
294 			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
295 		func = ftrace_ops_get_list_func(ftrace_ops_list);
296 
297 	} else {
298 		/* Just use the default ftrace_ops */
299 		set_function_trace_op = &ftrace_list_end;
300 		func = ftrace_ops_list_func;
301 	}
302 
303 	update_function_graph_func();
304 
305 	/* If there's no change, then do nothing more here */
306 	if (ftrace_trace_function == func)
307 		return;
308 
309 	/*
310 	 * If we are using the list function, it doesn't care
311 	 * about the function_trace_ops.
312 	 */
313 	if (func == ftrace_ops_list_func) {
314 		ftrace_trace_function = func;
315 		/*
316 		 * Don't even bother setting function_trace_ops,
317 		 * it would be racy to do so anyway.
318 		 */
319 		return;
320 	}
321 
322 #ifndef CONFIG_DYNAMIC_FTRACE
323 	/*
324 	 * For static tracing, we need to be a bit more careful.
325 	 * The function change takes effect immediately. Thus,
326 	 * we need to coordinate the setting of the function_trace_ops
327 	 * with the setting of the ftrace_trace_function.
328 	 *
329 	 * Set the function to the list ops, which will call the
330 	 * function we want, albeit indirectly, but it handles the
331 	 * ftrace_ops and doesn't depend on function_trace_op.
332 	 */
333 	ftrace_trace_function = ftrace_ops_list_func;
334 	/*
335 	 * Make sure all CPUs see this. Yes this is slow, but static
336 	 * tracing is slow and nasty to have enabled.
337 	 */
338 	schedule_on_each_cpu(ftrace_sync);
339 	/* Now all cpus are using the list ops. */
340 	function_trace_op = set_function_trace_op;
341 	/* Make sure the function_trace_op is visible on all CPUs */
342 	smp_wmb();
343 	/* Nasty way to force a rmb on all cpus */
344 	smp_call_function(ftrace_sync_ipi, NULL, 1);
345 	/* OK, we are all set to update the ftrace_trace_function now! */
346 #endif /* !CONFIG_DYNAMIC_FTRACE */
347 
348 	ftrace_trace_function = func;
349 }
350 
351 int using_ftrace_ops_list_func(void)
352 {
353 	return ftrace_trace_function == ftrace_ops_list_func;
354 }
355 
356 static void add_ftrace_ops(struct ftrace_ops __rcu **list,
357 			   struct ftrace_ops *ops)
358 {
359 	rcu_assign_pointer(ops->next, *list);
360 
361 	/*
362 	 * We are entering ops into the list but another
363 	 * CPU might be walking that list. We need to make sure
364 	 * the ops->next pointer is valid before another CPU sees
365 	 * the ops pointer included in the list.
366 	 */
367 	rcu_assign_pointer(*list, ops);
368 }
369 
370 static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
371 			     struct ftrace_ops *ops)
372 {
373 	struct ftrace_ops **p;
374 
375 	/*
376 	 * If we are removing the last function, then simply point
377 	 * to the ftrace_stub.
378 	 */
379 	if (rcu_dereference_protected(*list,
380 			lockdep_is_held(&ftrace_lock)) == ops &&
381 	    rcu_dereference_protected(ops->next,
382 			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
383 		*list = &ftrace_list_end;
384 		return 0;
385 	}
386 
387 	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
388 		if (*p == ops)
389 			break;
390 
391 	if (*p != ops)
392 		return -1;
393 
394 	*p = (*p)->next;
395 	return 0;
396 }
397 
398 static void ftrace_update_trampoline(struct ftrace_ops *ops);
399 
400 static int __register_ftrace_function(struct ftrace_ops *ops)
401 {
402 	if (ops->flags & FTRACE_OPS_FL_DELETED)
403 		return -EINVAL;
404 
405 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
406 		return -EBUSY;
407 
408 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
409 	/*
410 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
411 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
412 	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
413 	 */
414 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
415 	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
416 		return -EINVAL;
417 
418 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
419 		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
420 #endif
421 
422 	if (!core_kernel_data((unsigned long)ops))
423 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
424 
425 	if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
426 		if (per_cpu_ops_alloc(ops))
427 			return -ENOMEM;
428 	}
429 
430 	add_ftrace_ops(&ftrace_ops_list, ops);
431 
432 	/* Always save the function, and reset at unregistering */
433 	ops->saved_func = ops->func;
434 
435 	if (ftrace_pids_enabled(ops))
436 		ops->func = ftrace_pid_func;
437 
438 	ftrace_update_trampoline(ops);
439 
440 	if (ftrace_enabled)
441 		update_ftrace_function();
442 
443 	return 0;
444 }
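/*
 * Illustrative sketch: external users do not call __register_ftrace_function()
 * directly. They call register_ftrace_function() (defined later in this file),
 * which takes ftrace_lock and lands here via ftrace_startup(). A minimal,
 * hypothetical caller looks roughly like:
 *
 *	static void my_trace_func(unsigned long ip, unsigned long parent_ip,
 *				  struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		...	called for every traced function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */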
445 
446 static int __unregister_ftrace_function(struct ftrace_ops *ops)
447 {
448 	int ret;
449 
450 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
451 		return -EBUSY;
452 
453 	ret = remove_ftrace_ops(&ftrace_ops_list, ops);
454 
455 	if (ret < 0)
456 		return ret;
457 
458 	if (ftrace_enabled)
459 		update_ftrace_function();
460 
461 	ops->func = ops->saved_func;
462 
463 	return 0;
464 }
465 
466 static void ftrace_update_pid_func(void)
467 {
468 	struct ftrace_ops *op;
469 
470 	/* Only do something if we are tracing something */
471 	if (ftrace_trace_function == ftrace_stub)
472 		return;
473 
474 	do_for_each_ftrace_op(op, ftrace_ops_list) {
475 		if (op->flags & FTRACE_OPS_FL_PID) {
476 			op->func = ftrace_pids_enabled(op) ?
477 				ftrace_pid_func : op->saved_func;
478 			ftrace_update_trampoline(op);
479 		}
480 	} while_for_each_ftrace_op(op);
481 
482 	update_ftrace_function();
483 }
484 
485 #ifdef CONFIG_FUNCTION_PROFILER
486 struct ftrace_profile {
487 	struct hlist_node		node;
488 	unsigned long			ip;
489 	unsigned long			counter;
490 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
491 	unsigned long long		time;
492 	unsigned long long		time_squared;
493 #endif
494 };
495 
496 struct ftrace_profile_page {
497 	struct ftrace_profile_page	*next;
498 	unsigned long			index;
499 	struct ftrace_profile		records[];
500 };
501 
502 struct ftrace_profile_stat {
503 	atomic_t			disabled;
504 	struct hlist_head		*hash;
505 	struct ftrace_profile_page	*pages;
506 	struct ftrace_profile_page	*start;
507 	struct tracer_stat		stat;
508 };
509 
510 #define PROFILE_RECORDS_SIZE						\
511 	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
512 
513 #define PROFILES_PER_PAGE					\
514 	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
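/*
 * Worked example (illustrative, assuming 4K pages on a 64-bit build):
 * offsetof(struct ftrace_profile_page, records) is 16 bytes (next + index),
 * so PROFILE_RECORDS_SIZE is 4080. With CONFIG_FUNCTION_GRAPH_TRACER a
 * struct ftrace_profile is 48 bytes, giving PROFILES_PER_PAGE == 85;
 * without it the record is 32 bytes, giving 127 records per page.
 */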
515 
516 static int ftrace_profile_enabled __read_mostly;
517 
518 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
519 static DEFINE_MUTEX(ftrace_profile_lock);
520 
521 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
522 
523 #define FTRACE_PROFILE_HASH_BITS 10
524 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
525 
526 static void *
527 function_stat_next(void *v, int idx)
528 {
529 	struct ftrace_profile *rec = v;
530 	struct ftrace_profile_page *pg;
531 
532 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
533 
534  again:
535 	if (idx != 0)
536 		rec++;
537 
538 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
539 		pg = pg->next;
540 		if (!pg)
541 			return NULL;
542 		rec = &pg->records[0];
543 		if (!rec->counter)
544 			goto again;
545 	}
546 
547 	return rec;
548 }
549 
550 static void *function_stat_start(struct tracer_stat *trace)
551 {
552 	struct ftrace_profile_stat *stat =
553 		container_of(trace, struct ftrace_profile_stat, stat);
554 
555 	if (!stat || !stat->start)
556 		return NULL;
557 
558 	return function_stat_next(&stat->start->records[0], 0);
559 }
560 
561 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
562 /* function graph compares on total time */
563 static int function_stat_cmp(void *p1, void *p2)
564 {
565 	struct ftrace_profile *a = p1;
566 	struct ftrace_profile *b = p2;
567 
568 	if (a->time < b->time)
569 		return -1;
570 	if (a->time > b->time)
571 		return 1;
572 	else
573 		return 0;
574 }
575 #else
576 /* not function graph compares against hits */
577 static int function_stat_cmp(void *p1, void *p2)
578 {
579 	struct ftrace_profile *a = p1;
580 	struct ftrace_profile *b = p2;
581 
582 	if (a->counter < b->counter)
583 		return -1;
584 	if (a->counter > b->counter)
585 		return 1;
586 	else
587 		return 0;
588 }
589 #endif
590 
591 static int function_stat_headers(struct seq_file *m)
592 {
593 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
594 	seq_puts(m, "  Function                               "
595 		 "Hit    Time            Avg             s^2\n"
596 		    "  --------                               "
597 		 "---    ----            ---             ---\n");
598 #else
599 	seq_puts(m, "  Function                               Hit\n"
600 		    "  --------                               ---\n");
601 #endif
602 	return 0;
603 }
604 
605 static int function_stat_show(struct seq_file *m, void *v)
606 {
607 	struct ftrace_profile *rec = v;
608 	char str[KSYM_SYMBOL_LEN];
609 	int ret = 0;
610 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
611 	static struct trace_seq s;
612 	unsigned long long avg;
613 	unsigned long long stddev;
614 #endif
615 	mutex_lock(&ftrace_profile_lock);
616 
617 	/* we raced with function_profile_reset() */
618 	if (unlikely(rec->counter == 0)) {
619 		ret = -EBUSY;
620 		goto out;
621 	}
622 
623 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
624 	avg = rec->time;
625 	do_div(avg, rec->counter);
626 	if (tracing_thresh && (avg < tracing_thresh))
627 		goto out;
628 #endif
629 
630 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
631 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
632 
633 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
634 	seq_puts(m, "    ");
635 
636 	/* Sample variance (s^2) */
637 	if (rec->counter <= 1)
638 		stddev = 0;
639 	else {
640 		/*
641 		 * Apply Welford's method:
642 		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
643 		 */
644 		stddev = rec->counter * rec->time_squared -
645 			 rec->time * rec->time;
646 
647 		/*
648 		 * Divide by only 1000 for the ns^2 -> us^2 conversion.
649 		 * trace_print_graph_duration will divide by 1000 again.
650 		 */
651 		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
652 	}
653 
654 	trace_seq_init(&s);
655 	trace_print_graph_duration(rec->time, &s);
656 	trace_seq_puts(&s, "    ");
657 	trace_print_graph_duration(avg, &s);
658 	trace_seq_puts(&s, "    ");
659 	trace_print_graph_duration(stddev, &s);
660 	trace_print_seq(m, &s);
661 #endif
662 	seq_putc(m, '\n');
663 out:
664 	mutex_unlock(&ftrace_profile_lock);
665 
666 	return ret;
667 }
668 
669 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
670 {
671 	struct ftrace_profile_page *pg;
672 
673 	pg = stat->pages = stat->start;
674 
675 	while (pg) {
676 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
677 		pg->index = 0;
678 		pg = pg->next;
679 	}
680 
681 	memset(stat->hash, 0,
682 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
683 }
684 
685 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
686 {
687 	struct ftrace_profile_page *pg;
688 	int functions;
689 	int pages;
690 	int i;
691 
692 	/* If we already allocated, do nothing */
693 	if (stat->pages)
694 		return 0;
695 
696 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
697 	if (!stat->pages)
698 		return -ENOMEM;
699 
700 #ifdef CONFIG_DYNAMIC_FTRACE
701 	functions = ftrace_update_tot_cnt;
702 #else
703 	/*
704 	 * We do not know the number of functions that exist because
705 	 * dynamic tracing is what counts them. From past experience,
706 	 * we have around 20K functions. That should be more than enough.
707 	 * It is highly unlikely we will execute every function in
708 	 * the kernel.
709 	 */
710 	functions = 20000;
711 #endif
712 
713 	pg = stat->start = stat->pages;
714 
715 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
716 
717 	for (i = 1; i < pages; i++) {
718 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
719 		if (!pg->next)
720 			goto out_free;
721 		pg = pg->next;
722 	}
723 
724 	return 0;
725 
726  out_free:
727 	pg = stat->start;
728 	while (pg) {
729 		unsigned long tmp = (unsigned long)pg;
730 
731 		pg = pg->next;
732 		free_page(tmp);
733 	}
734 
735 	stat->pages = NULL;
736 	stat->start = NULL;
737 
738 	return -ENOMEM;
739 }
740 
741 static int ftrace_profile_init_cpu(int cpu)
742 {
743 	struct ftrace_profile_stat *stat;
744 	int size;
745 
746 	stat = &per_cpu(ftrace_profile_stats, cpu);
747 
748 	if (stat->hash) {
749 		/* If the profile is already created, simply reset it */
750 		ftrace_profile_reset(stat);
751 		return 0;
752 	}
753 
754 	/*
755 	 * We are profiling all functions, but usually only a few thousand
756 	 * functions are hit. We'll make a hash of 1024 items.
757 	 */
758 	size = FTRACE_PROFILE_HASH_SIZE;
759 
760 	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
761 
762 	if (!stat->hash)
763 		return -ENOMEM;
764 
765 	/* Preallocate the function profiling pages */
766 	if (ftrace_profile_pages_init(stat) < 0) {
767 		kfree(stat->hash);
768 		stat->hash = NULL;
769 		return -ENOMEM;
770 	}
771 
772 	return 0;
773 }
774 
775 static int ftrace_profile_init(void)
776 {
777 	int cpu;
778 	int ret = 0;
779 
780 	for_each_possible_cpu(cpu) {
781 		ret = ftrace_profile_init_cpu(cpu);
782 		if (ret)
783 			break;
784 	}
785 
786 	return ret;
787 }
788 
789 /* interrupts must be disabled */
790 static struct ftrace_profile *
791 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
792 {
793 	struct ftrace_profile *rec;
794 	struct hlist_head *hhd;
795 	unsigned long key;
796 
797 	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
798 	hhd = &stat->hash[key];
799 
800 	if (hlist_empty(hhd))
801 		return NULL;
802 
803 	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
804 		if (rec->ip == ip)
805 			return rec;
806 	}
807 
808 	return NULL;
809 }
810 
811 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
812 			       struct ftrace_profile *rec)
813 {
814 	unsigned long key;
815 
816 	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
817 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
818 }
819 
820 /*
821  * The memory is already allocated; this simply finds a new record to use.
822  */
823 static struct ftrace_profile *
824 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
825 {
826 	struct ftrace_profile *rec = NULL;
827 
828 	/* prevent recursion (from NMIs) */
829 	if (atomic_inc_return(&stat->disabled) != 1)
830 		goto out;
831 
832 	/*
833 	 * Try to find the function again since an NMI
834 	 * could have added it
835 	 */
836 	rec = ftrace_find_profiled_func(stat, ip);
837 	if (rec)
838 		goto out;
839 
840 	if (stat->pages->index == PROFILES_PER_PAGE) {
841 		if (!stat->pages->next)
842 			goto out;
843 		stat->pages = stat->pages->next;
844 	}
845 
846 	rec = &stat->pages->records[stat->pages->index++];
847 	rec->ip = ip;
848 	ftrace_add_profile(stat, rec);
849 
850  out:
851 	atomic_dec(&stat->disabled);
852 
853 	return rec;
854 }
855 
856 static void
857 function_profile_call(unsigned long ip, unsigned long parent_ip,
858 		      struct ftrace_ops *ops, struct pt_regs *regs)
859 {
860 	struct ftrace_profile_stat *stat;
861 	struct ftrace_profile *rec;
862 	unsigned long flags;
863 
864 	if (!ftrace_profile_enabled)
865 		return;
866 
867 	local_irq_save(flags);
868 
869 	stat = this_cpu_ptr(&ftrace_profile_stats);
870 	if (!stat->hash || !ftrace_profile_enabled)
871 		goto out;
872 
873 	rec = ftrace_find_profiled_func(stat, ip);
874 	if (!rec) {
875 		rec = ftrace_profile_alloc(stat, ip);
876 		if (!rec)
877 			goto out;
878 	}
879 
880 	rec->counter++;
881  out:
882 	local_irq_restore(flags);
883 }
884 
885 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
886 static int profile_graph_entry(struct ftrace_graph_ent *trace)
887 {
888 	int index = trace->depth;
889 
890 	function_profile_call(trace->func, 0, NULL, NULL);
891 
892 	/* If function graph is shutting down, ret_stack can be NULL */
893 	if (!current->ret_stack)
894 		return 0;
895 
896 	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
897 		current->ret_stack[index].subtime = 0;
898 
899 	return 1;
900 }
901 
902 static void profile_graph_return(struct ftrace_graph_ret *trace)
903 {
904 	struct ftrace_profile_stat *stat;
905 	unsigned long long calltime;
906 	struct ftrace_profile *rec;
907 	unsigned long flags;
908 
909 	local_irq_save(flags);
910 	stat = this_cpu_ptr(&ftrace_profile_stats);
911 	if (!stat->hash || !ftrace_profile_enabled)
912 		goto out;
913 
914 	/* If the calltime was zero'd ignore it */
915 	if (!trace->calltime)
916 		goto out;
917 
918 	calltime = trace->rettime - trace->calltime;
919 
920 	if (!fgraph_graph_time) {
921 		int index;
922 
923 		index = trace->depth;
924 
925 		/* Append this call time to the parent time to subtract */
926 		if (index)
927 			current->ret_stack[index - 1].subtime += calltime;
928 
929 		if (current->ret_stack[index].subtime < calltime)
930 			calltime -= current->ret_stack[index].subtime;
931 		else
932 			calltime = 0;
933 	}
934 
935 	rec = ftrace_find_profiled_func(stat, trace->func);
936 	if (rec) {
937 		rec->time += calltime;
938 		rec->time_squared += calltime * calltime;
939 	}
940 
941  out:
942 	local_irq_restore(flags);
943 }
944 
945 static int register_ftrace_profiler(void)
946 {
947 	return register_ftrace_graph(&profile_graph_return,
948 				     &profile_graph_entry);
949 }
950 
951 static void unregister_ftrace_profiler(void)
952 {
953 	unregister_ftrace_graph();
954 }
955 #else
956 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
957 	.func		= function_profile_call,
958 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
959 	INIT_OPS_HASH(ftrace_profile_ops)
960 };
961 
962 static int register_ftrace_profiler(void)
963 {
964 	return register_ftrace_function(&ftrace_profile_ops);
965 }
966 
967 static void unregister_ftrace_profiler(void)
968 {
969 	unregister_ftrace_function(&ftrace_profile_ops);
970 }
971 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
972 
973 static ssize_t
974 ftrace_profile_write(struct file *filp, const char __user *ubuf,
975 		     size_t cnt, loff_t *ppos)
976 {
977 	unsigned long val;
978 	int ret;
979 
980 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
981 	if (ret)
982 		return ret;
983 
984 	val = !!val;
985 
986 	mutex_lock(&ftrace_profile_lock);
987 	if (ftrace_profile_enabled ^ val) {
988 		if (val) {
989 			ret = ftrace_profile_init();
990 			if (ret < 0) {
991 				cnt = ret;
992 				goto out;
993 			}
994 
995 			ret = register_ftrace_profiler();
996 			if (ret < 0) {
997 				cnt = ret;
998 				goto out;
999 			}
1000 			ftrace_profile_enabled = 1;
1001 		} else {
1002 			ftrace_profile_enabled = 0;
1003 			/*
1004 			 * unregister_ftrace_profiler calls stop_machine
1005 			 * so this acts like a synchronize_sched().
1006 			 */
1007 			unregister_ftrace_profiler();
1008 		}
1009 	}
1010  out:
1011 	mutex_unlock(&ftrace_profile_lock);
1012 
1013 	*ppos += cnt;
1014 
1015 	return cnt;
1016 }
1017 
1018 static ssize_t
1019 ftrace_profile_read(struct file *filp, char __user *ubuf,
1020 		     size_t cnt, loff_t *ppos)
1021 {
1022 	char buf[64];		/* big enough to hold a number */
1023 	int r;
1024 
1025 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
1026 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1027 }
1028 
1029 static const struct file_operations ftrace_profile_fops = {
1030 	.open		= tracing_open_generic,
1031 	.read		= ftrace_profile_read,
1032 	.write		= ftrace_profile_write,
1033 	.llseek		= default_llseek,
1034 };
1035 
1036 /* used to initialize the real stat files */
1037 static struct tracer_stat function_stats __initdata = {
1038 	.name		= "functions",
1039 	.stat_start	= function_stat_start,
1040 	.stat_next	= function_stat_next,
1041 	.stat_cmp	= function_stat_cmp,
1042 	.stat_headers	= function_stat_headers,
1043 	.stat_show	= function_stat_show
1044 };
1045 
1046 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1047 {
1048 	struct ftrace_profile_stat *stat;
1049 	struct dentry *entry;
1050 	char *name;
1051 	int ret;
1052 	int cpu;
1053 
1054 	for_each_possible_cpu(cpu) {
1055 		stat = &per_cpu(ftrace_profile_stats, cpu);
1056 
1057 		name = kasprintf(GFP_KERNEL, "function%d", cpu);
1058 		if (!name) {
1059 			/*
1060 			 * The files created are permanent; even if something
1061 			 * goes wrong, we do not free the memory.
1062 			 */
1063 			WARN(1,
1064 			     "Could not allocate stat file for cpu %d\n",
1065 			     cpu);
1066 			return;
1067 		}
1068 		stat->stat = function_stats;
1069 		stat->stat.name = name;
1070 		ret = register_stat_tracer(&stat->stat);
1071 		if (ret) {
1072 			WARN(1,
1073 			     "Could not register function stat for cpu %d\n",
1074 			     cpu);
1075 			kfree(name);
1076 			return;
1077 		}
1078 	}
1079 
1080 	entry = tracefs_create_file("function_profile_enabled", 0644,
1081 				    d_tracer, NULL, &ftrace_profile_fops);
1082 	if (!entry)
1083 		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
1084 }
1085 
1086 #else /* CONFIG_FUNCTION_PROFILER */
1087 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1088 {
1089 }
1090 #endif /* CONFIG_FUNCTION_PROFILER */
1091 
1092 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1093 
1094 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1095 static int ftrace_graph_active;
1096 #else
1097 # define ftrace_graph_active 0
1098 #endif
1099 
1100 #ifdef CONFIG_DYNAMIC_FTRACE
1101 
1102 static struct ftrace_ops *removed_ops;
1103 
1104 /*
1105  * Set when doing a global update, like enabling all recs or disabling them.
1106  * It is not set when just updating a single ftrace_ops.
1107  */
1108 static bool update_all_ops;
1109 
1110 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1111 # error Dynamic ftrace depends on MCOUNT_RECORD
1112 #endif
1113 
1114 struct ftrace_func_entry {
1115 	struct hlist_node hlist;
1116 	unsigned long ip;
1117 };
1118 
1119 struct ftrace_func_probe {
1120 	struct ftrace_probe_ops	*probe_ops;
1121 	struct ftrace_ops	ops;
1122 	struct trace_array	*tr;
1123 	struct list_head	list;
1124 	void			*data;
1125 	int			ref;
1126 };
1127 
1128 /*
1129  * We make these constant because no one should touch them,
1130  * but they are used as the default "empty hash", to avoid allocating
1131  * it all the time. These are in a read only section such that if
1132  * anyone does try to modify it, it will cause an exception.
1133  */
1134 static const struct hlist_head empty_buckets[1];
1135 static const struct ftrace_hash empty_hash = {
1136 	.buckets = (struct hlist_head *)empty_buckets,
1137 };
1138 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1139 
1140 static struct ftrace_ops global_ops = {
1141 	.func				= ftrace_stub,
1142 	.local_hash.notrace_hash	= EMPTY_HASH,
1143 	.local_hash.filter_hash		= EMPTY_HASH,
1144 	INIT_OPS_HASH(global_ops)
1145 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
1146 					  FTRACE_OPS_FL_INITIALIZED |
1147 					  FTRACE_OPS_FL_PID,
1148 };
1149 
1150 /*
1151  * This is used by __kernel_text_address() to return true if the
1152  * address is on a dynamically allocated trampoline that would
1153  * not return true for either core_kernel_text() or
1154  * is_module_text_address().
1155  */
1156 bool is_ftrace_trampoline(unsigned long addr)
1157 {
1158 	struct ftrace_ops *op;
1159 	bool ret = false;
1160 
1161 	/*
1162 	 * Some of the ops may be dynamically allocated;
1163 	 * they are freed after a synchronize_sched().
1164 	 */
1165 	preempt_disable_notrace();
1166 
1167 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1168 		/*
1169 		 * This is to check for dynamically allocated trampolines.
1170 		 * Trampolines that are in kernel text will have
1171 		 * core_kernel_text() return true.
1172 		 */
1173 		if (op->trampoline && op->trampoline_size)
1174 			if (addr >= op->trampoline &&
1175 			    addr < op->trampoline + op->trampoline_size) {
1176 				ret = true;
1177 				goto out;
1178 			}
1179 	} while_for_each_ftrace_op(op);
1180 
1181  out:
1182 	preempt_enable_notrace();
1183 
1184 	return ret;
1185 }
1186 
1187 struct ftrace_page {
1188 	struct ftrace_page	*next;
1189 	struct dyn_ftrace	*records;
1190 	int			index;
1191 	int			size;
1192 };
1193 
1194 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1195 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1196 
1197 /* estimate from running different kernels */
1198 #define NR_TO_INIT		10000
1199 
1200 static struct ftrace_page	*ftrace_pages_start;
1201 static struct ftrace_page	*ftrace_pages;
1202 
1203 static __always_inline unsigned long
1204 ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1205 {
1206 	if (hash->size_bits > 0)
1207 		return hash_long(ip, hash->size_bits);
1208 
1209 	return 0;
1210 }
1211 
1212 /* Only use this function if ftrace_hash_empty() has already been tested */
1213 static __always_inline struct ftrace_func_entry *
1214 __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1215 {
1216 	unsigned long key;
1217 	struct ftrace_func_entry *entry;
1218 	struct hlist_head *hhd;
1219 
1220 	key = ftrace_hash_key(hash, ip);
1221 	hhd = &hash->buckets[key];
1222 
1223 	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1224 		if (entry->ip == ip)
1225 			return entry;
1226 	}
1227 	return NULL;
1228 }
1229 
1230 /**
1231  * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1232  * @hash: The hash to look at
1233  * @ip: The instruction pointer to test
1234  *
1235  * Search a given @hash to see if a given instruction pointer (@ip)
1236  * exists in it.
1237  *
1238  * Returns the entry that holds the @ip if found. NULL otherwise.
1239  */
1240 struct ftrace_func_entry *
1241 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1242 {
1243 	if (ftrace_hash_empty(hash))
1244 		return NULL;
1245 
1246 	return __ftrace_lookup_ip(hash, ip);
1247 }
1248 
1249 static void __add_hash_entry(struct ftrace_hash *hash,
1250 			     struct ftrace_func_entry *entry)
1251 {
1252 	struct hlist_head *hhd;
1253 	unsigned long key;
1254 
1255 	key = ftrace_hash_key(hash, entry->ip);
1256 	hhd = &hash->buckets[key];
1257 	hlist_add_head(&entry->hlist, hhd);
1258 	hash->count++;
1259 }
1260 
1261 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1262 {
1263 	struct ftrace_func_entry *entry;
1264 
1265 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1266 	if (!entry)
1267 		return -ENOMEM;
1268 
1269 	entry->ip = ip;
1270 	__add_hash_entry(hash, entry);
1271 
1272 	return 0;
1273 }
1274 
1275 static void
1276 free_hash_entry(struct ftrace_hash *hash,
1277 		  struct ftrace_func_entry *entry)
1278 {
1279 	hlist_del(&entry->hlist);
1280 	kfree(entry);
1281 	hash->count--;
1282 }
1283 
1284 static void
1285 remove_hash_entry(struct ftrace_hash *hash,
1286 		  struct ftrace_func_entry *entry)
1287 {
1288 	hlist_del_rcu(&entry->hlist);
1289 	hash->count--;
1290 }
1291 
1292 static void ftrace_hash_clear(struct ftrace_hash *hash)
1293 {
1294 	struct hlist_head *hhd;
1295 	struct hlist_node *tn;
1296 	struct ftrace_func_entry *entry;
1297 	int size = 1 << hash->size_bits;
1298 	int i;
1299 
1300 	if (!hash->count)
1301 		return;
1302 
1303 	for (i = 0; i < size; i++) {
1304 		hhd = &hash->buckets[i];
1305 		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1306 			free_hash_entry(hash, entry);
1307 	}
1308 	FTRACE_WARN_ON(hash->count);
1309 }
1310 
1311 static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
1312 {
1313 	list_del(&ftrace_mod->list);
1314 	kfree(ftrace_mod->module);
1315 	kfree(ftrace_mod->func);
1316 	kfree(ftrace_mod);
1317 }
1318 
1319 static void clear_ftrace_mod_list(struct list_head *head)
1320 {
1321 	struct ftrace_mod_load *p, *n;
1322 
1323 	/* stack tracer isn't supported yet */
1324 	if (!head)
1325 		return;
1326 
1327 	mutex_lock(&ftrace_lock);
1328 	list_for_each_entry_safe(p, n, head, list)
1329 		free_ftrace_mod(p);
1330 	mutex_unlock(&ftrace_lock);
1331 }
1332 
1333 static void free_ftrace_hash(struct ftrace_hash *hash)
1334 {
1335 	if (!hash || hash == EMPTY_HASH)
1336 		return;
1337 	ftrace_hash_clear(hash);
1338 	kfree(hash->buckets);
1339 	kfree(hash);
1340 }
1341 
1342 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1343 {
1344 	struct ftrace_hash *hash;
1345 
1346 	hash = container_of(rcu, struct ftrace_hash, rcu);
1347 	free_ftrace_hash(hash);
1348 }
1349 
1350 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1351 {
1352 	if (!hash || hash == EMPTY_HASH)
1353 		return;
1354 	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1355 }
1356 
1357 void ftrace_free_filter(struct ftrace_ops *ops)
1358 {
1359 	ftrace_ops_init(ops);
1360 	free_ftrace_hash(ops->func_hash->filter_hash);
1361 	free_ftrace_hash(ops->func_hash->notrace_hash);
1362 }
1363 
1364 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1365 {
1366 	struct ftrace_hash *hash;
1367 	int size;
1368 
1369 	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1370 	if (!hash)
1371 		return NULL;
1372 
1373 	size = 1 << size_bits;
1374 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1375 
1376 	if (!hash->buckets) {
1377 		kfree(hash);
1378 		return NULL;
1379 	}
1380 
1381 	hash->size_bits = size_bits;
1382 
1383 	return hash;
1384 }
1385 
1386 
1387 static int ftrace_add_mod(struct trace_array *tr,
1388 			  const char *func, const char *module,
1389 			  int enable)
1390 {
1391 	struct ftrace_mod_load *ftrace_mod;
1392 	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;
1393 
1394 	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
1395 	if (!ftrace_mod)
1396 		return -ENOMEM;
1397 
1398 	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
1399 	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
1400 	ftrace_mod->enable = enable;
1401 
1402 	if (!ftrace_mod->func || !ftrace_mod->module)
1403 		goto out_free;
1404 
1405 	list_add(&ftrace_mod->list, mod_head);
1406 
1407 	return 0;
1408 
1409  out_free:
1410 	free_ftrace_mod(ftrace_mod);
1411 
1412 	return -ENOMEM;
1413 }
1414 
1415 static struct ftrace_hash *
1416 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1417 {
1418 	struct ftrace_func_entry *entry;
1419 	struct ftrace_hash *new_hash;
1420 	int size;
1421 	int ret;
1422 	int i;
1423 
1424 	new_hash = alloc_ftrace_hash(size_bits);
1425 	if (!new_hash)
1426 		return NULL;
1427 
1428 	if (hash)
1429 		new_hash->flags = hash->flags;
1430 
1431 	/* Empty hash? */
1432 	if (ftrace_hash_empty(hash))
1433 		return new_hash;
1434 
1435 	size = 1 << hash->size_bits;
1436 	for (i = 0; i < size; i++) {
1437 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1438 			ret = add_hash_entry(new_hash, entry->ip);
1439 			if (ret < 0)
1440 				goto free_hash;
1441 		}
1442 	}
1443 
1444 	FTRACE_WARN_ON(new_hash->count != hash->count);
1445 
1446 	return new_hash;
1447 
1448  free_hash:
1449 	free_ftrace_hash(new_hash);
1450 	return NULL;
1451 }
1452 
1453 static void
1454 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1455 static void
1456 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1457 
1458 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1459 				       struct ftrace_hash *new_hash);
1460 
1461 static struct ftrace_hash *
1462 __ftrace_hash_move(struct ftrace_hash *src)
1463 {
1464 	struct ftrace_func_entry *entry;
1465 	struct hlist_node *tn;
1466 	struct hlist_head *hhd;
1467 	struct ftrace_hash *new_hash;
1468 	int size = src->count;
1469 	int bits = 0;
1470 	int i;
1471 
1472 	/*
1473 	 * If the new source is empty, just return the empty_hash.
1474 	 */
1475 	if (ftrace_hash_empty(src))
1476 		return EMPTY_HASH;
1477 
1478 	/*
1479 	 * Make the hash size about 1/2 the # found
1480 	 */
1481 	for (size /= 2; size; size >>= 1)
1482 		bits++;
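	/*
	 * Worked example (illustrative): with src->count == 100, size starts
	 * at 50 and the loop runs six times, so bits == 6 and the new hash
	 * gets 64 buckets for its 100 entries.
	 */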
1483 
1484 	/* Don't allocate too much */
1485 	if (bits > FTRACE_HASH_MAX_BITS)
1486 		bits = FTRACE_HASH_MAX_BITS;
1487 
1488 	new_hash = alloc_ftrace_hash(bits);
1489 	if (!new_hash)
1490 		return NULL;
1491 
1492 	new_hash->flags = src->flags;
1493 
1494 	size = 1 << src->size_bits;
1495 	for (i = 0; i < size; i++) {
1496 		hhd = &src->buckets[i];
1497 		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1498 			remove_hash_entry(src, entry);
1499 			__add_hash_entry(new_hash, entry);
1500 		}
1501 	}
1502 
1503 	return new_hash;
1504 }
1505 
1506 static int
1507 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1508 		 struct ftrace_hash **dst, struct ftrace_hash *src)
1509 {
1510 	struct ftrace_hash *new_hash;
1511 	int ret;
1512 
1513 	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
1514 	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1515 		return -EINVAL;
1516 
1517 	new_hash = __ftrace_hash_move(src);
1518 	if (!new_hash)
1519 		return -ENOMEM;
1520 
1521 	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1522 	if (enable) {
1523 		/* IPMODIFY should be updated only when filter_hash updating */
1524 		ret = ftrace_hash_ipmodify_update(ops, new_hash);
1525 		if (ret < 0) {
1526 			free_ftrace_hash(new_hash);
1527 			return ret;
1528 		}
1529 	}
1530 
1531 	/*
1532 	 * Remove the current set, update the hash and add
1533 	 * them back.
1534 	 */
1535 	ftrace_hash_rec_disable_modify(ops, enable);
1536 
1537 	rcu_assign_pointer(*dst, new_hash);
1538 
1539 	ftrace_hash_rec_enable_modify(ops, enable);
1540 
1541 	return 0;
1542 }
1543 
1544 static bool hash_contains_ip(unsigned long ip,
1545 			     struct ftrace_ops_hash *hash)
1546 {
1547 	/*
1548 	 * The function record is a match if it exists in the filter
1549 	 * hash and not in the notrace hash. Note, an empty filter hash
1550 	 * is considered a match for every ip, while an empty notrace
1551 	 * hash matches nothing (it excludes no ip).
1552 	 */
1553 	return (ftrace_hash_empty(hash->filter_hash) ||
1554 		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
1555 		(ftrace_hash_empty(hash->notrace_hash) ||
1556 		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1557 }
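/*
 * Illustrative summary of the check above:
 *
 *	filter_hash	notrace_hash	hash_contains_ip()
 *	empty		empty		true
 *	contains ip	empty		true
 *	contains ip	contains ip	false
 *	empty		contains ip	false
 *	lacks ip	(anything)	false
 */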
1558 
1559 /*
1560  * Test the hashes for this ops to see if we want to call
1561  * the ops->func or not.
1562  *
1563  * It's a match if the ip is in the ops->filter_hash or
1564  * the filter_hash does not exist or is empty,
1565  *  AND
1566  * the ip is not in the ops->notrace_hash.
1567  *
1568  * This needs to be called with preemption disabled as
1569  * the hashes are freed with call_rcu_sched().
1570  */
1571 static int
1572 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1573 {
1574 	struct ftrace_ops_hash hash;
1575 	int ret;
1576 
1577 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1578 	/*
1579 	 * There's a small race when adding ops: the ftrace handler
1580 	 * that wants regs may be called without them. We cannot
1581 	 * allow that handler to be called if regs is NULL.
1582 	 */
1583 	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1584 		return 0;
1585 #endif
1586 
1587 	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
1588 	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);
1589 
1590 	if (hash_contains_ip(ip, &hash))
1591 		ret = 1;
1592 	else
1593 		ret = 0;
1594 
1595 	return ret;
1596 }
1597 
1598 /*
1599  * This is a double for loop. Do not use 'break' to break out of the loop,
1600  * you must use a goto.
1601  */
1602 #define do_for_each_ftrace_rec(pg, rec)					\
1603 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1604 		int _____i;						\
1605 		for (_____i = 0; _____i < pg->index; _____i++) {	\
1606 			rec = &pg->records[_____i];
1607 
1608 #define while_for_each_ftrace_rec()		\
1609 		}				\
1610 	}
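/*
 * Illustrative sketch of the pattern (pg and rec are declared by the caller;
 * target_ip is a placeholder). Because this is a nested loop, an early exit
 * must use a goto, as done by __ftrace_hash_update_ipmodify() below:
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return NULL;
 * found:
 *	return rec;
 */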
1611 
1612 
1613 static int ftrace_cmp_recs(const void *a, const void *b)
1614 {
1615 	const struct dyn_ftrace *key = a;
1616 	const struct dyn_ftrace *rec = b;
1617 
1618 	if (key->flags < rec->ip)
1619 		return -1;
1620 	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1621 		return 1;
1622 	return 0;
1623 }
1624 
1625 /**
1626  * ftrace_location_range - return the first address of a traced location
1627  *	if it touches the given ip range
1628  * @start: start of range to search.
1629  * @end: end of range to search (inclusive). @end points to the last byte
1630  *	to check.
1631  *
1632  * Returns rec->ip if the related ftrace location is at least partly within
1633  * the given address range. That is, the first address of the instruction
1634  * that is either a NOP or call to the function tracer. It checks the ftrace
1635  * internal tables to determine if the address belongs or not.
1636  */
1637 unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1638 {
1639 	struct ftrace_page *pg;
1640 	struct dyn_ftrace *rec;
1641 	struct dyn_ftrace key;
1642 
1643 	key.ip = start;
1644 	key.flags = end;	/* overload flags, as it is unsigned long */
1645 
1646 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1647 		if (end < pg->records[0].ip ||
1648 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1649 			continue;
1650 		rec = bsearch(&key, pg->records, pg->index,
1651 			      sizeof(struct dyn_ftrace),
1652 			      ftrace_cmp_recs);
1653 		if (rec)
1654 			return rec->ip;
1655 	}
1656 
1657 	return 0;
1658 }
1659 
1660 /**
1661  * ftrace_location - return true if the ip given is a traced location
1662  * @ip: the instruction pointer to check
1663  *
1664  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1665  * That is, the instruction that is either a NOP or call to
1666  * the function tracer. It checks the ftrace internal tables to
1667  * determine if the address belongs or not.
1668  */
1669 unsigned long ftrace_location(unsigned long ip)
1670 {
1671 	return ftrace_location_range(ip, ip);
1672 }
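/*
 * Illustrative sketch: callers such as kprobes use this to check whether an
 * address is an ftrace-patchable call site before deciding how to attach:
 *
 *	if (!ftrace_location(addr))
 *		return -EINVAL;		(addr is not an mcount/fentry site)
 */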
1673 
1674 /**
1675  * ftrace_text_reserved - return true if range contains an ftrace location
1676  * @start: start of range to search
1677  * @end: end of range to search (inclusive). @end points to the last byte to check.
1678  *
1679  * Returns 1 if @start and @end contains a ftrace location.
1680  * That is, the instruction that is either a NOP or call to
1681  * the function tracer. It checks the ftrace internal tables to
1682  * determine if the address belongs or not.
1683  */
1684 int ftrace_text_reserved(const void *start, const void *end)
1685 {
1686 	unsigned long ret;
1687 
1688 	ret = ftrace_location_range((unsigned long)start,
1689 				    (unsigned long)end);
1690 
1691 	return (int)!!ret;
1692 }
1693 
1694 /* Test if ops registered to this rec needs regs */
1695 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1696 {
1697 	struct ftrace_ops *ops;
1698 	bool keep_regs = false;
1699 
1700 	for (ops = ftrace_ops_list;
1701 	     ops != &ftrace_list_end; ops = ops->next) {
1702 		/* pass rec in as regs to have non-NULL val */
1703 		if (ftrace_ops_test(ops, rec->ip, rec)) {
1704 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1705 				keep_regs = true;
1706 				break;
1707 			}
1708 		}
1709 	}
1710 
1711 	return  keep_regs;
1712 }
1713 
1714 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1715 				     int filter_hash,
1716 				     bool inc)
1717 {
1718 	struct ftrace_hash *hash;
1719 	struct ftrace_hash *other_hash;
1720 	struct ftrace_page *pg;
1721 	struct dyn_ftrace *rec;
1722 	bool update = false;
1723 	int count = 0;
1724 	int all = false;
1725 
1726 	/* Only update if the ops has been registered */
1727 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1728 		return false;
1729 
1730 	/*
1731 	 * In the filter_hash case:
1732 	 *   If the count is zero, we update all records.
1733 	 *   Otherwise we just update the items in the hash.
1734 	 *
1735 	 * In the notrace_hash case:
1736 	 *   We enable the update in the hash.
1737 	 *   As disabling notrace means enabling the tracing,
1738 	 *   and enabling notrace means disabling, the inc variable
1739 	 *   gets inverted.
1740 	 */
1741 	if (filter_hash) {
1742 		hash = ops->func_hash->filter_hash;
1743 		other_hash = ops->func_hash->notrace_hash;
1744 		if (ftrace_hash_empty(hash))
1745 			all = true;
1746 	} else {
1747 		inc = !inc;
1748 		hash = ops->func_hash->notrace_hash;
1749 		other_hash = ops->func_hash->filter_hash;
1750 		/*
1751 		 * If the notrace hash has no items,
1752 		 * then there's nothing to do.
1753 		 */
1754 		if (ftrace_hash_empty(hash))
1755 			return false;
1756 	}
1757 
1758 	do_for_each_ftrace_rec(pg, rec) {
1759 		int in_other_hash = 0;
1760 		int in_hash = 0;
1761 		int match = 0;
1762 
1763 		if (rec->flags & FTRACE_FL_DISABLED)
1764 			continue;
1765 
1766 		if (all) {
1767 			/*
1768 			 * Only the filter_hash affects all records.
1769 			 * Update if the record is not in the notrace hash.
1770 			 */
1771 			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1772 				match = 1;
1773 		} else {
1774 			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1775 			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1776 
1777 			/*
1778 			 * If filter_hash is set, we want to match all functions
1779 			 * that are in the hash but not in the other hash.
1780 			 *
1781 			 * If filter_hash is not set, then we are decrementing.
1782 			 * That means we match anything that is in the hash
1783 			 * and also in the other_hash. That is, we need to turn
1784 			 * off functions in the other hash because they are disabled
1785 			 * by this hash.
1786 			 */
1787 			if (filter_hash && in_hash && !in_other_hash)
1788 				match = 1;
1789 			else if (!filter_hash && in_hash &&
1790 				 (in_other_hash || ftrace_hash_empty(other_hash)))
1791 				match = 1;
1792 		}
1793 		if (!match)
1794 			continue;
1795 
1796 		if (inc) {
1797 			rec->flags++;
1798 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1799 				return false;
1800 
1801 			/*
1802 			 * If there's only a single callback registered to a
1803 			 * function, and the ops has a trampoline registered
1804 			 * for it, then we can call it directly.
1805 			 */
1806 			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1807 				rec->flags |= FTRACE_FL_TRAMP;
1808 			else
1809 				/*
1810 				 * If we are adding another function callback
1811 				 * to this function, and the previous had a
1812 				 * custom trampoline in use, then we need to go
1813 				 * back to the default trampoline.
1814 				 */
1815 				rec->flags &= ~FTRACE_FL_TRAMP;
1816 
1817 			/*
1818 			 * If any ops wants regs saved for this function
1819 			 * then all ops will get saved regs.
1820 			 */
1821 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1822 				rec->flags |= FTRACE_FL_REGS;
1823 		} else {
1824 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1825 				return false;
1826 			rec->flags--;
1827 
1828 			/*
1829 			 * If the rec had REGS enabled and the ops that is
1830 			 * being removed had REGS set, then see if there is
1831 			 * still any ops for this record that wants regs.
1832 			 * If not, we can stop recording them.
1833 			 */
1834 			if (ftrace_rec_count(rec) > 0 &&
1835 			    rec->flags & FTRACE_FL_REGS &&
1836 			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1837 				if (!test_rec_ops_needs_regs(rec))
1838 					rec->flags &= ~FTRACE_FL_REGS;
1839 			}
1840 
1841 			/*
1842 			 * If the rec had TRAMP enabled, then it needs to be
1843 			 * cleared, as TRAMP can only be enabled when there is
1844 			 * a single ops attached to it. In other words, always
1845 			 * disable it when decrementing.
1846 			 * In the future, we may set it if rec count is
1847 			 * decremented to one, and the ops that is left
1848 			 * has a trampoline.
1849 			 */
1850 			rec->flags &= ~FTRACE_FL_TRAMP;
1851 
1852 			/*
1853 			 * flags will be cleared in ftrace_check_record()
1854 			 * if rec count is zero.
1855 			 */
1856 		}
1857 		count++;
1858 
1859 		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1860 		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;
1861 
1862 		/* Shortcut, if we handled all records, we are done. */
1863 		if (!all && count == hash->count)
1864 			return update;
1865 	} while_for_each_ftrace_rec();
1866 
1867 	return update;
1868 }
1869 
1870 static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1871 				    int filter_hash)
1872 {
1873 	return __ftrace_hash_rec_update(ops, filter_hash, 0);
1874 }
1875 
1876 static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1877 				   int filter_hash)
1878 {
1879 	return __ftrace_hash_rec_update(ops, filter_hash, 1);
1880 }
1881 
1882 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1883 					  int filter_hash, int inc)
1884 {
1885 	struct ftrace_ops *op;
1886 
1887 	__ftrace_hash_rec_update(ops, filter_hash, inc);
1888 
1889 	if (ops->func_hash != &global_ops.local_hash)
1890 		return;
1891 
1892 	/*
1893 	 * If the ops shares the global_ops hash, then we need to update
1894 	 * all ops that are enabled and use this hash.
1895 	 */
1896 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1897 		/* Already done */
1898 		if (op == ops)
1899 			continue;
1900 		if (op->func_hash == &global_ops.local_hash)
1901 			__ftrace_hash_rec_update(op, filter_hash, inc);
1902 	} while_for_each_ftrace_op(op);
1903 }
1904 
1905 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1906 					   int filter_hash)
1907 {
1908 	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1909 }
1910 
1911 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1912 					  int filter_hash)
1913 {
1914 	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1915 }
1916 
1917 /*
1918  * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1919  * or no-needed to update, -EBUSY if it detects a conflict of the flag
1920  * or no update is needed, -EBUSY if it detects a conflict of the flag
1921  * Note that old_hash and new_hash has below meanings
1922  * Note that old_hash and new_hash have the following meanings:
1923  *  - If the hash is EMPTY_HASH, it hits nothing
1924  *  - Anything else hits the recs which match the hash entries.
1925  */
1926 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1927 					 struct ftrace_hash *old_hash,
1928 					 struct ftrace_hash *new_hash)
1929 {
1930 	struct ftrace_page *pg;
1931 	struct dyn_ftrace *rec, *end = NULL;
1932 	int in_old, in_new;
1933 
1934 	/* Only update if the ops has been registered */
1935 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1936 		return 0;
1937 
1938 	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1939 		return 0;
1940 
1941 	/*
1942 	 * Since IPMODIFY is a very address-sensitive action, we do not
1943 	 * allow ftrace_ops to set all functions to a new hash.
1944 	 */
1945 	if (!new_hash || !old_hash)
1946 		return -EINVAL;
1947 
1948 	/* Update rec->flags */
1949 	do_for_each_ftrace_rec(pg, rec) {
1950 
1951 		if (rec->flags & FTRACE_FL_DISABLED)
1952 			continue;
1953 
1954 		/* We need to update only differences of filter_hash */
1955 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1956 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1957 		if (in_old == in_new)
1958 			continue;
1959 
1960 		if (in_new) {
1961 			/* New entries must ensure no others are using it */
1962 			if (rec->flags & FTRACE_FL_IPMODIFY)
1963 				goto rollback;
1964 			rec->flags |= FTRACE_FL_IPMODIFY;
1965 		} else /* Removed entry */
1966 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1967 	} while_for_each_ftrace_rec();
1968 
1969 	return 0;
1970 
1971 rollback:
1972 	end = rec;
1973 
1974 	/* Roll back what we did above */
1975 	do_for_each_ftrace_rec(pg, rec) {
1976 
1977 		if (rec->flags & FTRACE_FL_DISABLED)
1978 			continue;
1979 
1980 		if (rec == end)
1981 			goto err_out;
1982 
1983 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1984 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1985 		if (in_old == in_new)
1986 			continue;
1987 
1988 		if (in_new)
1989 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1990 		else
1991 			rec->flags |= FTRACE_FL_IPMODIFY;
1992 	} while_for_each_ftrace_rec();
1993 
1994 err_out:
1995 	return -EBUSY;
1996 }
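
/*
 * For example (illustrative, with hypothetical functions funcA and funcB):
 * if some other registered ops already set FTRACE_FL_IPMODIFY on the
 * record for funcB, then moving this ops to a new_hash of { funcA, funcB }
 * marks funcA, hits the conflict on funcB, rolls the funcA mark back in
 * the second loop above, and returns -EBUSY.
 */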
1997 
1998 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1999 {
2000 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
2001 
2002 	if (ftrace_hash_empty(hash))
2003 		hash = NULL;
2004 
2005 	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
2006 }
2007 
2008 /* Disabling always succeeds */
2009 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
2010 {
2011 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
2012 
2013 	if (ftrace_hash_empty(hash))
2014 		hash = NULL;
2015 
2016 	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
2017 }
2018 
2019 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
2020 				       struct ftrace_hash *new_hash)
2021 {
2022 	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
2023 
2024 	if (ftrace_hash_empty(old_hash))
2025 		old_hash = NULL;
2026 
2027 	if (ftrace_hash_empty(new_hash))
2028 		new_hash = NULL;
2029 
2030 	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
2031 }
2032 
2033 static void print_ip_ins(const char *fmt, const unsigned char *p)
2034 {
2035 	int i;
2036 
2037 	printk(KERN_CONT "%s", fmt);
2038 
2039 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
2040 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
2041 }
2042 
2043 static struct ftrace_ops *
2044 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
2045 static struct ftrace_ops *
2046 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
2047 
2048 enum ftrace_bug_type ftrace_bug_type;
2049 const void *ftrace_expected;
2050 
2051 static void print_bug_type(void)
2052 {
2053 	switch (ftrace_bug_type) {
2054 	case FTRACE_BUG_UNKNOWN:
2055 		break;
2056 	case FTRACE_BUG_INIT:
2057 		pr_info("Initializing ftrace call sites\n");
2058 		break;
2059 	case FTRACE_BUG_NOP:
2060 		pr_info("Setting ftrace call site to NOP\n");
2061 		break;
2062 	case FTRACE_BUG_CALL:
2063 		pr_info("Setting ftrace call site to call ftrace function\n");
2064 		break;
2065 	case FTRACE_BUG_UPDATE:
2066 		pr_info("Updating ftrace call site to call a different ftrace function\n");
2067 		break;
2068 	}
2069 }
2070 
2071 /**
2072  * ftrace_bug - report and shutdown function tracer
2073  * @failed: The failed type (EFAULT, EINVAL, EPERM)
2074  * @rec: The record that failed
2075  *
2076  * The arch code that enables or disables the function tracing
2077  * can call ftrace_bug() when it has detected a problem in
2078  * modifying the code. @failed should be one of:
2079  * EFAULT - if the problem happens on reading the @ip address
2080  * EINVAL - if what is read at @ip is not what was expected
2081  * EPERM - if the problem happens on writing to the @ip address
2082  */
2083 void ftrace_bug(int failed, struct dyn_ftrace *rec)
2084 {
2085 	unsigned long ip = rec ? rec->ip : 0;
2086 
2087 	switch (failed) {
2088 	case -EFAULT:
2089 		FTRACE_WARN_ON_ONCE(1);
2090 		pr_info("ftrace faulted on modifying ");
2091 		print_ip_sym(ip);
2092 		break;
2093 	case -EINVAL:
2094 		FTRACE_WARN_ON_ONCE(1);
2095 		pr_info("ftrace failed to modify ");
2096 		print_ip_sym(ip);
2097 		print_ip_ins(" actual:   ", (unsigned char *)ip);
2098 		pr_cont("\n");
2099 		if (ftrace_expected) {
2100 			print_ip_ins(" expected: ", ftrace_expected);
2101 			pr_cont("\n");
2102 		}
2103 		break;
2104 	case -EPERM:
2105 		FTRACE_WARN_ON_ONCE(1);
2106 		pr_info("ftrace faulted on writing ");
2107 		print_ip_sym(ip);
2108 		break;
2109 	default:
2110 		FTRACE_WARN_ON_ONCE(1);
2111 		pr_info("ftrace faulted on unknown error ");
2112 		print_ip_sym(ip);
2113 	}
2114 	print_bug_type();
2115 	if (rec) {
2116 		struct ftrace_ops *ops = NULL;
2117 
2118 		pr_info("ftrace record flags: %lx\n", rec->flags);
2119 		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2120 			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2121 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
2122 			ops = ftrace_find_tramp_ops_any(rec);
2123 			if (ops) {
2124 				do {
2125 					pr_cont("\ttramp: %pS (%pS)",
2126 						(void *)ops->trampoline,
2127 						(void *)ops->func);
2128 					ops = ftrace_find_tramp_ops_next(rec, ops);
2129 				} while (ops);
2130 			} else
2131 				pr_cont("\ttramp: ERROR!");
2132 
2133 		}
2134 		ip = ftrace_get_addr_curr(rec);
2135 		pr_cont("\n expected tramp: %lx\n", ip);
2136 	}
2137 }
2138 
2139 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
2140 {
2141 	unsigned long flag = 0UL;
2142 
2143 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2144 
2145 	if (rec->flags & FTRACE_FL_DISABLED)
2146 		return FTRACE_UPDATE_IGNORE;
2147 
2148 	/*
2149 	 * If we are updating calls:
2150 	 *
2151 	 *   If the record has a ref count, then we need to enable it
2152 	 *   because someone is using it.
2153 	 *
2154 	 *   Otherwise we make sure it's disabled.
2155 	 *
2156 	 * If we are disabling calls, then disable all records that
2157 	 * are enabled.
2158 	 */
2159 	if (enable && ftrace_rec_count(rec))
2160 		flag = FTRACE_FL_ENABLED;
2161 
2162 	/*
2163 	 * If enabling and the REGS flag does not match the REGS_EN, or
2164 	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2165 	 * this record. Set flags to fail the compare against ENABLED.
2166 	 */
2167 	if (flag) {
2168 		if (!(rec->flags & FTRACE_FL_REGS) !=
2169 		    !(rec->flags & FTRACE_FL_REGS_EN))
2170 			flag |= FTRACE_FL_REGS;
2171 
2172 		if (!(rec->flags & FTRACE_FL_TRAMP) !=
2173 		    !(rec->flags & FTRACE_FL_TRAMP_EN))
2174 			flag |= FTRACE_FL_TRAMP;
2175 	}
2176 
2177 	/* If the state of this record hasn't changed, then do nothing */
2178 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2179 		return FTRACE_UPDATE_IGNORE;
2180 
2181 	if (flag) {
2182 		/* Save off if rec is being enabled (for return value) */
2183 		flag ^= rec->flags & FTRACE_FL_ENABLED;
2184 
2185 		if (update) {
2186 			rec->flags |= FTRACE_FL_ENABLED;
2187 			if (flag & FTRACE_FL_REGS) {
2188 				if (rec->flags & FTRACE_FL_REGS)
2189 					rec->flags |= FTRACE_FL_REGS_EN;
2190 				else
2191 					rec->flags &= ~FTRACE_FL_REGS_EN;
2192 			}
2193 			if (flag & FTRACE_FL_TRAMP) {
2194 				if (rec->flags & FTRACE_FL_TRAMP)
2195 					rec->flags |= FTRACE_FL_TRAMP_EN;
2196 				else
2197 					rec->flags &= ~FTRACE_FL_TRAMP_EN;
2198 			}
2199 		}
2200 
2201 		/*
2202 		 * If this record is being updated from a nop, then
2203 		 *   return UPDATE_MAKE_CALL.
2204 		 * Otherwise,
2205 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2206 		 *   from the save regs, to a non-save regs function or
2207 		 *   vice versa, or from a trampoline call.
2208 		 */
2209 		if (flag & FTRACE_FL_ENABLED) {
2210 			ftrace_bug_type = FTRACE_BUG_CALL;
2211 			return FTRACE_UPDATE_MAKE_CALL;
2212 		}
2213 
2214 		ftrace_bug_type = FTRACE_BUG_UPDATE;
2215 		return FTRACE_UPDATE_MODIFY_CALL;
2216 	}
2217 
2218 	if (update) {
2219 		/* If there are no more users, clear all flags */
2220 		if (!ftrace_rec_count(rec))
2221 			rec->flags = 0;
2222 		else
2223 			/*
2224 			 * Just disable the record, but keep the ops TRAMP
2225 			 * and REGS states. The _EN flags must be disabled though.
2226 			 */
2227 			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2228 					FTRACE_FL_REGS_EN);
2229 	}
2230 
2231 	ftrace_bug_type = FTRACE_BUG_NOP;
2232 	return FTRACE_UPDATE_MAKE_NOP;
2233 }
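
/*
 * For example (illustrative): a record that is currently enabled without
 * the regs variant (FTRACE_FL_ENABLED set, FTRACE_FL_REGS_EN clear) but
 * that now has FTRACE_FL_REGS set fails the compare against ENABLED
 * above, so ftrace_check_record() returns FTRACE_UPDATE_MODIFY_CALL and
 * the call site gets switched to the regs-saving variant.
 */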
2234 
2235 /**
2236  * ftrace_update_record - set a record that now is tracing or not
2237  * @rec: the record to update
2238  * @enable: set to 1 if the record is tracing, zero to force disable
2239  *
2240  * The records that represent all functions that can be traced need
2241  * to be updated when tracing has been enabled.
2242  */
2243 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2244 {
2245 	return ftrace_check_record(rec, enable, 1);
2246 }
2247 
2248 /**
2249  * ftrace_test_record - check if the record has been enabled or not
2250  * @rec: the record to test
2251  * @enable: set to 1 to check if enabled, 0 if it is disabled
2252  *
2253  * The arch code may need to test if a record is already set to
2254  * tracing to determine how to modify the function code that it
2255  * represents.
2256  */
2257 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2258 {
2259 	return ftrace_check_record(rec, enable, 0);
2260 }
2261 
2262 static struct ftrace_ops *
2263 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2264 {
2265 	struct ftrace_ops *op;
2266 	unsigned long ip = rec->ip;
2267 
2268 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2269 
2270 		if (!op->trampoline)
2271 			continue;
2272 
2273 		if (hash_contains_ip(ip, op->func_hash))
2274 			return op;
2275 	} while_for_each_ftrace_op(op);
2276 
2277 	return NULL;
2278 }
2279 
2280 static struct ftrace_ops *
2281 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2282 			   struct ftrace_ops *op)
2283 {
2284 	unsigned long ip = rec->ip;
2285 
2286 	while_for_each_ftrace_op(op) {
2287 
2288 		if (!op->trampoline)
2289 			continue;
2290 
2291 		if (hash_contains_ip(ip, op->func_hash))
2292 			return op;
2293 	}
2294 
2295 	return NULL;
2296 }
2297 
2298 static struct ftrace_ops *
2299 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2300 {
2301 	struct ftrace_ops *op;
2302 	unsigned long ip = rec->ip;
2303 
2304 	/*
2305 	 * Need to check removed ops first.
2306 	 * If they are being removed, and this rec has a tramp,
2307 	 * and this rec is in the ops list, then it would be the
2308 	 * one with the tramp.
2309 	 */
2310 	if (removed_ops) {
2311 		if (hash_contains_ip(ip, &removed_ops->old_hash))
2312 			return removed_ops;
2313 	}
2314 
2315 	/*
2316 	 * Need to find the current trampoline for a rec.
2317 	 * Now, a trampoline is only attached to a rec if there
2318 	 * was a single 'ops' attached to it. But this can be called
2319 	 * when we are adding another op to the rec or removing the
2320 	 * current one. Thus, if the op is being added, we can
2321 	 * ignore it because it hasn't attached itself to the rec
2322 	 * yet.
2323 	 *
2324 	 * If an ops is being modified (hooking to different functions)
2325 	 * then we don't care about the new functions that are being
2326 	 * added, just the old ones (that are probably being removed).
2327 	 *
2328 	 * If we are adding an ops to a function that already is using
2329 	 * a trampoline, that trampoline needs to be removed (trampolines
2330 	 * are only for a single connected ops), so an ops that is not
2331 	 * being modified also needs to be checked.
2332 	 */
2333 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2334 
2335 		if (!op->trampoline)
2336 			continue;
2337 
2338 		/*
2339 		 * If the ops is being added, it hasn't gotten to
2340 		 * the point to be removed from this tree yet.
2341 		 */
2342 		if (op->flags & FTRACE_OPS_FL_ADDING)
2343 			continue;
2344 
2345 
2346 		/*
2347 		 * If the ops is being modified and is in the old
2348 		 * hash, then it is probably being removed from this
2349 		 * function.
2350 		 */
2351 		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2352 		    hash_contains_ip(ip, &op->old_hash))
2353 			return op;
2354 		/*
2355 		 * If the ops is not being added or modified, and it's
2356 		 * in its normal filter hash, then this must be the one
2357 		 * we want!
2358 		 */
2359 		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2360 		    hash_contains_ip(ip, op->func_hash))
2361 			return op;
2362 
2363 	} while_for_each_ftrace_op(op);
2364 
2365 	return NULL;
2366 }
2367 
2368 static struct ftrace_ops *
2369 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2370 {
2371 	struct ftrace_ops *op;
2372 	unsigned long ip = rec->ip;
2373 
2374 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2375 		/* pass rec in as regs to have non-NULL val */
2376 		if (hash_contains_ip(ip, op->func_hash))
2377 			return op;
2378 	} while_for_each_ftrace_op(op);
2379 
2380 	return NULL;
2381 }
2382 
2383 /**
2384  * ftrace_get_addr_new - Get the call address to set to
2385  * @rec:  The ftrace record descriptor
2386  *
2387  * If the record has the FTRACE_FL_REGS set, that means that it
2388  * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2389  * is not set, then it wants to convert to the normal callback.
2390  *
2391  * Returns the address of the trampoline to set to
2392  */
2393 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2394 {
2395 	struct ftrace_ops *ops;
2396 
2397 	/* Trampolines take precedence over regs */
2398 	if (rec->flags & FTRACE_FL_TRAMP) {
2399 		ops = ftrace_find_tramp_ops_new(rec);
2400 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2401 			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2402 				(void *)rec->ip, (void *)rec->ip, rec->flags);
2403 			/* Ftrace is shutting down, return anything */
2404 			return (unsigned long)FTRACE_ADDR;
2405 		}
2406 		return ops->trampoline;
2407 	}
2408 
2409 	if (rec->flags & FTRACE_FL_REGS)
2410 		return (unsigned long)FTRACE_REGS_ADDR;
2411 	else
2412 		return (unsigned long)FTRACE_ADDR;
2413 }
2414 
2415 /**
2416  * ftrace_get_addr_curr - Get the call address that is already there
2417  * @rec:  The ftrace record descriptor
2418  *
2419  * The FTRACE_FL_REGS_EN is set when the record already points to
2420  * a function that saves all the regs. Basically the '_EN' version
2421  * represents the current state of the function.
2422  *
2423  * Returns the address of the trampoline that is currently being called
2424  */
2425 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2426 {
2427 	struct ftrace_ops *ops;
2428 
2429 	/* Trampolines take precedence over regs */
2430 	if (rec->flags & FTRACE_FL_TRAMP_EN) {
2431 		ops = ftrace_find_tramp_ops_curr(rec);
2432 		if (FTRACE_WARN_ON(!ops)) {
2433 			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2434 				(void *)rec->ip, (void *)rec->ip);
2435 			/* Ftrace is shutting down, return anything */
2436 			return (unsigned long)FTRACE_ADDR;
2437 		}
2438 		return ops->trampoline;
2439 	}
2440 
2441 	if (rec->flags & FTRACE_FL_REGS_EN)
2442 		return (unsigned long)FTRACE_REGS_ADDR;
2443 	else
2444 		return (unsigned long)FTRACE_ADDR;
2445 }
2446 
2447 static int
2448 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2449 {
2450 	unsigned long ftrace_old_addr;
2451 	unsigned long ftrace_addr;
2452 	int ret;
2453 
2454 	ftrace_addr = ftrace_get_addr_new(rec);
2455 
2456 	/* This needs to be done before we call ftrace_update_record */
2457 	ftrace_old_addr = ftrace_get_addr_curr(rec);
2458 
2459 	ret = ftrace_update_record(rec, enable);
2460 
2461 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2462 
2463 	switch (ret) {
2464 	case FTRACE_UPDATE_IGNORE:
2465 		return 0;
2466 
2467 	case FTRACE_UPDATE_MAKE_CALL:
2468 		ftrace_bug_type = FTRACE_BUG_CALL;
2469 		return ftrace_make_call(rec, ftrace_addr);
2470 
2471 	case FTRACE_UPDATE_MAKE_NOP:
2472 		ftrace_bug_type = FTRACE_BUG_NOP;
2473 		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2474 
2475 	case FTRACE_UPDATE_MODIFY_CALL:
2476 		ftrace_bug_type = FTRACE_BUG_UPDATE;
2477 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2478 	}
2479 
2480 	return -1; /* unknown ftrace bug */
2481 }
2482 
2483 void __weak ftrace_replace_code(int enable)
2484 {
2485 	struct dyn_ftrace *rec;
2486 	struct ftrace_page *pg;
2487 	int failed;
2488 
2489 	if (unlikely(ftrace_disabled))
2490 		return;
2491 
2492 	do_for_each_ftrace_rec(pg, rec) {
2493 
2494 		if (rec->flags & FTRACE_FL_DISABLED)
2495 			continue;
2496 
2497 		failed = __ftrace_replace_code(rec, enable);
2498 		if (failed) {
2499 			ftrace_bug(failed, rec);
2500 			/* Stop processing */
2501 			return;
2502 		}
2503 	} while_for_each_ftrace_rec();
2504 }
2505 
2506 struct ftrace_rec_iter {
2507 	struct ftrace_page	*pg;
2508 	int			index;
2509 };
2510 
2511 /**
2512  * ftrace_rec_iter_start - start up iterating over traced functions
2513  *
2514  * Returns an iterator handle that is used to iterate over all
2515  * the records that represent address locations where functions
2516  * are traced.
2517  *
2518  * May return NULL if no records are available.
2519  */
2520 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2521 {
2522 	/*
2523 	 * We only use a single iterator.
2524 	 * Protected by the ftrace_lock mutex.
2525 	 */
2526 	static struct ftrace_rec_iter ftrace_rec_iter;
2527 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2528 
2529 	iter->pg = ftrace_pages_start;
2530 	iter->index = 0;
2531 
2532 	/* Could have empty pages */
2533 	while (iter->pg && !iter->pg->index)
2534 		iter->pg = iter->pg->next;
2535 
2536 	if (!iter->pg)
2537 		return NULL;
2538 
2539 	return iter;
2540 }
2541 
2542 /**
2543  * ftrace_rec_iter_next - get the next record to process.
2544  * @iter: The handle to the iterator.
2545  *
2546  * Returns the next iterator after the given iterator @iter.
2547  */
2548 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2549 {
2550 	iter->index++;
2551 
2552 	if (iter->index >= iter->pg->index) {
2553 		iter->pg = iter->pg->next;
2554 		iter->index = 0;
2555 
2556 		/* Could have empty pages */
2557 		while (iter->pg && !iter->pg->index)
2558 			iter->pg = iter->pg->next;
2559 	}
2560 
2561 	if (!iter->pg)
2562 		return NULL;
2563 
2564 	return iter;
2565 }
2566 
2567 /**
2568  * ftrace_rec_iter_record - get the record at the iterator location
2569  * @iter: The current iterator location
2570  *
2571  * Returns the record that the current @iter is at.
2572  */
2573 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2574 {
2575 	return &iter->pg->records[iter->index];
2576 }
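
/*
 * Illustrative sketch of how arch code is expected to walk the records
 * with the iterator above (under ftrace_lock, since only a single static
 * iterator exists):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... patch or inspect rec->ip here ...
 *	}
 */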
2577 
2578 static int
2579 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2580 {
2581 	int ret;
2582 
2583 	if (unlikely(ftrace_disabled))
2584 		return 0;
2585 
2586 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2587 	if (ret) {
2588 		ftrace_bug_type = FTRACE_BUG_INIT;
2589 		ftrace_bug(ret, rec);
2590 		return 0;
2591 	}
2592 	return 1;
2593 }
2594 
2595 /*
2596  * archs can override this function if they must do something
2597  * before the code modification is performed.
2598  */
2599 int __weak ftrace_arch_code_modify_prepare(void)
2600 {
2601 	return 0;
2602 }
2603 
2604 /*
2605  * archs can override this function if they must do something
2606  * after the code modification is performed.
2607  */
2608 int __weak ftrace_arch_code_modify_post_process(void)
2609 {
2610 	return 0;
2611 }
2612 
2613 void ftrace_modify_all_code(int command)
2614 {
2615 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2616 	int err = 0;
2617 
2618 	/*
2619 	 * If the ftrace_caller calls a ftrace_ops func directly,
2620 	 * we need to make sure that it only traces functions it
2621 	 * expects to trace. When doing the switch of functions,
2622 	 * we need to update to the ftrace_ops_list_func first
2623 	 * before the transition between the old and new calls is set,
2624 	 * as the ftrace_ops_list_func will check the ops hashes
2625 	 * to make sure the ops have the right functions
2626 	 * traced.
2627 	 */
2628 	if (update) {
2629 		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2630 		if (FTRACE_WARN_ON(err))
2631 			return;
2632 	}
2633 
2634 	if (command & FTRACE_UPDATE_CALLS)
2635 		ftrace_replace_code(1);
2636 	else if (command & FTRACE_DISABLE_CALLS)
2637 		ftrace_replace_code(0);
2638 
2639 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2640 		function_trace_op = set_function_trace_op;
2641 		smp_wmb();
2642 		/* If irqs are disabled, we are in stop machine */
2643 		if (!irqs_disabled())
2644 			smp_call_function(ftrace_sync_ipi, NULL, 1);
2645 		err = ftrace_update_ftrace_func(ftrace_trace_function);
2646 		if (FTRACE_WARN_ON(err))
2647 			return;
2648 	}
2649 
2650 	if (command & FTRACE_START_FUNC_RET)
2651 		err = ftrace_enable_ftrace_graph_caller();
2652 	else if (command & FTRACE_STOP_FUNC_RET)
2653 		err = ftrace_disable_ftrace_graph_caller();
2654 	FTRACE_WARN_ON(err);
2655 }
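
/*
 * For example, ftrace_startup_enable() below may pass
 * FTRACE_UPDATE_CALLS | FTRACE_UPDATE_TRACE_FUNC, in which case the code
 * above first points ftrace_caller at ftrace_ops_list_func, then patches
 * the call sites, and only then installs the final ftrace_trace_function.
 */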
2656 
2657 static int __ftrace_modify_code(void *data)
2658 {
2659 	int *command = data;
2660 
2661 	ftrace_modify_all_code(*command);
2662 
2663 	return 0;
2664 }
2665 
2666 /**
2667  * ftrace_run_stop_machine - go back to the stop machine method
2668  * @command: The command to tell ftrace what to do
2669  *
2670  * If an arch needs to fall back to the stop machine method, then
2671  * it can call this function.
2672  */
2673 void ftrace_run_stop_machine(int command)
2674 {
2675 	stop_machine(__ftrace_modify_code, &command, NULL);
2676 }
2677 
2678 /**
2679  * arch_ftrace_update_code - modify the code to trace or not trace
2680  * @command: The command that needs to be done
2681  *
2682  * Archs can override this function if they do not need to
2683  * run stop_machine() to modify code.
2684  */
2685 void __weak arch_ftrace_update_code(int command)
2686 {
2687 	ftrace_run_stop_machine(command);
2688 }
2689 
2690 static void ftrace_run_update_code(int command)
2691 {
2692 	int ret;
2693 
2694 	ret = ftrace_arch_code_modify_prepare();
2695 	FTRACE_WARN_ON(ret);
2696 	if (ret)
2697 		return;
2698 
2699 	/*
2700 	 * By default we use stop_machine() to modify the code.
2701 	 * But archs can do whatever they want as long as it
2702 	 * is safe. stop_machine() is the safest, but also
2703 	 * produces the most overhead.
2704 	 */
2705 	arch_ftrace_update_code(command);
2706 
2707 	ret = ftrace_arch_code_modify_post_process();
2708 	FTRACE_WARN_ON(ret);
2709 }
2710 
2711 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2712 				   struct ftrace_ops_hash *old_hash)
2713 {
2714 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2715 	ops->old_hash.filter_hash = old_hash->filter_hash;
2716 	ops->old_hash.notrace_hash = old_hash->notrace_hash;
2717 	ftrace_run_update_code(command);
2718 	ops->old_hash.filter_hash = NULL;
2719 	ops->old_hash.notrace_hash = NULL;
2720 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2721 }
2722 
2723 static ftrace_func_t saved_ftrace_func;
2724 static int ftrace_start_up;
2725 
2726 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2727 {
2728 }
2729 
2730 static void per_cpu_ops_free(struct ftrace_ops *ops)
2731 {
2732 	free_percpu(ops->disabled);
2733 }
2734 
2735 static void ftrace_startup_enable(int command)
2736 {
2737 	if (saved_ftrace_func != ftrace_trace_function) {
2738 		saved_ftrace_func = ftrace_trace_function;
2739 		command |= FTRACE_UPDATE_TRACE_FUNC;
2740 	}
2741 
2742 	if (!command || !ftrace_enabled)
2743 		return;
2744 
2745 	ftrace_run_update_code(command);
2746 }
2747 
2748 static void ftrace_startup_all(int command)
2749 {
2750 	update_all_ops = true;
2751 	ftrace_startup_enable(command);
2752 	update_all_ops = false;
2753 }
2754 
2755 static int ftrace_startup(struct ftrace_ops *ops, int command)
2756 {
2757 	int ret;
2758 
2759 	if (unlikely(ftrace_disabled))
2760 		return -ENODEV;
2761 
2762 	ret = __register_ftrace_function(ops);
2763 	if (ret)
2764 		return ret;
2765 
2766 	ftrace_start_up++;
2767 
2768 	/*
2769 	 * Note that ftrace probes use this to start up
2770 	 * and modify functions it will probe. But we still
2771 	 * set the ADDING flag for modification, as probes
2772 	 * do not have trampolines. If they add them in the
2773 	 * future, then the probes will need to distinguish
2774 	 * between adding and updating probes.
2775 	 */
2776 	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2777 
2778 	ret = ftrace_hash_ipmodify_enable(ops);
2779 	if (ret < 0) {
2780 		/* Rollback registration process */
2781 		__unregister_ftrace_function(ops);
2782 		ftrace_start_up--;
2783 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2784 		return ret;
2785 	}
2786 
2787 	if (ftrace_hash_rec_enable(ops, 1))
2788 		command |= FTRACE_UPDATE_CALLS;
2789 
2790 	ftrace_startup_enable(command);
2791 
2792 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
2793 
2794 	return 0;
2795 }
2796 
2797 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2798 {
2799 	int ret;
2800 
2801 	if (unlikely(ftrace_disabled))
2802 		return -ENODEV;
2803 
2804 	ret = __unregister_ftrace_function(ops);
2805 	if (ret)
2806 		return ret;
2807 
2808 	ftrace_start_up--;
2809 	/*
2810 	 * Just warn in case of an imbalance; no need to kill ftrace. It's not
2811 	 * critical, but the ftrace_call callers may never be nopped again after
2812 	 * further ftrace uses.
2813 	 */
2814 	WARN_ON_ONCE(ftrace_start_up < 0);
2815 
2816 	/* Disabling ipmodify never fails */
2817 	ftrace_hash_ipmodify_disable(ops);
2818 
2819 	if (ftrace_hash_rec_disable(ops, 1))
2820 		command |= FTRACE_UPDATE_CALLS;
2821 
2822 	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2823 
2824 	if (saved_ftrace_func != ftrace_trace_function) {
2825 		saved_ftrace_func = ftrace_trace_function;
2826 		command |= FTRACE_UPDATE_TRACE_FUNC;
2827 	}
2828 
2829 	if (!command || !ftrace_enabled) {
2830 		/*
2831 		 * If these are dynamic or per_cpu ops, they still
2832 		 * need their data freed. Since function tracing is
2833 		 * not currently active, we can just free them
2834 		 * without synchronizing all CPUs.
2835 		 */
2836 		if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
2837 			goto free_ops;
2838 
2839 		return 0;
2840 	}
2841 
2842 	/*
2843 	 * If the ops uses a trampoline, then it needs to be
2844 	 * tested first on update.
2845 	 */
2846 	ops->flags |= FTRACE_OPS_FL_REMOVING;
2847 	removed_ops = ops;
2848 
2849 	/* The trampoline logic checks the old hashes */
2850 	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2851 	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2852 
2853 	ftrace_run_update_code(command);
2854 
2855 	/*
2856 	 * If there's no more ops registered with ftrace, run a
2857 	 * sanity check to make sure all rec flags are cleared.
2858 	 */
2859 	if (rcu_dereference_protected(ftrace_ops_list,
2860 			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
2861 		struct ftrace_page *pg;
2862 		struct dyn_ftrace *rec;
2863 
2864 		do_for_each_ftrace_rec(pg, rec) {
2865 			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2866 				pr_warn("  %pS flags:%lx\n",
2867 					(void *)rec->ip, rec->flags);
2868 		} while_for_each_ftrace_rec();
2869 	}
2870 
2871 	ops->old_hash.filter_hash = NULL;
2872 	ops->old_hash.notrace_hash = NULL;
2873 
2874 	removed_ops = NULL;
2875 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2876 
2877 	/*
2878 	 * Dynamic ops may be freed; we must make sure that all
2879 	 * callers are done before leaving this function.
2880 	 * The same goes for freeing the per_cpu data of the per_cpu
2881 	 * ops.
2882 	 */
2883 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
2884 		/*
2885 		 * We need to do a hard force of sched synchronization.
2886 		 * This is because we use preempt_disable() to do RCU, but
2887 		 * the function tracers can be called where RCU is not watching
2888 	 * (like before user_exit()). We cannot rely on the RCU
2889 		 * infrastructure to do the synchronization, thus we must do it
2890 		 * ourselves.
2891 		 */
2892 		schedule_on_each_cpu(ftrace_sync);
2893 
2894 		/*
2895 		 * When the kernel is preemptive, tasks can be preempted
2896 		 * while on a ftrace trampoline. Just scheduling a task on
2897 		 * a CPU is not good enough to flush them. Calling
2898 		 * synchronize_rcu_tasks() will wait for those tasks to
2899 		 * execute and either schedule voluntarily or enter user space.
2900 		 */
2901 		if (IS_ENABLED(CONFIG_PREEMPT))
2902 			synchronize_rcu_tasks();
2903 
2904  free_ops:
2905 		arch_ftrace_trampoline_free(ops);
2906 
2907 		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
2908 			per_cpu_ops_free(ops);
2909 	}
2910 
2911 	return 0;
2912 }
2913 
2914 static void ftrace_startup_sysctl(void)
2915 {
2916 	int command;
2917 
2918 	if (unlikely(ftrace_disabled))
2919 		return;
2920 
2921 	/* Force update next time */
2922 	saved_ftrace_func = NULL;
2923 	/* ftrace_start_up is true if we want ftrace running */
2924 	if (ftrace_start_up) {
2925 		command = FTRACE_UPDATE_CALLS;
2926 		if (ftrace_graph_active)
2927 			command |= FTRACE_START_FUNC_RET;
2928 		ftrace_startup_enable(command);
2929 	}
2930 }
2931 
2932 static void ftrace_shutdown_sysctl(void)
2933 {
2934 	int command;
2935 
2936 	if (unlikely(ftrace_disabled))
2937 		return;
2938 
2939 	/* ftrace_start_up is true if ftrace is running */
2940 	if (ftrace_start_up) {
2941 		command = FTRACE_DISABLE_CALLS;
2942 		if (ftrace_graph_active)
2943 			command |= FTRACE_STOP_FUNC_RET;
2944 		ftrace_run_update_code(command);
2945 	}
2946 }
2947 
2948 static u64		ftrace_update_time;
2949 unsigned long		ftrace_update_tot_cnt;
2950 
2951 static inline int ops_traces_mod(struct ftrace_ops *ops)
2952 {
2953 	/*
2954 	 * An empty filter_hash defaults to tracing the module's functions.
2955 	 * But a notrace hash requires a test of individual module functions.
2956 	 */
2957 	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2958 		ftrace_hash_empty(ops->func_hash->notrace_hash);
2959 }
2960 
2961 /*
2962  * Check if the current ops references the record.
2963  *
2964  * If the ops traces all functions, then it was already accounted for.
2965  * If the ops does not trace the current record function, skip it.
2966  * If the ops ignores the function via notrace filter, skip it.
2967  */
2968 static inline bool
2969 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2970 {
2971 	/* If ops isn't enabled, ignore it */
2972 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2973 		return 0;
2974 
2975 	/* If ops traces all then it includes this function */
2976 	if (ops_traces_mod(ops))
2977 		return 1;
2978 
2979 	/* The function must be in the filter */
2980 	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2981 	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2982 		return 0;
2983 
2984 	/* If in notrace hash, we ignore it too */
2985 	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2986 		return 0;
2987 
2988 	return 1;
2989 }
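
/*
 * For example (illustrative, with a hypothetical function funcX): an ops
 * whose filter_hash contains only funcX and whose notrace_hash is empty
 * references only funcX's record, while the same ops with both hashes
 * empty references every record.
 */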
2990 
2991 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2992 {
2993 	struct ftrace_page *pg;
2994 	struct dyn_ftrace *p;
2995 	u64 start, stop;
2996 	unsigned long update_cnt = 0;
2997 	unsigned long rec_flags = 0;
2998 	int i;
2999 
3000 	start = ftrace_now(raw_smp_processor_id());
3001 
3002 	/*
3003 	 * When a module is loaded, this function is called to convert
3004 	 * the calls to mcount in its text to nops, and also to create
3005 	 * an entry in the ftrace data. Now, if ftrace is activated
3006 	 * after this call, but before the module sets its text to
3007 	 * read-only, the modification of enabling ftrace can fail if
3008 	 * the read-only is done while ftrace is converting the calls.
3009 	 * To prevent this, the module's records are set as disabled
3010 	 * and will be enabled after the call to set the module's text
3011 	 * to read-only.
3012 	 */
3013 	if (mod)
3014 		rec_flags |= FTRACE_FL_DISABLED;
3015 
3016 	for (pg = new_pgs; pg; pg = pg->next) {
3017 
3018 		for (i = 0; i < pg->index; i++) {
3019 
3020 			/* If something went wrong, bail without enabling anything */
3021 			if (unlikely(ftrace_disabled))
3022 				return -1;
3023 
3024 			p = &pg->records[i];
3025 			p->flags = rec_flags;
3026 
3027 			/*
3028 			 * Do the initial record conversion from mcount jump
3029 			 * to the NOP instructions.
3030 			 */
3031 			if (!ftrace_code_disable(mod, p))
3032 				break;
3033 
3034 			update_cnt++;
3035 		}
3036 	}
3037 
3038 	stop = ftrace_now(raw_smp_processor_id());
3039 	ftrace_update_time = stop - start;
3040 	ftrace_update_tot_cnt += update_cnt;
3041 
3042 	return 0;
3043 }
3044 
3045 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3046 {
3047 	int order;
3048 	int cnt;
3049 
3050 	if (WARN_ON(!count))
3051 		return -EINVAL;
3052 
3053 	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
3054 
3055 	/*
3056 	 * We want to fill as much as possible. No more than a page
3057 	 * may be empty.
3058 	 */
3059 	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
3060 		order--;
3061 
3062  again:
3063 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3064 
3065 	if (!pg->records) {
3066 		/* if we can't allocate this size, try something smaller */
3067 		if (!order)
3068 			return -ENOMEM;
3069 		order >>= 1;
3070 		goto again;
3071 	}
3072 
3073 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3074 	pg->size = cnt;
3075 
3076 	if (cnt > count)
3077 		cnt = count;
3078 
3079 	return cnt;
3080 }
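
/*
 * Illustrative note on the sizing above: the order starts as the smallest
 * power-of-two number of pages that could hold @count records, and is
 * trimmed while at least a full page of entries would go unused. If that
 * leaves fewer slots than @count, the smaller cnt is simply returned and
 * ftrace_allocate_pages() below allocates another ftrace_page for the
 * remainder.
 */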
3081 
3082 static struct ftrace_page *
3083 ftrace_allocate_pages(unsigned long num_to_init)
3084 {
3085 	struct ftrace_page *start_pg;
3086 	struct ftrace_page *pg;
3087 	int order;
3088 	int cnt;
3089 
3090 	if (!num_to_init)
3091 		return NULL;
3092 
3093 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3094 	if (!pg)
3095 		return NULL;
3096 
3097 	/*
3098 	 * Try to allocate as much as possible in one contiguous
3099 	 * location that fills in all of the space. We want to
3100 	 * waste as little space as possible.
3101 	 */
3102 	for (;;) {
3103 		cnt = ftrace_allocate_records(pg, num_to_init);
3104 		if (cnt < 0)
3105 			goto free_pages;
3106 
3107 		num_to_init -= cnt;
3108 		if (!num_to_init)
3109 			break;
3110 
3111 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3112 		if (!pg->next)
3113 			goto free_pages;
3114 
3115 		pg = pg->next;
3116 	}
3117 
3118 	return start_pg;
3119 
3120  free_pages:
3121 	pg = start_pg;
3122 	while (pg) {
3123 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3124 		free_pages((unsigned long)pg->records, order);
3125 		start_pg = pg->next;
3126 		kfree(pg);
3127 		pg = start_pg;
3128 	}
3129 	pr_info("ftrace: FAILED to allocate memory for functions\n");
3130 	return NULL;
3131 }
3132 
3133 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3134 
3135 struct ftrace_iterator {
3136 	loff_t				pos;
3137 	loff_t				func_pos;
3138 	loff_t				mod_pos;
3139 	struct ftrace_page		*pg;
3140 	struct dyn_ftrace		*func;
3141 	struct ftrace_func_probe	*probe;
3142 	struct ftrace_func_entry	*probe_entry;
3143 	struct trace_parser		parser;
3144 	struct ftrace_hash		*hash;
3145 	struct ftrace_ops		*ops;
3146 	struct trace_array		*tr;
3147 	struct list_head		*mod_list;
3148 	int				pidx;
3149 	int				idx;
3150 	unsigned			flags;
3151 };
3152 
3153 static void *
3154 t_probe_next(struct seq_file *m, loff_t *pos)
3155 {
3156 	struct ftrace_iterator *iter = m->private;
3157 	struct trace_array *tr = iter->ops->private;
3158 	struct list_head *func_probes;
3159 	struct ftrace_hash *hash;
3160 	struct list_head *next;
3161 	struct hlist_node *hnd = NULL;
3162 	struct hlist_head *hhd;
3163 	int size;
3164 
3165 	(*pos)++;
3166 	iter->pos = *pos;
3167 
3168 	if (!tr)
3169 		return NULL;
3170 
3171 	func_probes = &tr->func_probes;
3172 	if (list_empty(func_probes))
3173 		return NULL;
3174 
3175 	if (!iter->probe) {
3176 		next = func_probes->next;
3177 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3178 	}
3179 
3180 	if (iter->probe_entry)
3181 		hnd = &iter->probe_entry->hlist;
3182 
3183 	hash = iter->probe->ops.func_hash->filter_hash;
3184 	size = 1 << hash->size_bits;
3185 
3186  retry:
3187 	if (iter->pidx >= size) {
3188 		if (iter->probe->list.next == func_probes)
3189 			return NULL;
3190 		next = iter->probe->list.next;
3191 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3192 		hash = iter->probe->ops.func_hash->filter_hash;
3193 		size = 1 << hash->size_bits;
3194 		iter->pidx = 0;
3195 	}
3196 
3197 	hhd = &hash->buckets[iter->pidx];
3198 
3199 	if (hlist_empty(hhd)) {
3200 		iter->pidx++;
3201 		hnd = NULL;
3202 		goto retry;
3203 	}
3204 
3205 	if (!hnd)
3206 		hnd = hhd->first;
3207 	else {
3208 		hnd = hnd->next;
3209 		if (!hnd) {
3210 			iter->pidx++;
3211 			goto retry;
3212 		}
3213 	}
3214 
3215 	if (WARN_ON_ONCE(!hnd))
3216 		return NULL;
3217 
3218 	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3219 
3220 	return iter;
3221 }
3222 
3223 static void *t_probe_start(struct seq_file *m, loff_t *pos)
3224 {
3225 	struct ftrace_iterator *iter = m->private;
3226 	void *p = NULL;
3227 	loff_t l;
3228 
3229 	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3230 		return NULL;
3231 
3232 	if (iter->mod_pos > *pos)
3233 		return NULL;
3234 
3235 	iter->probe = NULL;
3236 	iter->probe_entry = NULL;
3237 	iter->pidx = 0;
3238 	for (l = 0; l <= (*pos - iter->mod_pos); ) {
3239 		p = t_probe_next(m, &l);
3240 		if (!p)
3241 			break;
3242 	}
3243 	if (!p)
3244 		return NULL;
3245 
3246 	/* Only set this if we have an item */
3247 	iter->flags |= FTRACE_ITER_PROBE;
3248 
3249 	return iter;
3250 }
3251 
3252 static int
3253 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3254 {
3255 	struct ftrace_func_entry *probe_entry;
3256 	struct ftrace_probe_ops *probe_ops;
3257 	struct ftrace_func_probe *probe;
3258 
3259 	probe = iter->probe;
3260 	probe_entry = iter->probe_entry;
3261 
3262 	if (WARN_ON_ONCE(!probe || !probe_entry))
3263 		return -EIO;
3264 
3265 	probe_ops = probe->probe_ops;
3266 
3267 	if (probe_ops->print)
3268 		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3269 
3270 	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3271 		   (void *)probe_ops->func);
3272 
3273 	return 0;
3274 }
3275 
3276 static void *
3277 t_mod_next(struct seq_file *m, loff_t *pos)
3278 {
3279 	struct ftrace_iterator *iter = m->private;
3280 	struct trace_array *tr = iter->tr;
3281 
3282 	(*pos)++;
3283 	iter->pos = *pos;
3284 
3285 	iter->mod_list = iter->mod_list->next;
3286 
3287 	if (iter->mod_list == &tr->mod_trace ||
3288 	    iter->mod_list == &tr->mod_notrace) {
3289 		iter->flags &= ~FTRACE_ITER_MOD;
3290 		return NULL;
3291 	}
3292 
3293 	iter->mod_pos = *pos;
3294 
3295 	return iter;
3296 }
3297 
3298 static void *t_mod_start(struct seq_file *m, loff_t *pos)
3299 {
3300 	struct ftrace_iterator *iter = m->private;
3301 	void *p = NULL;
3302 	loff_t l;
3303 
3304 	if (iter->func_pos > *pos)
3305 		return NULL;
3306 
3307 	iter->mod_pos = iter->func_pos;
3308 
3309 	/* probes are only available if tr is set */
3310 	if (!iter->tr)
3311 		return NULL;
3312 
3313 	for (l = 0; l <= (*pos - iter->func_pos); ) {
3314 		p = t_mod_next(m, &l);
3315 		if (!p)
3316 			break;
3317 	}
3318 	if (!p) {
3319 		iter->flags &= ~FTRACE_ITER_MOD;
3320 		return t_probe_start(m, pos);
3321 	}
3322 
3323 	/* Only set this if we have an item */
3324 	iter->flags |= FTRACE_ITER_MOD;
3325 
3326 	return iter;
3327 }
3328 
3329 static int
3330 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3331 {
3332 	struct ftrace_mod_load *ftrace_mod;
3333 	struct trace_array *tr = iter->tr;
3334 
3335 	if (WARN_ON_ONCE(!iter->mod_list) ||
3336 			 iter->mod_list == &tr->mod_trace ||
3337 			 iter->mod_list == &tr->mod_notrace)
3338 		return -EIO;
3339 
3340 	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3341 
3342 	if (ftrace_mod->func)
3343 		seq_printf(m, "%s", ftrace_mod->func);
3344 	else
3345 		seq_putc(m, '*');
3346 
3347 	seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3348 
3349 	return 0;
3350 }
3351 
3352 static void *
3353 t_func_next(struct seq_file *m, loff_t *pos)
3354 {
3355 	struct ftrace_iterator *iter = m->private;
3356 	struct dyn_ftrace *rec = NULL;
3357 
3358 	(*pos)++;
3359 
3360  retry:
3361 	if (iter->idx >= iter->pg->index) {
3362 		if (iter->pg->next) {
3363 			iter->pg = iter->pg->next;
3364 			iter->idx = 0;
3365 			goto retry;
3366 		}
3367 	} else {
3368 		rec = &iter->pg->records[iter->idx++];
3369 		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3370 		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3371 
3372 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
3373 		     !(rec->flags & FTRACE_FL_ENABLED))) {
3374 
3375 			rec = NULL;
3376 			goto retry;
3377 		}
3378 	}
3379 
3380 	if (!rec)
3381 		return NULL;
3382 
3383 	iter->pos = iter->func_pos = *pos;
3384 	iter->func = rec;
3385 
3386 	return iter;
3387 }
3388 
3389 static void *
3390 t_next(struct seq_file *m, void *v, loff_t *pos)
3391 {
3392 	struct ftrace_iterator *iter = m->private;
3393 	loff_t l = *pos; /* t_probe_start() must use original pos */
3394 	void *ret;
3395 
3396 	if (unlikely(ftrace_disabled))
3397 		return NULL;
3398 
3399 	if (iter->flags & FTRACE_ITER_PROBE)
3400 		return t_probe_next(m, pos);
3401 
3402 	if (iter->flags & FTRACE_ITER_MOD)
3403 		return t_mod_next(m, pos);
3404 
3405 	if (iter->flags & FTRACE_ITER_PRINTALL) {
3406 		/* next must increment pos, and t_probe_start does not */
3407 		(*pos)++;
3408 		return t_mod_start(m, &l);
3409 	}
3410 
3411 	ret = t_func_next(m, pos);
3412 
3413 	if (!ret)
3414 		return t_mod_start(m, &l);
3415 
3416 	return ret;
3417 }
3418 
3419 static void reset_iter_read(struct ftrace_iterator *iter)
3420 {
3421 	iter->pos = 0;
3422 	iter->func_pos = 0;
3423 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3424 }
3425 
3426 static void *t_start(struct seq_file *m, loff_t *pos)
3427 {
3428 	struct ftrace_iterator *iter = m->private;
3429 	void *p = NULL;
3430 	loff_t l;
3431 
3432 	mutex_lock(&ftrace_lock);
3433 
3434 	if (unlikely(ftrace_disabled))
3435 		return NULL;
3436 
3437 	/*
3438 	 * If an lseek was done, then reset and start from beginning.
3439 	 */
3440 	if (*pos < iter->pos)
3441 		reset_iter_read(iter);
3442 
3443 	/*
3444 	 * For set_ftrace_filter reading, if we have the filter
3445 	 * off, we can short cut and just print out that all
3446 	 * functions are enabled.
3447 	 */
3448 	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3449 	    ftrace_hash_empty(iter->hash)) {
3450 		iter->func_pos = 1; /* Account for the message */
3451 		if (*pos > 0)
3452 			return t_mod_start(m, pos);
3453 		iter->flags |= FTRACE_ITER_PRINTALL;
3454 		/* reset in case of seek/pread */
3455 		iter->flags &= ~FTRACE_ITER_PROBE;
3456 		return iter;
3457 	}
3458 
3459 	if (iter->flags & FTRACE_ITER_MOD)
3460 		return t_mod_start(m, pos);
3461 
3462 	/*
3463 	 * Unfortunately, we need to restart at ftrace_pages_start
3464 	 * every time we let go of the ftrace_lock. This is because
3465 	 * those pointers can change without the lock.
3466 	 */
3467 	iter->pg = ftrace_pages_start;
3468 	iter->idx = 0;
3469 	for (l = 0; l <= *pos; ) {
3470 		p = t_func_next(m, &l);
3471 		if (!p)
3472 			break;
3473 	}
3474 
3475 	if (!p)
3476 		return t_mod_start(m, pos);
3477 
3478 	return iter;
3479 }
3480 
3481 static void t_stop(struct seq_file *m, void *p)
3482 {
3483 	mutex_unlock(&ftrace_lock);
3484 }
3485 
3486 void * __weak
3487 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3488 {
3489 	return NULL;
3490 }
3491 
3492 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3493 				struct dyn_ftrace *rec)
3494 {
3495 	void *ptr;
3496 
3497 	ptr = arch_ftrace_trampoline_func(ops, rec);
3498 	if (ptr)
3499 		seq_printf(m, " ->%pS", ptr);
3500 }
3501 
3502 static int t_show(struct seq_file *m, void *v)
3503 {
3504 	struct ftrace_iterator *iter = m->private;
3505 	struct dyn_ftrace *rec;
3506 
3507 	if (iter->flags & FTRACE_ITER_PROBE)
3508 		return t_probe_show(m, iter);
3509 
3510 	if (iter->flags & FTRACE_ITER_MOD)
3511 		return t_mod_show(m, iter);
3512 
3513 	if (iter->flags & FTRACE_ITER_PRINTALL) {
3514 		if (iter->flags & FTRACE_ITER_NOTRACE)
3515 			seq_puts(m, "#### no functions disabled ####\n");
3516 		else
3517 			seq_puts(m, "#### all functions enabled ####\n");
3518 		return 0;
3519 	}
3520 
3521 	rec = iter->func;
3522 
3523 	if (!rec)
3524 		return 0;
3525 
3526 	seq_printf(m, "%ps", (void *)rec->ip);
3527 	if (iter->flags & FTRACE_ITER_ENABLED) {
3528 		struct ftrace_ops *ops;
3529 
3530 		seq_printf(m, " (%ld)%s%s",
3531 			   ftrace_rec_count(rec),
3532 			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3533 			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
3534 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3535 			ops = ftrace_find_tramp_ops_any(rec);
3536 			if (ops) {
3537 				do {
3538 					seq_printf(m, "\ttramp: %pS (%pS)",
3539 						   (void *)ops->trampoline,
3540 						   (void *)ops->func);
3541 					add_trampoline_func(m, ops, rec);
3542 					ops = ftrace_find_tramp_ops_next(rec, ops);
3543 				} while (ops);
3544 			} else
3545 				seq_puts(m, "\ttramp: ERROR!");
3546 		} else {
3547 			add_trampoline_func(m, NULL, rec);
3548 		}
3549 	}
3550 
3551 	seq_putc(m, '\n');
3552 
3553 	return 0;
3554 }
3555 
3556 static const struct seq_operations show_ftrace_seq_ops = {
3557 	.start = t_start,
3558 	.next = t_next,
3559 	.stop = t_stop,
3560 	.show = t_show,
3561 };
3562 
3563 static int
3564 ftrace_avail_open(struct inode *inode, struct file *file)
3565 {
3566 	struct ftrace_iterator *iter;
3567 
3568 	if (unlikely(ftrace_disabled))
3569 		return -ENODEV;
3570 
3571 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3572 	if (!iter)
3573 		return -ENOMEM;
3574 
3575 	iter->pg = ftrace_pages_start;
3576 	iter->ops = &global_ops;
3577 
3578 	return 0;
3579 }
3580 
3581 static int
3582 ftrace_enabled_open(struct inode *inode, struct file *file)
3583 {
3584 	struct ftrace_iterator *iter;
3585 
3586 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3587 	if (!iter)
3588 		return -ENOMEM;
3589 
3590 	iter->pg = ftrace_pages_start;
3591 	iter->flags = FTRACE_ITER_ENABLED;
3592 	iter->ops = &global_ops;
3593 
3594 	return 0;
3595 }
3596 
3597 /**
3598  * ftrace_regex_open - initialize function tracer filter files
3599  * @ops: The ftrace_ops that hold the hash filters
3600  * @flag: The type of filter to process
3601  * @inode: The inode, usually passed in to your open routine
3602  * @file: The file, usually passed in to your open routine
3603  *
3604  * ftrace_regex_open() initializes the filter files for the
3605  * @ops. Depending on @flag it may process the filter hash or
3606  * the notrace hash of @ops. With this called from the open
3607  * routine, you can use ftrace_filter_write() for the write
3608  * routine if @flag has FTRACE_ITER_FILTER set, or
3609  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3610  * tracing_lseek() should be used as the lseek routine, and
3611  * release must call ftrace_regex_release().
3612  */
3613 int
3614 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3615 		  struct inode *inode, struct file *file)
3616 {
3617 	struct ftrace_iterator *iter;
3618 	struct ftrace_hash *hash;
3619 	struct list_head *mod_head;
3620 	struct trace_array *tr = ops->private;
3621 	int ret = 0;
3622 
3623 	ftrace_ops_init(ops);
3624 
3625 	if (unlikely(ftrace_disabled))
3626 		return -ENODEV;
3627 
3628 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3629 	if (!iter)
3630 		return -ENOMEM;
3631 
3632 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3633 		kfree(iter);
3634 		return -ENOMEM;
3635 	}
3636 
3637 	iter->ops = ops;
3638 	iter->flags = flag;
3639 	iter->tr = tr;
3640 
3641 	mutex_lock(&ops->func_hash->regex_lock);
3642 
3643 	if (flag & FTRACE_ITER_NOTRACE) {
3644 		hash = ops->func_hash->notrace_hash;
3645 		mod_head = tr ? &tr->mod_notrace : NULL;
3646 	} else {
3647 		hash = ops->func_hash->filter_hash;
3648 		mod_head = tr ? &tr->mod_trace : NULL;
3649 	}
3650 
3651 	iter->mod_list = mod_head;
3652 
3653 	if (file->f_mode & FMODE_WRITE) {
3654 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3655 
3656 		if (file->f_flags & O_TRUNC) {
3657 			iter->hash = alloc_ftrace_hash(size_bits);
3658 			clear_ftrace_mod_list(mod_head);
3659 		} else {
3660 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3661 		}
3662 
3663 		if (!iter->hash) {
3664 			trace_parser_put(&iter->parser);
3665 			kfree(iter);
3666 			ret = -ENOMEM;
3667 			goto out_unlock;
3668 		}
3669 	} else
3670 		iter->hash = hash;
3671 
3672 	if (file->f_mode & FMODE_READ) {
3673 		iter->pg = ftrace_pages_start;
3674 
3675 		ret = seq_open(file, &show_ftrace_seq_ops);
3676 		if (!ret) {
3677 			struct seq_file *m = file->private_data;
3678 			m->private = iter;
3679 		} else {
3680 			/* Failed */
3681 			free_ftrace_hash(iter->hash);
3682 			trace_parser_put(&iter->parser);
3683 			kfree(iter);
3684 		}
3685 	} else
3686 		file->private_data = iter;
3687 
3688  out_unlock:
3689 	mutex_unlock(&ops->func_hash->regex_lock);
3690 
3691 	return ret;
3692 }
3693 
3694 static int
3695 ftrace_filter_open(struct inode *inode, struct file *file)
3696 {
3697 	struct ftrace_ops *ops = inode->i_private;
3698 
3699 	return ftrace_regex_open(ops,
3700 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3701 			inode, file);
3702 }
3703 
3704 static int
3705 ftrace_notrace_open(struct inode *inode, struct file *file)
3706 {
3707 	struct ftrace_ops *ops = inode->i_private;
3708 
3709 	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3710 				 inode, file);
3711 }
3712 
3713 /* Type for quick search of ftrace basic regexes (globs) from filter_parse_regex */
3714 struct ftrace_glob {
3715 	char *search;
3716 	unsigned len;
3717 	int type;
3718 };
3719 
3720 /*
3721  * If symbols in an architecture don't correspond exactly to the user-visible
3722  * name of what they represent, it is possible to define this function to
3723  * perform the necessary adjustments.
3724  */
3725 char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3726 {
3727 	return str;
3728 }
3729 
3730 static int ftrace_match(char *str, struct ftrace_glob *g)
3731 {
3732 	int matched = 0;
3733 	int slen;
3734 
3735 	str = arch_ftrace_match_adjust(str, g->search);
3736 
3737 	switch (g->type) {
3738 	case MATCH_FULL:
3739 		if (strcmp(str, g->search) == 0)
3740 			matched = 1;
3741 		break;
3742 	case MATCH_FRONT_ONLY:
3743 		if (strncmp(str, g->search, g->len) == 0)
3744 			matched = 1;
3745 		break;
3746 	case MATCH_MIDDLE_ONLY:
3747 		if (strstr(str, g->search))
3748 			matched = 1;
3749 		break;
3750 	case MATCH_END_ONLY:
3751 		slen = strlen(str);
3752 		if (slen >= g->len &&
3753 		    memcmp(str + slen - g->len, g->search, g->len) == 0)
3754 			matched = 1;
3755 		break;
3756 	case MATCH_GLOB:
3757 		if (glob_match(g->search, str))
3758 			matched = 1;
3759 		break;
3760 	}
3761 
3762 	return matched;
3763 }
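
/*
 * Examples of the glob types as produced by filter_parse_regex()
 * (illustrative):
 *
 *	"schedule"	-> MATCH_FULL
 *	"sched_*"	-> MATCH_FRONT_ONLY	(search "sched_")
 *	"*_lock"	-> MATCH_END_ONLY	(search "_lock")
 *	"*rcu*"		-> MATCH_MIDDLE_ONLY	(search "rcu")
 *	"sys_*open*"	-> MATCH_GLOB
 */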
3764 
3765 static int
3766 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3767 {
3768 	struct ftrace_func_entry *entry;
3769 	int ret = 0;
3770 
3771 	entry = ftrace_lookup_ip(hash, rec->ip);
3772 	if (clear_filter) {
3773 		/* Do nothing if it doesn't exist */
3774 		if (!entry)
3775 			return 0;
3776 
3777 		free_hash_entry(hash, entry);
3778 	} else {
3779 		/* Do nothing if it exists */
3780 		if (entry)
3781 			return 0;
3782 
3783 		ret = add_hash_entry(hash, rec->ip);
3784 	}
3785 	return ret;
3786 }
3787 
3788 static int
3789 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3790 		struct ftrace_glob *mod_g, int exclude_mod)
3791 {
3792 	char str[KSYM_SYMBOL_LEN];
3793 	char *modname;
3794 
3795 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3796 
3797 	if (mod_g) {
3798 		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3799 
3800 		/* blank module name to match all modules */
3801 		if (!mod_g->len) {
3802 			/* blank module globbing: modname xor exclude_mod */
3803 			if (!exclude_mod != !modname)
3804 				goto func_match;
3805 			return 0;
3806 		}
3807 
3808 		/*
3809 		 * exclude_mod is set to trace everything but the given
3810 		 * module. If it is set and the module matches, then
3811 		 * return 0. If it is not set and the module doesn't match,
3812 		 * also return 0. Otherwise, check the function to see if
3813 		 * that matches.
3814 		 */
3815 		if (!mod_matches == !exclude_mod)
3816 			return 0;
3817 func_match:
3818 		/* blank search means to match all funcs in the mod */
3819 		if (!func_g->len)
3820 			return 1;
3821 	}
3822 
3823 	return ftrace_match(str, func_g);
3824 }
3825 
3826 static int
3827 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3828 {
3829 	struct ftrace_page *pg;
3830 	struct dyn_ftrace *rec;
3831 	struct ftrace_glob func_g = { .type = MATCH_FULL };
3832 	struct ftrace_glob mod_g = { .type = MATCH_FULL };
3833 	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3834 	int exclude_mod = 0;
3835 	int found = 0;
3836 	int ret;
3837 	int clear_filter = 0;
3838 
3839 	if (func) {
3840 		func_g.type = filter_parse_regex(func, len, &func_g.search,
3841 						 &clear_filter);
3842 		func_g.len = strlen(func_g.search);
3843 	}
3844 
3845 	if (mod) {
3846 		mod_g.type = filter_parse_regex(mod, strlen(mod),
3847 				&mod_g.search, &exclude_mod);
3848 		mod_g.len = strlen(mod_g.search);
3849 	}
3850 
3851 	mutex_lock(&ftrace_lock);
3852 
3853 	if (unlikely(ftrace_disabled))
3854 		goto out_unlock;
3855 
3856 	do_for_each_ftrace_rec(pg, rec) {
3857 
3858 		if (rec->flags & FTRACE_FL_DISABLED)
3859 			continue;
3860 
3861 		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3862 			ret = enter_record(hash, rec, clear_filter);
3863 			if (ret < 0) {
3864 				found = ret;
3865 				goto out_unlock;
3866 			}
3867 			found = 1;
3868 		}
3869 	} while_for_each_ftrace_rec();
3870  out_unlock:
3871 	mutex_unlock(&ftrace_lock);
3872 
3873 	return found;
3874 }
3875 
3876 static int
3877 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3878 {
3879 	return match_records(hash, buff, len, NULL);
3880 }
3881 
3882 static void ftrace_ops_update_code(struct ftrace_ops *ops,
3883 				   struct ftrace_ops_hash *old_hash)
3884 {
3885 	struct ftrace_ops *op;
3886 
3887 	if (!ftrace_enabled)
3888 		return;
3889 
3890 	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3891 		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3892 		return;
3893 	}
3894 
3895 	/*
3896 	 * If this is the shared global_ops filter, then we need to
3897 	 * check if there is another ops that shares it and is enabled.
3898 	 * If so, we still need to run the modify code.
3899 	 */
3900 	if (ops->func_hash != &global_ops.local_hash)
3901 		return;
3902 
3903 	do_for_each_ftrace_op(op, ftrace_ops_list) {
3904 		if (op->func_hash == &global_ops.local_hash &&
3905 		    op->flags & FTRACE_OPS_FL_ENABLED) {
3906 			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
3907 			/* Only need to do this once */
3908 			return;
3909 		}
3910 	} while_for_each_ftrace_op(op);
3911 }
3912 
3913 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3914 					   struct ftrace_hash **orig_hash,
3915 					   struct ftrace_hash *hash,
3916 					   int enable)
3917 {
3918 	struct ftrace_ops_hash old_hash_ops;
3919 	struct ftrace_hash *old_hash;
3920 	int ret;
3921 
3922 	old_hash = *orig_hash;
3923 	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3924 	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3925 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3926 	if (!ret) {
3927 		ftrace_ops_update_code(ops, &old_hash_ops);
3928 		free_ftrace_hash_rcu(old_hash);
3929 	}
3930 	return ret;
3931 }
3932 
3933 static bool module_exists(const char *module)
3934 {
3935 	/* All modules have the symbol __this_module */
3936 	const char this_mod[] = "__this_module";
3937 	const int modname_size = MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 1;
3938 	char modname[modname_size + 1];
3939 	unsigned long val;
3940 	int n;
3941 
3942 	n = snprintf(modname, modname_size + 1, "%s:%s", module, this_mod);
3943 
3944 	if (n > modname_size)
3945 		return false;
3946 
3947 	val = module_kallsyms_lookup_name(modname);
3948 	return val != 0;
3949 }
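
/*
 * For example, module_exists("ext4") (the module name is only an
 * illustration) looks up "ext4:__this_module", which kallsyms can only
 * resolve once that module has actually been loaded.
 */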
3950 
3951 static int cache_mod(struct trace_array *tr,
3952 		     const char *func, char *module, int enable)
3953 {
3954 	struct ftrace_mod_load *ftrace_mod, *n;
3955 	struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
3956 	int ret;
3957 
3958 	mutex_lock(&ftrace_lock);
3959 
3960 	/* We do not cache inverse filters */
3961 	if (func[0] == '!') {
3962 		func++;
3963 		ret = -EINVAL;
3964 
3965 		/* Look to remove this hash */
3966 		list_for_each_entry_safe(ftrace_mod, n, head, list) {
3967 			if (strcmp(ftrace_mod->module, module) != 0)
3968 				continue;
3969 
3970 			/* no func matches all */
3971 			if (strcmp(func, "*") == 0 ||
3972 			    (ftrace_mod->func &&
3973 			     strcmp(ftrace_mod->func, func) == 0)) {
3974 				ret = 0;
3975 				free_ftrace_mod(ftrace_mod);
3976 				continue;
3977 			}
3978 		}
3979 		goto out;
3980 	}
3981 
3982 	ret = -EINVAL;
3983 	/* We only care about modules that have not been loaded yet */
3984 	if (module_exists(module))
3985 		goto out;
3986 
3987 	/* Save this string off, and execute it when the module is loaded */
3988 	ret = ftrace_add_mod(tr, func, module, enable);
3989  out:
3990 	mutex_unlock(&ftrace_lock);
3991 
3992 	return ret;
3993 }
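
/*
 * For example (illustrative): writing "*:mod:btrfs" to set_ftrace_filter
 * before the btrfs module is loaded reaches this function through the
 * "mod" command and is saved on tr->mod_trace; process_mod_list() below
 * replays the cached entry once the module actually appears.
 */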
3994 
3995 static int
3996 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3997 		 int reset, int enable);
3998 
3999 #ifdef CONFIG_MODULES
4000 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4001 			     char *mod, bool enable)
4002 {
4003 	struct ftrace_mod_load *ftrace_mod, *n;
4004 	struct ftrace_hash **orig_hash, *new_hash;
4005 	LIST_HEAD(process_mods);
4006 	char *func;
4007 	int ret;
4008 
4009 	mutex_lock(&ops->func_hash->regex_lock);
4010 
4011 	if (enable)
4012 		orig_hash = &ops->func_hash->filter_hash;
4013 	else
4014 		orig_hash = &ops->func_hash->notrace_hash;
4015 
4016 	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4017 					      *orig_hash);
4018 	if (!new_hash)
4019 		goto out; /* warn? */
4020 
4021 	mutex_lock(&ftrace_lock);
4022 
4023 	list_for_each_entry_safe(ftrace_mod, n, head, list) {
4024 
4025 		if (strcmp(ftrace_mod->module, mod) != 0)
4026 			continue;
4027 
4028 		if (ftrace_mod->func)
4029 			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4030 		else
4031 			func = kstrdup("*", GFP_KERNEL);
4032 
4033 		if (!func) /* warn? */
4034 			continue;
4035 
4036 		list_del(&ftrace_mod->list);
4037 		list_add(&ftrace_mod->list, &process_mods);
4038 
4039 		/* Use the newly allocated func, as it may be "*" */
4040 		kfree(ftrace_mod->func);
4041 		ftrace_mod->func = func;
4042 	}
4043 
4044 	mutex_unlock(&ftrace_lock);
4045 
4046 	list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4047 
4048 		func = ftrace_mod->func;
4049 
4050 		/* Grabs ftrace_lock, which is why we have this extra step */
4051 		match_records(new_hash, func, strlen(func), mod);
4052 		free_ftrace_mod(ftrace_mod);
4053 	}
4054 
4055 	if (enable && list_empty(head))
4056 		new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4057 
4058 	mutex_lock(&ftrace_lock);
4059 
4060 	ret = ftrace_hash_move_and_update_ops(ops, orig_hash,
4061 					      new_hash, enable);
4062 	mutex_unlock(&ftrace_lock);
4063 
4064  out:
4065 	mutex_unlock(&ops->func_hash->regex_lock);
4066 
4067 	free_ftrace_hash(new_hash);
4068 }
4069 
4070 static void process_cached_mods(const char *mod_name)
4071 {
4072 	struct trace_array *tr;
4073 	char *mod;
4074 
4075 	mod = kstrdup(mod_name, GFP_KERNEL);
4076 	if (!mod)
4077 		return;
4078 
4079 	mutex_lock(&trace_types_lock);
4080 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4081 		if (!list_empty(&tr->mod_trace))
4082 			process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4083 		if (!list_empty(&tr->mod_notrace))
4084 			process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4085 	}
4086 	mutex_unlock(&trace_types_lock);
4087 
4088 	kfree(mod);
4089 }
4090 #endif
4091 
4092 /*
4093  * We register the module command as a template to show others how
4094  * to register a command as well.
4095  */
4096 
4097 static int
4098 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4099 		    char *func_orig, char *cmd, char *module, int enable)
4100 {
4101 	char *func;
4102 	int ret;
4103 
4104 	/* match_records() modifies func, and we need the original */
4105 	func = kstrdup(func_orig, GFP_KERNEL);
4106 	if (!func)
4107 		return -ENOMEM;
4108 
4109 	/*
4110 	 * cmd == 'mod' because we only registered this func
4111 	 * for the 'mod' ftrace_func_command.
4112 	 * But if you register one func with multiple commands,
4113 	 * you can tell which command was used by the cmd
4114 	 * parameter.
4115 	 */
4116 	ret = match_records(hash, func, strlen(func), module);
4117 	kfree(func);
4118 
4119 	if (!ret)
4120 		return cache_mod(tr, func_orig, module, enable);
4121 	if (ret < 0)
4122 		return ret;
4123 	return 0;
4124 }
4125 
4126 static struct ftrace_func_command ftrace_mod_cmd = {
4127 	.name			= "mod",
4128 	.func			= ftrace_mod_callback,
4129 };
4130 
4131 static int __init ftrace_mod_cmd_init(void)
4132 {
4133 	return register_ftrace_command(&ftrace_mod_cmd);
4134 }
4135 core_initcall(ftrace_mod_cmd_init);
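
/*
 * Illustrative sketch of the resulting user interface (assuming the
 * usual tracefs mount point; not taken verbatim from documentation):
 * ftrace_process_regex() splits writes on ':' into "func:command:param",
 * so the "mod" command registered above is driven by e.g.
 *
 *	echo 'write*:mod:ext4' > /sys/kernel/tracing/set_ftrace_filter
 *
 * which restricts the filter to functions matching "write*" inside the
 * ext4 module.  If the module is not loaded yet, the string is cached
 * by cache_mod() and applied via process_cached_mods() at load time.
 */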
4136 
4137 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4138 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
4139 {
4140 	struct ftrace_probe_ops *probe_ops;
4141 	struct ftrace_func_probe *probe;
4142 
4143 	probe = container_of(op, struct ftrace_func_probe, ops);
4144 	probe_ops = probe->probe_ops;
4145 
4146 	/*
4147 	 * Disable preemption for these calls to prevent an RCU grace
4148 	 * period. This syncs the hash iteration and freeing of items
4149 	 * on the hash. rcu_read_lock is too dangerous here.
4150 	 */
4151 	preempt_disable_notrace();
4152 	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4153 	preempt_enable_notrace();
4154 }
4155 
4156 struct ftrace_func_map {
4157 	struct ftrace_func_entry	entry;
4158 	void				*data;
4159 };
4160 
4161 struct ftrace_func_mapper {
4162 	struct ftrace_hash		hash;
4163 };
4164 
4165 /**
4166  * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4167  *
4168  * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4169  */
4170 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4171 {
4172 	struct ftrace_hash *hash;
4173 
4174 	/*
4175 	 * The mapper is simply a ftrace_hash, but since the entries
4176 	 * in the hash are not ftrace_func_entry type, we define it
4177 	 * as a separate structure.
4178 	 */
4179 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4180 	return (struct ftrace_func_mapper *)hash;
4181 }
4182 
4183 /**
4184  * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4185  * @mapper: The mapper that has the ip maps
4186  * @ip: the instruction pointer to find the data for
4187  *
4188  * Returns the data mapped to @ip if found, otherwise NULL. The return
4189  * is actually the address of the mapper data pointer. The address is
4190  * returned for use cases where the data is no bigger than a long, and
4191  * the user can use the data pointer as its data instead of having to
4192  * allocate more memory for the reference.
4193  */
4194 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4195 				  unsigned long ip)
4196 {
4197 	struct ftrace_func_entry *entry;
4198 	struct ftrace_func_map *map;
4199 
4200 	entry = ftrace_lookup_ip(&mapper->hash, ip);
4201 	if (!entry)
4202 		return NULL;
4203 
4204 	map = (struct ftrace_func_map *)entry;
4205 	return &map->data;
4206 }
4207 
4208 /**
4209  * ftrace_func_mapper_add_ip - Map some data to an ip
4210  * @mapper: The mapper that has the ip maps
4211  * @ip: The instruction pointer address to map @data to
4212  * @data: The data to map to @ip
4213  *
4214  * Returns 0 on success, otherwise an error.
4215  */
4216 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4217 			      unsigned long ip, void *data)
4218 {
4219 	struct ftrace_func_entry *entry;
4220 	struct ftrace_func_map *map;
4221 
4222 	entry = ftrace_lookup_ip(&mapper->hash, ip);
4223 	if (entry)
4224 		return -EBUSY;
4225 
4226 	map = kmalloc(sizeof(*map), GFP_KERNEL);
4227 	if (!map)
4228 		return -ENOMEM;
4229 
4230 	map->entry.ip = ip;
4231 	map->data = data;
4232 
4233 	__add_hash_entry(&mapper->hash, &map->entry);
4234 
4235 	return 0;
4236 }
4237 
4238 /**
4239  * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4240  * @mapper: The mapper that has the ip maps
4241  * @ip: The instruction pointer address to remove the data from
4242  *
4243  * Returns the data if it is found, otherwise NULL.
4244  * Note, if the data pointer is used as the data itself (see
4245  * ftrace_func_mapper_find_ip()), then the return value may be meaningless
4246  * if the data pointer was set to zero.
4247  */
4248 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4249 				   unsigned long ip)
4250 {
4251 	struct ftrace_func_entry *entry;
4252 	struct ftrace_func_map *map;
4253 	void *data;
4254 
4255 	entry = ftrace_lookup_ip(&mapper->hash, ip);
4256 	if (!entry)
4257 		return NULL;
4258 
4259 	map = (struct ftrace_func_map *)entry;
4260 	data = map->data;
4261 
4262 	remove_hash_entry(&mapper->hash, entry);
4263 	kfree(entry);
4264 
4265 	return data;
4266 }
4267 
4268 /**
4269  * free_ftrace_func_mapper - free a mapping of ips and data
4270  * @mapper: The mapper that has the ip maps
4271  * @free_func: A function to be called on each data item.
4272  *
4273  * This is used to free the function mapper. The @free_func is optional
4274  * and can be used if the data needs to be freed as well.
4275  */
4276 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4277 			     ftrace_mapper_func free_func)
4278 {
4279 	struct ftrace_func_entry *entry;
4280 	struct ftrace_func_map *map;
4281 	struct hlist_head *hhd;
4282 	int size = 1 << mapper->hash.size_bits;
4283 	int i;
4284 
4285 	if (free_func && mapper->hash.count) {
4286 		for (i = 0; i < size; i++) {
4287 			hhd = &mapper->hash.buckets[i];
4288 			hlist_for_each_entry(entry, hhd, hlist) {
4289 				map = (struct ftrace_func_map *)entry;
4290 				free_func(map);
4291 			}
4292 		}
4293 	}
4294 	free_ftrace_hash(&mapper->hash);
4295 }
4296 
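/*
 * Usage sketch for the func_mapper API above (illustrative; the names
 * below are hypothetical).  A probe wanting a per-function hit counter
 * can store the counter directly in the data pointer, as described in
 * ftrace_func_mapper_find_ip():
 *
 *	static struct ftrace_func_mapper *mapper;
 *
 *	static int counter_add(unsigned long ip)
 *	{
 *		if (!mapper)
 *			mapper = allocate_ftrace_func_mapper();
 *		if (!mapper)
 *			return -ENOMEM;
 *		return ftrace_func_mapper_add_ip(mapper, ip, (void *)0);
 *	}
 *
 *	static void counter_hit(unsigned long ip)
 *	{
 *		long *count;
 *
 *		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
 *		if (count)
 *			(*count)++;
 *	}
 *
 * A final free_ftrace_func_mapper(mapper, NULL) releases the hash; no
 * free_func is needed here since nothing extra was allocated per entry.
 */
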
4297 static void release_probe(struct ftrace_func_probe *probe)
4298 {
4299 	struct ftrace_probe_ops *probe_ops;
4300 
4301 	mutex_lock(&ftrace_lock);
4302 
4303 	WARN_ON(probe->ref <= 0);
4304 
4305 	/* Subtract the ref that was used to protect this instance */
4306 	probe->ref--;
4307 
4308 	if (!probe->ref) {
4309 		probe_ops = probe->probe_ops;
4310 		/*
4311 		 * Sending zero as ip tells probe_ops to free
4312 		 * the probe->data itself
4313 		 */
4314 		if (probe_ops->free)
4315 			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4316 		list_del(&probe->list);
4317 		kfree(probe);
4318 	}
4319 	mutex_unlock(&ftrace_lock);
4320 }
4321 
4322 static void acquire_probe_locked(struct ftrace_func_probe *probe)
4323 {
4324 	/*
4325 	 * Add one ref to keep it from being freed when releasing the
4326 	 * ftrace_lock mutex.
4327 	 */
4328 	probe->ref++;
4329 }
4330 
4331 int
4332 register_ftrace_function_probe(char *glob, struct trace_array *tr,
4333 			       struct ftrace_probe_ops *probe_ops,
4334 			       void *data)
4335 {
4336 	struct ftrace_func_entry *entry;
4337 	struct ftrace_func_probe *probe;
4338 	struct ftrace_hash **orig_hash;
4339 	struct ftrace_hash *old_hash;
4340 	struct ftrace_hash *hash;
4341 	int count = 0;
4342 	int size;
4343 	int ret;
4344 	int i;
4345 
4346 	if (WARN_ON(!tr))
4347 		return -EINVAL;
4348 
4349 	/* We do not support '!' for function probes */
4350 	if (WARN_ON(glob[0] == '!'))
4351 		return -EINVAL;
4352 
4353 
4354 	mutex_lock(&ftrace_lock);
4355 	/* Check if the probe_ops is already registered */
4356 	list_for_each_entry(probe, &tr->func_probes, list) {
4357 		if (probe->probe_ops == probe_ops)
4358 			break;
4359 	}
4360 	if (&probe->list == &tr->func_probes) {
4361 		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4362 		if (!probe) {
4363 			mutex_unlock(&ftrace_lock);
4364 			return -ENOMEM;
4365 		}
4366 		probe->probe_ops = probe_ops;
4367 		probe->ops.func = function_trace_probe_call;
4368 		probe->tr = tr;
4369 		ftrace_ops_init(&probe->ops);
4370 		list_add(&probe->list, &tr->func_probes);
4371 	}
4372 
4373 	acquire_probe_locked(probe);
4374 
4375 	mutex_unlock(&ftrace_lock);
4376 
4377 	mutex_lock(&probe->ops.func_hash->regex_lock);
4378 
4379 	orig_hash = &probe->ops.func_hash->filter_hash;
4380 	old_hash = *orig_hash;
4381 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4382 
4383 	ret = ftrace_match_records(hash, glob, strlen(glob));
4384 
4385 	/* Nothing found? */
4386 	if (!ret)
4387 		ret = -EINVAL;
4388 
4389 	if (ret < 0)
4390 		goto out;
4391 
4392 	size = 1 << hash->size_bits;
4393 	for (i = 0; i < size; i++) {
4394 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4395 			if (ftrace_lookup_ip(old_hash, entry->ip))
4396 				continue;
4397 			/*
4398 			 * The caller might want to do something special
4399 			 * for each function we find. We call the callback
4400 			 * to give the caller an opportunity to do so.
4401 			 */
4402 			if (probe_ops->init) {
4403 				ret = probe_ops->init(probe_ops, tr,
4404 						      entry->ip, data,
4405 						      &probe->data);
4406 				if (ret < 0) {
4407 					if (probe_ops->free && count)
4408 						probe_ops->free(probe_ops, tr,
4409 								0, probe->data);
4410 					probe->data = NULL;
4411 					goto out;
4412 				}
4413 			}
4414 			count++;
4415 		}
4416 	}
4417 
4418 	mutex_lock(&ftrace_lock);
4419 
4420 	if (!count) {
4421 		/* Nothing was added? */
4422 		ret = -EINVAL;
4423 		goto out_unlock;
4424 	}
4425 
4426 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4427 					      hash, 1);
4428 	if (ret < 0)
4429 		goto err_unlock;
4430 
4431 	/* One ref for each new function traced */
4432 	probe->ref += count;
4433 
4434 	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4435 		ret = ftrace_startup(&probe->ops, 0);
4436 
4437  out_unlock:
4438 	mutex_unlock(&ftrace_lock);
4439 
4440 	if (!ret)
4441 		ret = count;
4442  out:
4443 	mutex_unlock(&probe->ops.func_hash->regex_lock);
4444 	free_ftrace_hash(hash);
4445 
4446 	release_probe(probe);
4447 
4448 	return ret;
4449 
4450  err_unlock:
4451 	if (!probe_ops->free || !count)
4452 		goto out_unlock;
4453 
4454 	/* Failed to do the move, need to call the free functions */
4455 	for (i = 0; i < size; i++) {
4456 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4457 			if (ftrace_lookup_ip(old_hash, entry->ip))
4458 				continue;
4459 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4460 		}
4461 	}
4462 	goto out_unlock;
4463 }
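
/*
 * Registration sketch (names are hypothetical; the callback signature
 * follows the probe_ops->func() call in function_trace_probe_call()):
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  struct trace_array *tr,
 *				  struct ftrace_probe_ops *ops, void *data)
 *	{
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("vfs_*", tr, &my_probe_ops, NULL);
 *
 * On success the return value is the number of functions that matched
 * the glob; unregister_ftrace_function_probe_func() removes the probe.
 */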
4464 
4465 int
4466 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4467 				      struct ftrace_probe_ops *probe_ops)
4468 {
4469 	struct ftrace_ops_hash old_hash_ops;
4470 	struct ftrace_func_entry *entry;
4471 	struct ftrace_func_probe *probe;
4472 	struct ftrace_glob func_g;
4473 	struct ftrace_hash **orig_hash;
4474 	struct ftrace_hash *old_hash;
4475 	struct ftrace_hash *hash = NULL;
4476 	struct hlist_node *tmp;
4477 	struct hlist_head hhd;
4478 	char str[KSYM_SYMBOL_LEN];
4479 	int count = 0;
4480 	int i, ret = -ENODEV;
4481 	int size;
4482 
4483 	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4484 		func_g.search = NULL;
4485 	else {
4486 		int not;
4487 
4488 		func_g.type = filter_parse_regex(glob, strlen(glob),
4489 						 &func_g.search, &not);
4490 		func_g.len = strlen(func_g.search);
4491 		func_g.search = glob;
4492 
4493 		/* we do not support '!' for function probes */
4494 		if (WARN_ON(not))
4495 			return -EINVAL;
4496 	}
4497 
4498 	mutex_lock(&ftrace_lock);
4499 	/* Check if the probe_ops is already registered */
4500 	list_for_each_entry(probe, &tr->func_probes, list) {
4501 		if (probe->probe_ops == probe_ops)
4502 			break;
4503 	}
4504 	if (&probe->list == &tr->func_probes)
4505 		goto err_unlock_ftrace;
4506 
4507 	ret = -EINVAL;
4508 	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4509 		goto err_unlock_ftrace;
4510 
4511 	acquire_probe_locked(probe);
4512 
4513 	mutex_unlock(&ftrace_lock);
4514 
4515 	mutex_lock(&probe->ops.func_hash->regex_lock);
4516 
4517 	orig_hash = &probe->ops.func_hash->filter_hash;
4518 	old_hash = *orig_hash;
4519 
4520 	if (ftrace_hash_empty(old_hash))
4521 		goto out_unlock;
4522 
4523 	old_hash_ops.filter_hash = old_hash;
4524 	/* Probes only have filters */
4525 	old_hash_ops.notrace_hash = NULL;
4526 
4527 	ret = -ENOMEM;
4528 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4529 	if (!hash)
4530 		goto out_unlock;
4531 
4532 	INIT_HLIST_HEAD(&hhd);
4533 
4534 	size = 1 << hash->size_bits;
4535 	for (i = 0; i < size; i++) {
4536 		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4537 
4538 			if (func_g.search) {
4539 				kallsyms_lookup(entry->ip, NULL, NULL,
4540 						NULL, str);
4541 				if (!ftrace_match(str, &func_g))
4542 					continue;
4543 			}
4544 			count++;
4545 			remove_hash_entry(hash, entry);
4546 			hlist_add_head(&entry->hlist, &hhd);
4547 		}
4548 	}
4549 
4550 	/* Nothing found? */
4551 	if (!count) {
4552 		ret = -EINVAL;
4553 		goto out_unlock;
4554 	}
4555 
4556 	mutex_lock(&ftrace_lock);
4557 
4558 	WARN_ON(probe->ref < count);
4559 
4560 	probe->ref -= count;
4561 
4562 	if (ftrace_hash_empty(hash))
4563 		ftrace_shutdown(&probe->ops, 0);
4564 
4565 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4566 					      hash, 1);
4567 
4568 	/* still need to update the function call sites */
4569 	if (ftrace_enabled && !ftrace_hash_empty(hash))
4570 		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4571 				       &old_hash_ops);
4572 	synchronize_sched();
4573 
4574 	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4575 		hlist_del(&entry->hlist);
4576 		if (probe_ops->free)
4577 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4578 		kfree(entry);
4579 	}
4580 	mutex_unlock(&ftrace_lock);
4581 
4582  out_unlock:
4583 	mutex_unlock(&probe->ops.func_hash->regex_lock);
4584 	free_ftrace_hash(hash);
4585 
4586 	release_probe(probe);
4587 
4588 	return ret;
4589 
4590  err_unlock_ftrace:
4591 	mutex_unlock(&ftrace_lock);
4592 	return ret;
4593 }
4594 
4595 void clear_ftrace_function_probes(struct trace_array *tr)
4596 {
4597 	struct ftrace_func_probe *probe, *n;
4598 
4599 	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4600 		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4601 }
4602 
4603 static LIST_HEAD(ftrace_commands);
4604 static DEFINE_MUTEX(ftrace_cmd_mutex);
4605 
4606 /*
4607  * Currently we only register ftrace commands from __init, so mark this
4608  * __init too.
4609  */
4610 __init int register_ftrace_command(struct ftrace_func_command *cmd)
4611 {
4612 	struct ftrace_func_command *p;
4613 	int ret = 0;
4614 
4615 	mutex_lock(&ftrace_cmd_mutex);
4616 	list_for_each_entry(p, &ftrace_commands, list) {
4617 		if (strcmp(cmd->name, p->name) == 0) {
4618 			ret = -EBUSY;
4619 			goto out_unlock;
4620 		}
4621 	}
4622 	list_add(&cmd->list, &ftrace_commands);
4623  out_unlock:
4624 	mutex_unlock(&ftrace_cmd_mutex);
4625 
4626 	return ret;
4627 }
4628 
4629 /*
4630  * Currently we only unregister ftrace commands from __init, so mark
4631  * this __init too.
4632  */
4633 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4634 {
4635 	struct ftrace_func_command *p, *n;
4636 	int ret = -ENODEV;
4637 
4638 	mutex_lock(&ftrace_cmd_mutex);
4639 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4640 		if (strcmp(cmd->name, p->name) == 0) {
4641 			ret = 0;
4642 			list_del_init(&p->list);
4643 			goto out_unlock;
4644 		}
4645 	}
4646  out_unlock:
4647 	mutex_unlock(&ftrace_cmd_mutex);
4648 
4649 	return ret;
4650 }
4651 
4652 static int ftrace_process_regex(struct ftrace_iterator *iter,
4653 				char *buff, int len, int enable)
4654 {
4655 	struct ftrace_hash *hash = iter->hash;
4656 	struct trace_array *tr = iter->ops->private;
4657 	char *func, *command, *next = buff;
4658 	struct ftrace_func_command *p;
4659 	int ret = -EINVAL;
4660 
4661 	func = strsep(&next, ":");
4662 
4663 	if (!next) {
4664 		ret = ftrace_match_records(hash, func, len);
4665 		if (!ret)
4666 			ret = -EINVAL;
4667 		if (ret < 0)
4668 			return ret;
4669 		return 0;
4670 	}
4671 
4672 	/* command found */
4673 
4674 	command = strsep(&next, ":");
4675 
4676 	mutex_lock(&ftrace_cmd_mutex);
4677 	list_for_each_entry(p, &ftrace_commands, list) {
4678 		if (strcmp(p->name, command) == 0) {
4679 			ret = p->func(tr, hash, func, command, next, enable);
4680 			goto out_unlock;
4681 		}
4682 	}
4683  out_unlock:
4684 	mutex_unlock(&ftrace_cmd_mutex);
4685 
4686 	return ret;
4687 }
4688 
4689 static ssize_t
4690 ftrace_regex_write(struct file *file, const char __user *ubuf,
4691 		   size_t cnt, loff_t *ppos, int enable)
4692 {
4693 	struct ftrace_iterator *iter;
4694 	struct trace_parser *parser;
4695 	ssize_t ret, read;
4696 
4697 	if (!cnt)
4698 		return 0;
4699 
4700 	if (file->f_mode & FMODE_READ) {
4701 		struct seq_file *m = file->private_data;
4702 		iter = m->private;
4703 	} else
4704 		iter = file->private_data;
4705 
4706 	if (unlikely(ftrace_disabled))
4707 		return -ENODEV;
4708 
4709 	/* iter->hash is a local copy, so we don't need regex_lock */
4710 
4711 	parser = &iter->parser;
4712 	read = trace_get_user(parser, ubuf, cnt, ppos);
4713 
4714 	if (read >= 0 && trace_parser_loaded(parser) &&
4715 	    !trace_parser_cont(parser)) {
4716 		ret = ftrace_process_regex(iter, parser->buffer,
4717 					   parser->idx, enable);
4718 		trace_parser_clear(parser);
4719 		if (ret < 0)
4720 			goto out;
4721 	}
4722 
4723 	ret = read;
4724  out:
4725 	return ret;
4726 }
4727 
4728 ssize_t
4729 ftrace_filter_write(struct file *file, const char __user *ubuf,
4730 		    size_t cnt, loff_t *ppos)
4731 {
4732 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4733 }
4734 
4735 ssize_t
4736 ftrace_notrace_write(struct file *file, const char __user *ubuf,
4737 		     size_t cnt, loff_t *ppos)
4738 {
4739 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4740 }
4741 
4742 static int
4743 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4744 {
4745 	struct ftrace_func_entry *entry;
4746 
4747 	if (!ftrace_location(ip))
4748 		return -EINVAL;
4749 
4750 	if (remove) {
4751 		entry = ftrace_lookup_ip(hash, ip);
4752 		if (!entry)
4753 			return -ENOENT;
4754 		free_hash_entry(hash, entry);
4755 		return 0;
4756 	}
4757 
4758 	return add_hash_entry(hash, ip);
4759 }
4760 
4761 static int
4762 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4763 		unsigned long ip, int remove, int reset, int enable)
4764 {
4765 	struct ftrace_hash **orig_hash;
4766 	struct ftrace_hash *hash;
4767 	int ret;
4768 
4769 	if (unlikely(ftrace_disabled))
4770 		return -ENODEV;
4771 
4772 	mutex_lock(&ops->func_hash->regex_lock);
4773 
4774 	if (enable)
4775 		orig_hash = &ops->func_hash->filter_hash;
4776 	else
4777 		orig_hash = &ops->func_hash->notrace_hash;
4778 
4779 	if (reset)
4780 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4781 	else
4782 		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4783 
4784 	if (!hash) {
4785 		ret = -ENOMEM;
4786 		goto out_regex_unlock;
4787 	}
4788 
4789 	if (buf && !ftrace_match_records(hash, buf, len)) {
4790 		ret = -EINVAL;
4791 		goto out_regex_unlock;
4792 	}
4793 	if (ip) {
4794 		ret = ftrace_match_addr(hash, ip, remove);
4795 		if (ret < 0)
4796 			goto out_regex_unlock;
4797 	}
4798 
4799 	mutex_lock(&ftrace_lock);
4800 	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4801 	mutex_unlock(&ftrace_lock);
4802 
4803  out_regex_unlock:
4804 	mutex_unlock(&ops->func_hash->regex_lock);
4805 
4806 	free_ftrace_hash(hash);
4807 	return ret;
4808 }
4809 
4810 static int
4811 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4812 		int reset, int enable)
4813 {
4814 	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4815 }
4816 
4817 /**
4818  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4819  * @ops - the ops to set the filter with
4820  * @ip - the address to add to or remove from the filter.
4821  * @remove - non zero to remove the ip from the filter
4822  * @reset - non zero to reset all filters before applying this filter.
4823  *
4824  * Filters denote which functions should be enabled when tracing is enabled.
4825  * If @ip is NULL, it fails to update the filter.
4826  */
4827 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4828 			 int remove, int reset)
4829 {
4830 	ftrace_ops_init(ops);
4831 	return ftrace_set_addr(ops, ip, remove, reset, 1);
4832 }
4833 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
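
/*
 * Illustrative sketch (assumes a caller-owned ftrace_ops named my_ops
 * and that kallsyms_lookup_name() is available to resolve the symbol):
 *
 *	unsigned long ip = kallsyms_lookup_name("schedule");
 *	int ret = -ENODEV;
 *
 *	if (ip)
 *		ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);
 *
 * Here reset == 1 clears any previous filter and remove == 0 adds the
 * address; calling again with remove == 1 would drop it from the filter.
 */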
4834 
4835 /**
4836  * ftrace_ops_set_global_filter - setup ops to use global filters
4837  * @ops - the ops which will use the global filters
4838  *
4839  * ftrace users who need global function trace filtering should call this.
4840  * It can set the global filter only if ops were not initialized before.
4841  */
4842 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
4843 {
4844 	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
4845 		return;
4846 
4847 	ftrace_ops_init(ops);
4848 	ops->func_hash = &global_ops.local_hash;
4849 }
4850 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
4851 
4852 static int
4853 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4854 		 int reset, int enable)
4855 {
4856 	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4857 }
4858 
4859 /**
4860  * ftrace_set_filter - set a function to filter on in ftrace
4861  * @ops - the ops to set the filter with
4862  * @buf - the string that holds the function filter text.
4863  * @len - the length of the string.
4864  * @reset - non zero to reset all filters before applying this filter.
4865  *
4866  * Filters denote which functions should be enabled when tracing is enabled.
4867  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4868  */
4869 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
4870 		       int len, int reset)
4871 {
4872 	ftrace_ops_init(ops);
4873 	return ftrace_set_regex(ops, buf, len, reset, 1);
4874 }
4875 EXPORT_SYMBOL_GPL(ftrace_set_filter);
4876 
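
/*
 * Minimal usage sketch, assuming a caller-defined callback and ops
 * (my_callback/my_ops are made-up names).  The callback is invoked for
 * every hit on the filtered functions once the ops is registered:
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "kfree", strlen("kfree"), 1);
 *	ftrace_set_filter(&my_ops, "kmalloc*", strlen("kmalloc*"), 0);
 *	register_ftrace_function(&my_ops);
 *
 * The first call resets the filter and adds "kfree"; the second appends
 * everything matching "kmalloc*".
 */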
4877 /**
4878  * ftrace_set_notrace - set a function to not trace in ftrace
4879  * @ops - the ops to set the notrace filter with
4880  * @buf - the string that holds the function notrace text.
4881  * @len - the length of the string.
4882  * @reset - non zero to reset all filters before applying this filter.
4883  *
4884  * Notrace Filters denote which functions should not be enabled when tracing
4885  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4886  * for tracing.
4887  */
4888 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
4889 			int len, int reset)
4890 {
4891 	ftrace_ops_init(ops);
4892 	return ftrace_set_regex(ops, buf, len, reset, 0);
4893 }
4894 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
4895 /**
4896  * ftrace_set_global_filter - set a function to filter on with global tracers
4897  * @buf - the string that holds the function filter text.
4898  * @len - the length of the string.
4899  * @reset - non zero to reset all filters before applying this filter.
4900  *
4901  * Filters denote which functions should be enabled when tracing is enabled.
4902  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4903  */
4904 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4905 {
4906 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
4907 }
4908 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4909 
4910 /**
4911  * ftrace_set_global_notrace - set a function to not trace with global tracers
4912  * @buf - the string that holds the function notrace text.
4913  * @len - the length of the string.
4914  * @reset - non zero to reset all filters before applying this filter.
4915  *
4916  * Notrace Filters denote which functions should not be enabled when tracing
4917  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4918  * for tracing.
4919  */
4920 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4921 {
4922 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
4923 }
4924 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
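
/*
 * Illustrative sketch: a built-in user of the global function tracer
 * that wants to skip all locking primitives could do
 *
 *	ftrace_set_global_notrace("*lock*", strlen("*lock*"), 0);
 *
 * before enabling tracing (the glob here is only an example).
 */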
4925 
4926 /*
4927  * command line interface to allow users to set filters on boot up.
4928  */
4929 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
4930 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4931 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4932 
4933 /* Used by function selftest to not test if filter is set */
4934 bool ftrace_filter_param __initdata;
4935 
4936 static int __init set_ftrace_notrace(char *str)
4937 {
4938 	ftrace_filter_param = true;
4939 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4940 	return 1;
4941 }
4942 __setup("ftrace_notrace=", set_ftrace_notrace);
4943 
4944 static int __init set_ftrace_filter(char *str)
4945 {
4946 	ftrace_filter_param = true;
4947 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4948 	return 1;
4949 }
4950 __setup("ftrace_filter=", set_ftrace_filter);
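
/*
 * Example (illustrative): booting with
 *
 *	ftrace_filter=kmem_cache_*,kfree ftrace_notrace=preempt_*
 *
 * seeds the global filter and notrace hashes before user space comes
 * up; both strings are comma separated lists of globs and are applied
 * by set_ftrace_early_filters() below.
 */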
4951 
4952 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4953 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4954 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4955 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
4956 
4957 static int __init set_graph_function(char *str)
4958 {
4959 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4960 	return 1;
4961 }
4962 __setup("ftrace_graph_filter=", set_graph_function);
4963 
4964 static int __init set_graph_notrace_function(char *str)
4965 {
4966 	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4967 	return 1;
4968 }
4969 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
4970 
4971 static int __init set_graph_max_depth_function(char *str)
4972 {
4973 	if (!str)
4974 		return 0;
4975 	fgraph_max_depth = simple_strtoul(str, NULL, 0);
4976 	return 1;
4977 }
4978 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
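
/*
 * Example (illustrative): "ftrace_graph_filter=do_sys_open,kthread_*
 * ftrace_graph_max_depth=3" on the kernel command line limits the
 * function graph tracer to those entry points and at most three levels
 * of nesting (the function names are just examples).
 */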
4979 
4980 static void __init set_ftrace_early_graph(char *buf, int enable)
4981 {
4982 	int ret;
4983 	char *func;
4984 	struct ftrace_hash *hash;
4985 
4986 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4987 	if (WARN_ON(!hash))
4988 		return;
4989 
4990 	while (buf) {
4991 		func = strsep(&buf, ",");
4992 		/* we allow only one expression at a time */
4993 		ret = ftrace_graph_set_hash(hash, func);
4994 		if (ret)
4995 			printk(KERN_DEBUG "ftrace: function %s not "
4996 					  "traceable\n", func);
4997 	}
4998 
4999 	if (enable)
5000 		ftrace_graph_hash = hash;
5001 	else
5002 		ftrace_graph_notrace_hash = hash;
5003 }
5004 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5005 
5006 void __init
5007 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
5008 {
5009 	char *func;
5010 
5011 	ftrace_ops_init(ops);
5012 
5013 	while (buf) {
5014 		func = strsep(&buf, ",");
5015 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
5016 	}
5017 }
5018 
5019 static void __init set_ftrace_early_filters(void)
5020 {
5021 	if (ftrace_filter_buf[0])
5022 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5023 	if (ftrace_notrace_buf[0])
5024 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5025 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5026 	if (ftrace_graph_buf[0])
5027 		set_ftrace_early_graph(ftrace_graph_buf, 1);
5028 	if (ftrace_graph_notrace_buf[0])
5029 		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5030 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5031 }
5032 
5033 int ftrace_regex_release(struct inode *inode, struct file *file)
5034 {
5035 	struct seq_file *m = (struct seq_file *)file->private_data;
5036 	struct ftrace_iterator *iter;
5037 	struct ftrace_hash **orig_hash;
5038 	struct trace_parser *parser;
5039 	int filter_hash;
5040 	int ret;
5041 
5042 	if (file->f_mode & FMODE_READ) {
5043 		iter = m->private;
5044 		seq_release(inode, file);
5045 	} else
5046 		iter = file->private_data;
5047 
5048 	parser = &iter->parser;
5049 	if (trace_parser_loaded(parser)) {
5050 		parser->buffer[parser->idx] = 0;
5051 		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
5052 	}
5053 
5054 	trace_parser_put(parser);
5055 
5056 	mutex_lock(&iter->ops->func_hash->regex_lock);
5057 
5058 	if (file->f_mode & FMODE_WRITE) {
5059 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5060 
5061 		if (filter_hash) {
5062 			orig_hash = &iter->ops->func_hash->filter_hash;
5063 			if (iter->tr && !list_empty(&iter->tr->mod_trace))
5064 				iter->hash->flags |= FTRACE_HASH_FL_MOD;
5065 		} else
5066 			orig_hash = &iter->ops->func_hash->notrace_hash;
5067 
5068 		mutex_lock(&ftrace_lock);
5069 		ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5070 						      iter->hash, filter_hash);
5071 		mutex_unlock(&ftrace_lock);
5072 	} else {
5073 		/* For read only, the hash is the ops hash */
5074 		iter->hash = NULL;
5075 	}
5076 
5077 	mutex_unlock(&iter->ops->func_hash->regex_lock);
5078 	free_ftrace_hash(iter->hash);
5079 	kfree(iter);
5080 
5081 	return 0;
5082 }
5083 
5084 static const struct file_operations ftrace_avail_fops = {
5085 	.open = ftrace_avail_open,
5086 	.read = seq_read,
5087 	.llseek = seq_lseek,
5088 	.release = seq_release_private,
5089 };
5090 
5091 static const struct file_operations ftrace_enabled_fops = {
5092 	.open = ftrace_enabled_open,
5093 	.read = seq_read,
5094 	.llseek = seq_lseek,
5095 	.release = seq_release_private,
5096 };
5097 
5098 static const struct file_operations ftrace_filter_fops = {
5099 	.open = ftrace_filter_open,
5100 	.read = seq_read,
5101 	.write = ftrace_filter_write,
5102 	.llseek = tracing_lseek,
5103 	.release = ftrace_regex_release,
5104 };
5105 
5106 static const struct file_operations ftrace_notrace_fops = {
5107 	.open = ftrace_notrace_open,
5108 	.read = seq_read,
5109 	.write = ftrace_notrace_write,
5110 	.llseek = tracing_lseek,
5111 	.release = ftrace_regex_release,
5112 };
5113 
5114 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5115 
5116 static DEFINE_MUTEX(graph_lock);
5117 
5118 struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
5119 struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
5120 
5121 enum graph_filter_type {
5122 	GRAPH_FILTER_NOTRACE	= 0,
5123 	GRAPH_FILTER_FUNCTION,
5124 };
5125 
5126 #define FTRACE_GRAPH_EMPTY	((void *)1)
5127 
5128 struct ftrace_graph_data {
5129 	struct ftrace_hash		*hash;
5130 	struct ftrace_func_entry	*entry;
5131 	int				idx;   /* for hash table iteration */
5132 	enum graph_filter_type		type;
5133 	struct ftrace_hash		*new_hash;
5134 	const struct seq_operations	*seq_ops;
5135 	struct trace_parser		parser;
5136 };
5137 
5138 static void *
5139 __g_next(struct seq_file *m, loff_t *pos)
5140 {
5141 	struct ftrace_graph_data *fgd = m->private;
5142 	struct ftrace_func_entry *entry = fgd->entry;
5143 	struct hlist_head *head;
5144 	int i, idx = fgd->idx;
5145 
5146 	if (*pos >= fgd->hash->count)
5147 		return NULL;
5148 
5149 	if (entry) {
5150 		hlist_for_each_entry_continue(entry, hlist) {
5151 			fgd->entry = entry;
5152 			return entry;
5153 		}
5154 
5155 		idx++;
5156 	}
5157 
5158 	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5159 		head = &fgd->hash->buckets[i];
5160 		hlist_for_each_entry(entry, head, hlist) {
5161 			fgd->entry = entry;
5162 			fgd->idx = i;
5163 			return entry;
5164 		}
5165 	}
5166 	return NULL;
5167 }
5168 
5169 static void *
5170 g_next(struct seq_file *m, void *v, loff_t *pos)
5171 {
5172 	(*pos)++;
5173 	return __g_next(m, pos);
5174 }
5175 
5176 static void *g_start(struct seq_file *m, loff_t *pos)
5177 {
5178 	struct ftrace_graph_data *fgd = m->private;
5179 
5180 	mutex_lock(&graph_lock);
5181 
5182 	if (fgd->type == GRAPH_FILTER_FUNCTION)
5183 		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5184 					lockdep_is_held(&graph_lock));
5185 	else
5186 		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5187 					lockdep_is_held(&graph_lock));
5188 
5189 	/* Nothing found, tell g_show to print that all functions are enabled */
5190 	if (ftrace_hash_empty(fgd->hash) && !*pos)
5191 		return FTRACE_GRAPH_EMPTY;
5192 
5193 	fgd->idx = 0;
5194 	fgd->entry = NULL;
5195 	return __g_next(m, pos);
5196 }
5197 
5198 static void g_stop(struct seq_file *m, void *p)
5199 {
5200 	mutex_unlock(&graph_lock);
5201 }
5202 
5203 static int g_show(struct seq_file *m, void *v)
5204 {
5205 	struct ftrace_func_entry *entry = v;
5206 
5207 	if (!entry)
5208 		return 0;
5209 
5210 	if (entry == FTRACE_GRAPH_EMPTY) {
5211 		struct ftrace_graph_data *fgd = m->private;
5212 
5213 		if (fgd->type == GRAPH_FILTER_FUNCTION)
5214 			seq_puts(m, "#### all functions enabled ####\n");
5215 		else
5216 			seq_puts(m, "#### no functions disabled ####\n");
5217 		return 0;
5218 	}
5219 
5220 	seq_printf(m, "%ps\n", (void *)entry->ip);
5221 
5222 	return 0;
5223 }
5224 
5225 static const struct seq_operations ftrace_graph_seq_ops = {
5226 	.start = g_start,
5227 	.next = g_next,
5228 	.stop = g_stop,
5229 	.show = g_show,
5230 };
5231 
5232 static int
5233 __ftrace_graph_open(struct inode *inode, struct file *file,
5234 		    struct ftrace_graph_data *fgd)
5235 {
5236 	int ret = 0;
5237 	struct ftrace_hash *new_hash = NULL;
5238 
5239 	if (file->f_mode & FMODE_WRITE) {
5240 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
5241 
5242 		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
5243 			return -ENOMEM;
5244 
5245 		if (file->f_flags & O_TRUNC)
5246 			new_hash = alloc_ftrace_hash(size_bits);
5247 		else
5248 			new_hash = alloc_and_copy_ftrace_hash(size_bits,
5249 							      fgd->hash);
5250 		if (!new_hash) {
5251 			ret = -ENOMEM;
5252 			goto out;
5253 		}
5254 	}
5255 
5256 	if (file->f_mode & FMODE_READ) {
5257 		ret = seq_open(file, &ftrace_graph_seq_ops);
5258 		if (!ret) {
5259 			struct seq_file *m = file->private_data;
5260 			m->private = fgd;
5261 		} else {
5262 			/* Failed */
5263 			free_ftrace_hash(new_hash);
5264 			new_hash = NULL;
5265 		}
5266 	} else
5267 		file->private_data = fgd;
5268 
5269 out:
5270 	if (ret < 0 && file->f_mode & FMODE_WRITE)
5271 		trace_parser_put(&fgd->parser);
5272 
5273 	fgd->new_hash = new_hash;
5274 
5275 	/*
5276 	 * All uses of fgd->hash must be taken with the graph_lock
5277 	 * held. The graph_lock is going to be released, so force
5278 	 * fgd->hash to be reinitialized when it is taken again.
5279 	 */
5280 	fgd->hash = NULL;
5281 
5282 	return ret;
5283 }
5284 
5285 static int
5286 ftrace_graph_open(struct inode *inode, struct file *file)
5287 {
5288 	struct ftrace_graph_data *fgd;
5289 	int ret;
5290 
5291 	if (unlikely(ftrace_disabled))
5292 		return -ENODEV;
5293 
5294 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5295 	if (fgd == NULL)
5296 		return -ENOMEM;
5297 
5298 	mutex_lock(&graph_lock);
5299 
5300 	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5301 					lockdep_is_held(&graph_lock));
5302 	fgd->type = GRAPH_FILTER_FUNCTION;
5303 	fgd->seq_ops = &ftrace_graph_seq_ops;
5304 
5305 	ret = __ftrace_graph_open(inode, file, fgd);
5306 	if (ret < 0)
5307 		kfree(fgd);
5308 
5309 	mutex_unlock(&graph_lock);
5310 	return ret;
5311 }
5312 
5313 static int
5314 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
5315 {
5316 	struct ftrace_graph_data *fgd;
5317 	int ret;
5318 
5319 	if (unlikely(ftrace_disabled))
5320 		return -ENODEV;
5321 
5322 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
5323 	if (fgd == NULL)
5324 		return -ENOMEM;
5325 
5326 	mutex_lock(&graph_lock);
5327 
5328 	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5329 					lockdep_is_held(&graph_lock));
5330 	fgd->type = GRAPH_FILTER_NOTRACE;
5331 	fgd->seq_ops = &ftrace_graph_seq_ops;
5332 
5333 	ret = __ftrace_graph_open(inode, file, fgd);
5334 	if (ret < 0)
5335 		kfree(fgd);
5336 
5337 	mutex_unlock(&graph_lock);
5338 	return ret;
5339 }
5340 
5341 static int
5342 ftrace_graph_release(struct inode *inode, struct file *file)
5343 {
5344 	struct ftrace_graph_data *fgd;
5345 	struct ftrace_hash *old_hash, *new_hash;
5346 	struct trace_parser *parser;
5347 	int ret = 0;
5348 
5349 	if (file->f_mode & FMODE_READ) {
5350 		struct seq_file *m = file->private_data;
5351 
5352 		fgd = m->private;
5353 		seq_release(inode, file);
5354 	} else {
5355 		fgd = file->private_data;
5356 	}
5357 
5358 
5359 	if (file->f_mode & FMODE_WRITE) {
5360 
5361 		parser = &fgd->parser;
5362 
5363 		if (trace_parser_loaded((parser))) {
5364 			parser->buffer[parser->idx] = 0;
5365 			ret = ftrace_graph_set_hash(fgd->new_hash,
5366 						    parser->buffer);
5367 		}
5368 
5369 		trace_parser_put(parser);
5370 
5371 		new_hash = __ftrace_hash_move(fgd->new_hash);
5372 		if (!new_hash) {
5373 			ret = -ENOMEM;
5374 			goto out;
5375 		}
5376 
5377 		mutex_lock(&graph_lock);
5378 
5379 		if (fgd->type == GRAPH_FILTER_FUNCTION) {
5380 			old_hash = rcu_dereference_protected(ftrace_graph_hash,
5381 					lockdep_is_held(&graph_lock));
5382 			rcu_assign_pointer(ftrace_graph_hash, new_hash);
5383 		} else {
5384 			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5385 					lockdep_is_held(&graph_lock));
5386 			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5387 		}
5388 
5389 		mutex_unlock(&graph_lock);
5390 
5391 		/* Wait till all users are no longer using the old hash */
5392 		synchronize_sched();
5393 
5394 		free_ftrace_hash(old_hash);
5395 	}
5396 
5397  out:
5398 	free_ftrace_hash(fgd->new_hash);
5399 	kfree(fgd);
5400 
5401 	return ret;
5402 }
5403 
5404 static int
5405 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
5406 {
5407 	struct ftrace_glob func_g;
5408 	struct dyn_ftrace *rec;
5409 	struct ftrace_page *pg;
5410 	struct ftrace_func_entry *entry;
5411 	int fail = 1;
5412 	int not;
5413 
5414 	/* decode regex */
5415 	func_g.type = filter_parse_regex(buffer, strlen(buffer),
5416 					 &func_g.search, &not);
5417 
5418 	func_g.len = strlen(func_g.search);
5419 
5420 	mutex_lock(&ftrace_lock);
5421 
5422 	if (unlikely(ftrace_disabled)) {
5423 		mutex_unlock(&ftrace_lock);
5424 		return -ENODEV;
5425 	}
5426 
5427 	do_for_each_ftrace_rec(pg, rec) {
5428 
5429 		if (rec->flags & FTRACE_FL_DISABLED)
5430 			continue;
5431 
5432 		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
5433 			entry = ftrace_lookup_ip(hash, rec->ip);
5434 
5435 			if (!not) {
5436 				fail = 0;
5437 
5438 				if (entry)
5439 					continue;
5440 				if (add_hash_entry(hash, rec->ip) < 0)
5441 					goto out;
5442 			} else {
5443 				if (entry) {
5444 					free_hash_entry(hash, entry);
5445 					fail = 0;
5446 				}
5447 			}
5448 		}
5449 	} while_for_each_ftrace_rec();
5450 out:
5451 	mutex_unlock(&ftrace_lock);
5452 
5453 	if (fail)
5454 		return -EINVAL;
5455 
5456 	return 0;
5457 }
5458 
5459 static ssize_t
5460 ftrace_graph_write(struct file *file, const char __user *ubuf,
5461 		   size_t cnt, loff_t *ppos)
5462 {
5463 	ssize_t read, ret = 0;
5464 	struct ftrace_graph_data *fgd = file->private_data;
5465 	struct trace_parser *parser;
5466 
5467 	if (!cnt)
5468 		return 0;
5469 
5470 	/* Read mode uses seq functions */
5471 	if (file->f_mode & FMODE_READ) {
5472 		struct seq_file *m = file->private_data;
5473 		fgd = m->private;
5474 	}
5475 
5476 	parser = &fgd->parser;
5477 
5478 	read = trace_get_user(parser, ubuf, cnt, ppos);
5479 
5480 	if (read >= 0 && trace_parser_loaded(parser) &&
5481 	    !trace_parser_cont(parser)) {
5482 
5483 		ret = ftrace_graph_set_hash(fgd->new_hash,
5484 					    parser->buffer);
5485 		trace_parser_clear(parser);
5486 	}
5487 
5488 	if (!ret)
5489 		ret = read;
5490 
5491 	return ret;
5492 }
5493 
5494 static const struct file_operations ftrace_graph_fops = {
5495 	.open		= ftrace_graph_open,
5496 	.read		= seq_read,
5497 	.write		= ftrace_graph_write,
5498 	.llseek		= tracing_lseek,
5499 	.release	= ftrace_graph_release,
5500 };
5501 
5502 static const struct file_operations ftrace_graph_notrace_fops = {
5503 	.open		= ftrace_graph_notrace_open,
5504 	.read		= seq_read,
5505 	.write		= ftrace_graph_write,
5506 	.llseek		= tracing_lseek,
5507 	.release	= ftrace_graph_release,
5508 };
5509 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5510 
5511 void ftrace_create_filter_files(struct ftrace_ops *ops,
5512 				struct dentry *parent)
5513 {
5514 
5515 	trace_create_file("set_ftrace_filter", 0644, parent,
5516 			  ops, &ftrace_filter_fops);
5517 
5518 	trace_create_file("set_ftrace_notrace", 0644, parent,
5519 			  ops, &ftrace_notrace_fops);
5520 }
5521 
5522 /*
5523  * The name "destroy_filter_files" is really a misnomer. Although
5524  * in the future it may actually delete the files, this is
5525  * really intended to make sure the ops passed in are disabled
5526  * and that when this function returns, the caller is free to
5527  * free the ops.
5528  *
5529  * The "destroy" name is only to match the "create" name that this
5530  * should be paired with.
5531  */
5532 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
5533 {
5534 	mutex_lock(&ftrace_lock);
5535 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
5536 		ftrace_shutdown(ops, 0);
5537 	ops->flags |= FTRACE_OPS_FL_DELETED;
5538 	mutex_unlock(&ftrace_lock);
5539 }
5540 
5541 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
5542 {
5543 
5544 	trace_create_file("available_filter_functions", 0444,
5545 			d_tracer, NULL, &ftrace_avail_fops);
5546 
5547 	trace_create_file("enabled_functions", 0444,
5548 			d_tracer, NULL, &ftrace_enabled_fops);
5549 
5550 	ftrace_create_filter_files(&global_ops, d_tracer);
5551 
5552 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5553 	trace_create_file("set_graph_function", 0444, d_tracer,
5554 				    NULL,
5555 				    &ftrace_graph_fops);
5556 	trace_create_file("set_graph_notrace", 0444, d_tracer,
5557 				    NULL,
5558 				    &ftrace_graph_notrace_fops);
5559 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5560 
5561 	return 0;
5562 }
5563 
5564 static int ftrace_cmp_ips(const void *a, const void *b)
5565 {
5566 	const unsigned long *ipa = a;
5567 	const unsigned long *ipb = b;
5568 
5569 	if (*ipa > *ipb)
5570 		return 1;
5571 	if (*ipa < *ipb)
5572 		return -1;
5573 	return 0;
5574 }
5575 
5576 static int ftrace_process_locs(struct module *mod,
5577 			       unsigned long *start,
5578 			       unsigned long *end)
5579 {
5580 	struct ftrace_page *start_pg;
5581 	struct ftrace_page *pg;
5582 	struct dyn_ftrace *rec;
5583 	unsigned long count;
5584 	unsigned long *p;
5585 	unsigned long addr;
5586 	unsigned long flags = 0; /* Shut up gcc */
5587 	int ret = -ENOMEM;
5588 
5589 	count = end - start;
5590 
5591 	if (!count)
5592 		return 0;
5593 
5594 	sort(start, count, sizeof(*start),
5595 	     ftrace_cmp_ips, NULL);
5596 
5597 	start_pg = ftrace_allocate_pages(count);
5598 	if (!start_pg)
5599 		return -ENOMEM;
5600 
5601 	mutex_lock(&ftrace_lock);
5602 
5603 	/*
5604 	 * The core kernel and each module need their own pages, as
5605 	 * modules will free them when they are removed.
5606 	 * Force a new page to be allocated for modules.
5607 	 */
5608 	if (!mod) {
5609 		WARN_ON(ftrace_pages || ftrace_pages_start);
5610 		/* First initialization */
5611 		ftrace_pages = ftrace_pages_start = start_pg;
5612 	} else {
5613 		if (!ftrace_pages)
5614 			goto out;
5615 
5616 		if (WARN_ON(ftrace_pages->next)) {
5617 			/* Hmm, we have free pages? */
5618 			while (ftrace_pages->next)
5619 				ftrace_pages = ftrace_pages->next;
5620 		}
5621 
5622 		ftrace_pages->next = start_pg;
5623 	}
5624 
5625 	p = start;
5626 	pg = start_pg;
5627 	while (p < end) {
5628 		addr = ftrace_call_adjust(*p++);
5629 		/*
5630 		 * Some architecture linkers will pad between
5631 		 * the different mcount_loc sections of different
5632 		 * object files to satisfy alignments.
5633 		 * Skip any NULL pointers.
5634 		 */
5635 		if (!addr)
5636 			continue;
5637 
5638 		if (pg->index == pg->size) {
5639 			/* We should have allocated enough */
5640 			if (WARN_ON(!pg->next))
5641 				break;
5642 			pg = pg->next;
5643 		}
5644 
5645 		rec = &pg->records[pg->index++];
5646 		rec->ip = addr;
5647 	}
5648 
5649 	/* We should have used all pages */
5650 	WARN_ON(pg->next);
5651 
5652 	/* Assign the last page to ftrace_pages */
5653 	ftrace_pages = pg;
5654 
5655 	/*
5656 	 * We only need to disable interrupts on start up
5657 	 * because we are modifying code that an interrupt
5658 	 * may execute, and the modification is not atomic.
5659 	 * But for modules, nothing runs the code we modify
5660 	 * until we are finished with it, and there's no
5661 	 * reason to cause large interrupt latencies while we do it.
5662 	 */
5663 	if (!mod)
5664 		local_irq_save(flags);
5665 	ftrace_update_code(mod, start_pg);
5666 	if (!mod)
5667 		local_irq_restore(flags);
5668 	ret = 0;
5669  out:
5670 	mutex_unlock(&ftrace_lock);
5671 
5672 	return ret;
5673 }
5674 
5675 #ifdef CONFIG_MODULES
5676 
5677 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
5678 
5679 static int referenced_filters(struct dyn_ftrace *rec)
5680 {
5681 	struct ftrace_ops *ops;
5682 	int cnt = 0;
5683 
5684 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
5685 		if (ops_references_rec(ops, rec))
5686 		    cnt++;
5687 	}
5688 
5689 	return cnt;
5690 }
5691 
5692 static void
5693 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
5694 {
5695 	struct ftrace_func_entry *entry;
5696 	struct dyn_ftrace *rec;
5697 	int i;
5698 
5699 	if (ftrace_hash_empty(hash))
5700 		return;
5701 
5702 	for (i = 0; i < pg->index; i++) {
5703 		rec = &pg->records[i];
5704 		entry = __ftrace_lookup_ip(hash, rec->ip);
5705 		/*
5706 		 * Do not allow this rec to match again.
5707 		 * Yeah, it may waste some memory, but will be removed
5708 		 * if/when the hash is modified again.
5709 		 */
5710 		if (entry)
5711 			entry->ip = 0;
5712 	}
5713 }
5714 
5715 /* Clear any records from hashes */
5716 static void clear_mod_from_hashes(struct ftrace_page *pg)
5717 {
5718 	struct trace_array *tr;
5719 
5720 	mutex_lock(&trace_types_lock);
5721 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5722 		if (!tr->ops || !tr->ops->func_hash)
5723 			continue;
5724 		mutex_lock(&tr->ops->func_hash->regex_lock);
5725 		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
5726 		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
5727 		mutex_unlock(&tr->ops->func_hash->regex_lock);
5728 	}
5729 	mutex_unlock(&trace_types_lock);
5730 }
5731 
5732 void ftrace_release_mod(struct module *mod)
5733 {
5734 	struct dyn_ftrace *rec;
5735 	struct ftrace_page **last_pg;
5736 	struct ftrace_page *tmp_page = NULL;
5737 	struct ftrace_page *pg;
5738 	int order;
5739 
5740 	mutex_lock(&ftrace_lock);
5741 
5742 	if (ftrace_disabled)
5743 		goto out_unlock;
5744 
5745 	/*
5746 	 * Each module has its own ftrace_pages, remove
5747 	 * them from the list.
5748 	 */
5749 	last_pg = &ftrace_pages_start;
5750 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
5751 		rec = &pg->records[0];
5752 		if (within_module_core(rec->ip, mod)) {
5753 			/*
5754 			 * As core pages are first, the first
5755 			 * page should never be a module page.
5756 			 */
5757 			if (WARN_ON(pg == ftrace_pages_start))
5758 				goto out_unlock;
5759 
5760 			/* Check if we are deleting the last page */
5761 			if (pg == ftrace_pages)
5762 				ftrace_pages = next_to_ftrace_page(last_pg);
5763 
5764 			ftrace_update_tot_cnt -= pg->index;
5765 			*last_pg = pg->next;
5766 
5767 			pg->next = tmp_page;
5768 			tmp_page = pg;
5769 		} else
5770 			last_pg = &pg->next;
5771 	}
5772  out_unlock:
5773 	mutex_unlock(&ftrace_lock);
5774 
5775 	for (pg = tmp_page; pg; pg = tmp_page) {
5776 
5777 		/* Needs to be called outside of ftrace_lock */
5778 		clear_mod_from_hashes(pg);
5779 
5780 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
5781 		free_pages((unsigned long)pg->records, order);
5782 		tmp_page = pg->next;
5783 		kfree(pg);
5784 	}
5785 }
5786 
5787 void ftrace_module_enable(struct module *mod)
5788 {
5789 	struct dyn_ftrace *rec;
5790 	struct ftrace_page *pg;
5791 
5792 	mutex_lock(&ftrace_lock);
5793 
5794 	if (ftrace_disabled)
5795 		goto out_unlock;
5796 
5797 	/*
5798 	 * If the tracing is enabled, go ahead and enable the record.
5799 	 *
5800 	 * The reason not to enable the record immediately is the
5801 	 * inherent check of ftrace_make_nop/ftrace_make_call for
5802 	 * correct previous instructions.  Doing the NOP conversion
5803 	 * first puts the module into the correct state, thus
5804 	 * passing the ftrace_make_call check.
5805 	 *
5806 	 * We also delay this to after the module code already set the
5807 	 * text to read-only, as we now need to set it back to read-write
5808 	 * so that we can modify the text.
5809 	 */
5810 	if (ftrace_start_up)
5811 		ftrace_arch_code_modify_prepare();
5812 
5813 	do_for_each_ftrace_rec(pg, rec) {
5814 		int cnt;
5815 		/*
5816 		 * do_for_each_ftrace_rec() is a double loop.
5817 		 * Module text shares the pg. If a record is
5818 		 * not part of this module, then skip this pg,
5819 		 * which the "break" will do.
5820 		 */
5821 		if (!within_module_core(rec->ip, mod))
5822 			break;
5823 
5824 		cnt = 0;
5825 
5826 		/*
5827 		 * When adding a module, we need to check if tracers are
5828 		 * currently enabled and, if they are and can trace this record,
5829 		 * we need to enable the module functions as well as update the
5830 		 * reference counts for those function records.
5831 		 */
5832 		if (ftrace_start_up)
5833 			cnt += referenced_filters(rec);
5834 
5835 		/* This clears FTRACE_FL_DISABLED */
5836 		rec->flags = cnt;
5837 
5838 		if (ftrace_start_up && cnt) {
5839 			int failed = __ftrace_replace_code(rec, 1);
5840 			if (failed) {
5841 				ftrace_bug(failed, rec);
5842 				goto out_loop;
5843 			}
5844 		}
5845 
5846 	} while_for_each_ftrace_rec();
5847 
5848  out_loop:
5849 	if (ftrace_start_up)
5850 		ftrace_arch_code_modify_post_process();
5851 
5852  out_unlock:
5853 	mutex_unlock(&ftrace_lock);
5854 
5855 	process_cached_mods(mod->name);
5856 }
5857 
5858 void ftrace_module_init(struct module *mod)
5859 {
5860 	if (ftrace_disabled || !mod->num_ftrace_callsites)
5861 		return;
5862 
5863 	ftrace_process_locs(mod, mod->ftrace_callsites,
5864 			    mod->ftrace_callsites + mod->num_ftrace_callsites);
5865 }
5866 #endif /* CONFIG_MODULES */
5867 
5868 void __init ftrace_free_init_mem(void)
5869 {
5870 	unsigned long start = (unsigned long)(&__init_begin);
5871 	unsigned long end = (unsigned long)(&__init_end);
5872 	struct ftrace_page **last_pg = &ftrace_pages_start;
5873 	struct ftrace_page *pg;
5874 	struct dyn_ftrace *rec;
5875 	struct dyn_ftrace key;
5876 	int order;
5877 
5878 	key.ip = start;
5879 	key.flags = end;	/* overload flags, as it is unsigned long */
5880 
5881 	mutex_lock(&ftrace_lock);
5882 
5883 	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
5884 		if (end < pg->records[0].ip ||
5885 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
5886 			continue;
5887  again:
5888 		rec = bsearch(&key, pg->records, pg->index,
5889 			      sizeof(struct dyn_ftrace),
5890 			      ftrace_cmp_recs);
5891 		if (!rec)
5892 			continue;
5893 		pg->index--;
5894 		ftrace_update_tot_cnt--;
5895 		if (!pg->index) {
5896 			*last_pg = pg->next;
5897 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
5898 			free_pages((unsigned long)pg->records, order);
5899 			kfree(pg);
5900 			pg = container_of(last_pg, struct ftrace_page, next);
5901 			if (!(*last_pg))
5902 				ftrace_pages = pg;
5903 			continue;
5904 		}
5905 		memmove(rec, rec + 1,
5906 			(pg->index - (rec - pg->records)) * sizeof(*rec));
5907 		/* More than one function may be in this block */
5908 		goto again;
5909 	}
5910 	mutex_unlock(&ftrace_lock);
5911 }
5912 
5913 void __init ftrace_init(void)
5914 {
5915 	extern unsigned long __start_mcount_loc[];
5916 	extern unsigned long __stop_mcount_loc[];
5917 	unsigned long count, flags;
5918 	int ret;
5919 
5920 	local_irq_save(flags);
5921 	ret = ftrace_dyn_arch_init();
5922 	local_irq_restore(flags);
5923 	if (ret)
5924 		goto failed;
5925 
5926 	count = __stop_mcount_loc - __start_mcount_loc;
5927 	if (!count) {
5928 		pr_info("ftrace: No functions to be traced?\n");
5929 		goto failed;
5930 	}
5931 
5932 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
5933 		count, count / ENTRIES_PER_PAGE + 1);
5934 
5935 	last_ftrace_enabled = ftrace_enabled = 1;
5936 
5937 	ret = ftrace_process_locs(NULL,
5938 				  __start_mcount_loc,
5939 				  __stop_mcount_loc);
5940 
5941 	set_ftrace_early_filters();
5942 
5943 	return;
5944  failed:
5945 	ftrace_disabled = 1;
5946 }
5947 
5948 /* Do nothing if arch does not support this */
5949 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
5950 {
5951 }
5952 
5953 static void ftrace_update_trampoline(struct ftrace_ops *ops)
5954 {
5955 	arch_ftrace_update_trampoline(ops);
5956 }
5957 
5958 void ftrace_init_trace_array(struct trace_array *tr)
5959 {
5960 	INIT_LIST_HEAD(&tr->func_probes);
5961 	INIT_LIST_HEAD(&tr->mod_trace);
5962 	INIT_LIST_HEAD(&tr->mod_notrace);
5963 }
5964 #else
5965 
5966 static struct ftrace_ops global_ops = {
5967 	.func			= ftrace_stub,
5968 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
5969 				  FTRACE_OPS_FL_INITIALIZED |
5970 				  FTRACE_OPS_FL_PID,
5971 };
5972 
5973 static int __init ftrace_nodyn_init(void)
5974 {
5975 	ftrace_enabled = 1;
5976 	return 0;
5977 }
5978 core_initcall(ftrace_nodyn_init);
5979 
5980 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
5981 static inline void ftrace_startup_enable(int command) { }
5982 static inline void ftrace_startup_all(int command) { }
5983 /* Keep as macros so we do not need to define the commands */
5984 # define ftrace_startup(ops, command)					\
5985 	({								\
5986 		int ___ret = __register_ftrace_function(ops);		\
5987 		if (!___ret)						\
5988 			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
5989 		___ret;							\
5990 	})
5991 # define ftrace_shutdown(ops, command)					\
5992 	({								\
5993 		int ___ret = __unregister_ftrace_function(ops);		\
5994 		if (!___ret)						\
5995 			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
5996 		___ret;							\
5997 	})
5998 
5999 # define ftrace_startup_sysctl()	do { } while (0)
6000 # define ftrace_shutdown_sysctl()	do { } while (0)
6001 
6002 static inline int
6003 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
6004 {
6005 	return 1;
6006 }
6007 
6008 static void ftrace_update_trampoline(struct ftrace_ops *ops)
6009 {
6010 }
6011 
6012 #endif /* CONFIG_DYNAMIC_FTRACE */
6013 
6014 __init void ftrace_init_global_array_ops(struct trace_array *tr)
6015 {
6016 	tr->ops = &global_ops;
6017 	tr->ops->private = tr;
6018 	ftrace_init_trace_array(tr);
6019 }
6020 
6021 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
6022 {
6023 	/* If we filter on pids, update to use the pid function */
6024 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
6025 		if (WARN_ON(tr->ops->func != ftrace_stub))
6026 			printk("ftrace ops had %pS for function\n",
6027 			       tr->ops->func);
6028 	}
6029 	tr->ops->func = func;
6030 	tr->ops->private = tr;
6031 }
6032 
6033 void ftrace_reset_array_ops(struct trace_array *tr)
6034 {
6035 	tr->ops->func = ftrace_stub;
6036 }
6037 
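/*
 * Walk the list of registered ftrace_ops and call every op whose flags
 * and filter hash accept this ip. Recursion protection and disabled
 * preemption keep this safe from any context the tracer can hook.
 */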
6038 static inline void
6039 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6040 		       struct ftrace_ops *ignored, struct pt_regs *regs)
6041 {
6042 	struct ftrace_ops *op;
6043 	int bit;
6044 
6045 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6046 	if (bit < 0)
6047 		return;
6048 
6049 	/*
6050 	 * Some of the ops may be dynamically allocated;
6051 	 * they must be freed after a synchronize_sched().
6052 	 */
6053 	preempt_disable_notrace();
6054 
6055 	do_for_each_ftrace_op(op, ftrace_ops_list) {
6056 		/*
6057 		 * Check the following for each ops before calling their func:
6058 		 *  if RCU flag is set, then rcu_is_watching() must be true
6059 		 *  if PER_CPU is set, then ftrace_function_local_disabled()
6060 		 *                          must return false
6061 		 *  Otherwise test if the ip matches the ops filter
6062 		 *
6063 		 * If any of the above fails then the op->func() is not executed.
6064 		 */
6065 		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
6066 		    (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
6067 		     !ftrace_function_local_disabled(op)) &&
6068 		    ftrace_ops_test(op, ip, regs)) {
6069 
6070 			if (FTRACE_WARN_ON(!op->func)) {
6071 				pr_warn("op=%p %pS\n", op, op);
6072 				goto out;
6073 			}
6074 			op->func(ip, parent_ip, op, regs);
6075 		}
6076 	} while_for_each_ftrace_op(op);
6077 out:
6078 	preempt_enable_notrace();
6079 	trace_clear_recursion(bit);
6080 }
6081 
6082 /*
6083  * Some archs only support passing ip and parent_ip. Even though
6084  * the list function ignores the op parameter, we do not want any
6085  * C side effects, where a function is called without the caller
6086  * sending a third parameter.
6087  * Archs are expected to support both regs and ftrace_ops at the same time.
6088  * If they support ftrace_ops, it is assumed they support regs.
6089  * If callbacks want to use regs, they must either check for regs
6090  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
6091  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
6092  * An architecture can pass partial regs with ftrace_ops and still
6093  * set the ARCH_SUPPORTS_FTRACE_OPS.
6094  */
6095 #if ARCH_SUPPORTS_FTRACE_OPS
6096 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
6097 				 struct ftrace_ops *op, struct pt_regs *regs)
6098 {
6099 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
6100 }
6101 #else
6102 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
6103 {
6104 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
6105 }
6106 #endif
6107 
6108 /*
6109  * If there's only one function registered but it does not support
6110  * recursion, needs RCU protection and/or requires per cpu handling, then
6111  * this function will be called by the mcount trampoline.
6112  */
6113 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
6114 				   struct ftrace_ops *op, struct pt_regs *regs)
6115 {
6116 	int bit;
6117 
6118 	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
6119 		return;
6120 
6121 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
6122 	if (bit < 0)
6123 		return;
6124 
6125 	preempt_disable_notrace();
6126 
6127 	if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
6128 	    !ftrace_function_local_disabled(op)) {
6129 		op->func(ip, parent_ip, op, regs);
6130 	}
6131 
6132 	preempt_enable_notrace();
6133 	trace_clear_recursion(bit);
6134 }
6135 
6136 /**
6137  * ftrace_ops_get_func - get the function a trampoline should call
6138  * @ops: the ops to get the function for
6139  *
6140  * Normally the mcount trampoline will call the ops->func, but there
6141  * are times that it should not. For example, if the ops does not
6142  * have its own recursion protection, then it should call the
6143  * ftrace_ops_assist_func() instead.
6144  *
6145  * Returns the function that the trampoline should call for @ops.
6146  */
6147 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
6148 {
6149 	/*
6150 	 * If the function does not handle recursion, needs to be RCU safe,
6151 	 * or does per cpu logic, then we need to call the assist handler.
6152 	 */
6153 	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
6154 	    ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
6155 		return ftrace_ops_assist_func;
6156 
6157 	return ops->func;
6158 }
6159 
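/*
 * sched_switch probe used for pid filtering: cache, per CPU, whether
 * the task about to run should be ignored by the function tracer.
 */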
6160 static void
6161 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
6162 		    struct task_struct *prev, struct task_struct *next)
6163 {
6164 	struct trace_array *tr = data;
6165 	struct trace_pid_list *pid_list;
6166 
6167 	pid_list = rcu_dereference_sched(tr->function_pids);
6168 
6169 	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
6170 		       trace_ignore_this_task(pid_list, next));
6171 }
6172 
6173 static void
6174 ftrace_pid_follow_sched_process_fork(void *data,
6175 				     struct task_struct *self,
6176 				     struct task_struct *task)
6177 {
6178 	struct trace_pid_list *pid_list;
6179 	struct trace_array *tr = data;
6180 
6181 	pid_list = rcu_dereference_sched(tr->function_pids);
6182 	trace_filter_add_remove_task(pid_list, self, task);
6183 }
6184 
6185 static void
6186 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
6187 {
6188 	struct trace_pid_list *pid_list;
6189 	struct trace_array *tr = data;
6190 
6191 	pid_list = rcu_dereference_sched(tr->function_pids);
6192 	trace_filter_add_remove_task(pid_list, NULL, task);
6193 }
6194 
6195 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
6196 {
6197 	if (enable) {
6198 		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6199 						  tr);
6200 		register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6201 						  tr);
6202 	} else {
6203 		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
6204 						    tr);
6205 		unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
6206 						    tr);
6207 	}
6208 }
6209 
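/*
 * Drop the pid filter for this instance: unhook the sched_switch probe,
 * clear the per-CPU ignore flags and free the old pid list once all
 * current users are done with it.
 */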
6210 static void clear_ftrace_pids(struct trace_array *tr)
6211 {
6212 	struct trace_pid_list *pid_list;
6213 	int cpu;
6214 
6215 	pid_list = rcu_dereference_protected(tr->function_pids,
6216 					     lockdep_is_held(&ftrace_lock));
6217 	if (!pid_list)
6218 		return;
6219 
6220 	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
6221 
6222 	for_each_possible_cpu(cpu)
6223 		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
6224 
6225 	rcu_assign_pointer(tr->function_pids, NULL);
6226 
6227 	/* Wait till all users are no longer using pid filtering */
6228 	synchronize_sched();
6229 
6230 	trace_free_pid_list(pid_list);
6231 }
6232 
6233 void ftrace_clear_pids(struct trace_array *tr)
6234 {
6235 	mutex_lock(&ftrace_lock);
6236 
6237 	clear_ftrace_pids(tr);
6238 
6239 	mutex_unlock(&ftrace_lock);
6240 }
6241 
6242 static void ftrace_pid_reset(struct trace_array *tr)
6243 {
6244 	mutex_lock(&ftrace_lock);
6245 	clear_ftrace_pids(tr);
6246 
6247 	ftrace_update_pid_func();
6248 	ftrace_startup_all(0);
6249 
6250 	mutex_unlock(&ftrace_lock);
6251 }
6252 
6253 /* Greater than any max PID */
6254 #define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
6255 
6256 static void *fpid_start(struct seq_file *m, loff_t *pos)
6257 	__acquires(RCU)
6258 {
6259 	struct trace_pid_list *pid_list;
6260 	struct trace_array *tr = m->private;
6261 
6262 	mutex_lock(&ftrace_lock);
6263 	rcu_read_lock_sched();
6264 
6265 	pid_list = rcu_dereference_sched(tr->function_pids);
6266 
6267 	if (!pid_list)
6268 		return !(*pos) ? FTRACE_NO_PIDS : NULL;
6269 
6270 	return trace_pid_start(pid_list, pos);
6271 }
6272 
6273 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
6274 {
6275 	struct trace_array *tr = m->private;
6276 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
6277 
6278 	if (v == FTRACE_NO_PIDS)
6279 		return NULL;
6280 
6281 	return trace_pid_next(pid_list, v, pos);
6282 }
6283 
6284 static void fpid_stop(struct seq_file *m, void *p)
6285 	__releases(RCU)
6286 {
6287 	rcu_read_unlock_sched();
6288 	mutex_unlock(&ftrace_lock);
6289 }
6290 
6291 static int fpid_show(struct seq_file *m, void *v)
6292 {
6293 	if (v == FTRACE_NO_PIDS) {
6294 		seq_puts(m, "no pid\n");
6295 		return 0;
6296 	}
6297 
6298 	return trace_pid_show(m, v);
6299 }
6300 
6301 static const struct seq_operations ftrace_pid_sops = {
6302 	.start = fpid_start,
6303 	.next = fpid_next,
6304 	.stop = fpid_stop,
6305 	.show = fpid_show,
6306 };
6307 
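/*
 * seq_file open handler for set_ftrace_pid. Opening for write with
 * O_TRUNC clears the existing pid filter first.
 */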
6308 static int
6309 ftrace_pid_open(struct inode *inode, struct file *file)
6310 {
6311 	struct trace_array *tr = inode->i_private;
6312 	struct seq_file *m;
6313 	int ret = 0;
6314 
6315 	if (trace_array_get(tr) < 0)
6316 		return -ENODEV;
6317 
6318 	if ((file->f_mode & FMODE_WRITE) &&
6319 	    (file->f_flags & O_TRUNC))
6320 		ftrace_pid_reset(tr);
6321 
6322 	ret = seq_open(file, &ftrace_pid_sops);
6323 	if (ret < 0) {
6324 		trace_array_put(tr);
6325 	} else {
6326 		m = file->private_data;
6327 		/* copy tr over to seq ops */
6328 		m->private = tr;
6329 	}
6330 
6331 	return ret;
6332 }
6333 
6334 static void ignore_task_cpu(void *data)
6335 {
6336 	struct trace_array *tr = data;
6337 	struct trace_pid_list *pid_list;
6338 
6339 	/*
6340 	 * This function is called by on_each_cpu() while the
6341 	 * ftrace_lock is held.
6342 	 */
6343 	pid_list = rcu_dereference_protected(tr->function_pids,
6344 					     mutex_is_locked(&ftrace_lock));
6345 
6346 	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
6347 		       trace_ignore_this_task(pid_list, current));
6348 }
6349 
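/*
 * Handle writes to set_ftrace_pid: parse the user buffer into a new pid
 * list, publish it with RCU, and refresh each CPU's ftrace_ignore_pid
 * flag so tasks that are already running are filtered correctly.
 */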
6350 static ssize_t
6351 ftrace_pid_write(struct file *filp, const char __user *ubuf,
6352 		   size_t cnt, loff_t *ppos)
6353 {
6354 	struct seq_file *m = filp->private_data;
6355 	struct trace_array *tr = m->private;
6356 	struct trace_pid_list *filtered_pids = NULL;
6357 	struct trace_pid_list *pid_list;
6358 	ssize_t ret;
6359 
6360 	if (!cnt)
6361 		return 0;
6362 
6363 	mutex_lock(&ftrace_lock);
6364 
6365 	filtered_pids = rcu_dereference_protected(tr->function_pids,
6366 					     lockdep_is_held(&ftrace_lock));
6367 
6368 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
6369 	if (ret < 0)
6370 		goto out;
6371 
6372 	rcu_assign_pointer(tr->function_pids, pid_list);
6373 
6374 	if (filtered_pids) {
6375 		synchronize_sched();
6376 		trace_free_pid_list(filtered_pids);
6377 	} else if (pid_list) {
6378 		/* Register a probe to set whether to ignore the tracing of a task */
6379 		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
6380 	}
6381 
6382 	/*
6383 	 * Ignoring of pids is done at task switch. But we have to
6384 	 * check for those tasks that are currently running.
6385 	 * Always do this in case a pid was appended or removed.
6386 	 */
6387 	on_each_cpu(ignore_task_cpu, tr, 1);
6388 
6389 	ftrace_update_pid_func();
6390 	ftrace_startup_all(0);
6391  out:
6392 	mutex_unlock(&ftrace_lock);
6393 
6394 	if (ret > 0)
6395 		*ppos += ret;
6396 
6397 	return ret;
6398 }
6399 
6400 static int
6401 ftrace_pid_release(struct inode *inode, struct file *file)
6402 {
6403 	struct trace_array *tr = inode->i_private;
6404 
6405 	trace_array_put(tr);
6406 
6407 	return seq_release(inode, file);
6408 }
6409 
6410 static const struct file_operations ftrace_pid_fops = {
6411 	.open		= ftrace_pid_open,
6412 	.write		= ftrace_pid_write,
6413 	.read		= seq_read,
6414 	.llseek		= tracing_lseek,
6415 	.release	= ftrace_pid_release,
6416 };
6417 
6418 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6419 {
6420 	trace_create_file("set_ftrace_pid", 0644, d_tracer,
6421 			    tr, &ftrace_pid_fops);
6422 }
6423 
6424 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
6425 					 struct dentry *d_tracer)
6426 {
6427 	/* Only the top level directory has the dyn_tracefs and profile */
6428 	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
6429 
6430 	ftrace_init_dyn_tracefs(d_tracer);
6431 	ftrace_profile_tracefs(d_tracer);
6432 }
6433 
6434 /**
6435  * ftrace_kill - kill ftrace
6436  *
6437  * This function should be used by panic code. It stops ftrace
6438  * but in a not so nice way: nothing is shut down cleanly, which is
6439  * what makes it safe to call from atomic context such as a panic.
6440  */
6441 void ftrace_kill(void)
6442 {
6443 	ftrace_disabled = 1;
6444 	ftrace_enabled = 0;
6445 	clear_ftrace_function();
6446 }
6447 
6448 /**
6449  * ftrace_is_dead - Test if ftrace is dead or not.
6450  */
6451 int ftrace_is_dead(void)
6452 {
6453 	return ftrace_disabled;
6454 }
6455 
6456 /**
6457  * register_ftrace_function - register a function for profiling
6458  * @ops: ops structure that holds the function for profiling.
6459  *
6460  * Register a function to be called by all functions in the
6461  * kernel.
6462  *
6463  * Note: @ops->func and all the functions it calls must be labeled
6464  *       with "notrace", otherwise it will go into a
6465  *       recursive loop.
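 *
 * A minimal, purely illustrative user (the my_* names below are
 * placeholders, not kernel API) could look like:
 *
 *	static unsigned long my_hits;
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct pt_regs *regs)
 *	{
 *		my_hits++;
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);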
6466  */
6467 int register_ftrace_function(struct ftrace_ops *ops)
6468 {
6469 	int ret = -1;
6470 
6471 	ftrace_ops_init(ops);
6472 
6473 	mutex_lock(&ftrace_lock);
6474 
6475 	ret = ftrace_startup(ops, 0);
6476 
6477 	mutex_unlock(&ftrace_lock);
6478 
6479 	return ret;
6480 }
6481 EXPORT_SYMBOL_GPL(register_ftrace_function);
6482 
6483 /**
6484  * unregister_ftrace_function - unregister a function for profiling.
6485  * @ops: ops structure that holds the function to unregister
6486  *
6487  * Unregister a function that was added to be called by ftrace profiling.
6488  */
6489 int unregister_ftrace_function(struct ftrace_ops *ops)
6490 {
6491 	int ret;
6492 
6493 	mutex_lock(&ftrace_lock);
6494 	ret = ftrace_shutdown(ops, 0);
6495 	mutex_unlock(&ftrace_lock);
6496 
6497 	return ret;
6498 }
6499 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
6500 
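/*
 * Handler for the ftrace_enabled sysctl: turning it off points the
 * trace function at ftrace_stub, turning it back on re-installs the
 * real function chosen from the registered ops.
 */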
6501 int
6502 ftrace_enable_sysctl(struct ctl_table *table, int write,
6503 		     void __user *buffer, size_t *lenp,
6504 		     loff_t *ppos)
6505 {
6506 	int ret = -ENODEV;
6507 
6508 	mutex_lock(&ftrace_lock);
6509 
6510 	if (unlikely(ftrace_disabled))
6511 		goto out;
6512 
6513 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
6514 
6515 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
6516 		goto out;
6517 
6518 	last_ftrace_enabled = !!ftrace_enabled;
6519 
6520 	if (ftrace_enabled) {
6521 
6522 		/* we are starting ftrace again */
6523 		if (rcu_dereference_protected(ftrace_ops_list,
6524 			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
6525 			update_ftrace_function();
6526 
6527 		ftrace_startup_sysctl();
6528 
6529 	} else {
6530 		/* stopping ftrace calls (just send to ftrace_stub) */
6531 		ftrace_trace_function = ftrace_stub;
6532 
6533 		ftrace_shutdown_sysctl();
6534 	}
6535 
6536  out:
6537 	mutex_unlock(&ftrace_lock);
6538 	return ret;
6539 }
6540 
6541 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6542 
6543 static struct ftrace_ops graph_ops = {
6544 	.func			= ftrace_stub,
6545 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
6546 				   FTRACE_OPS_FL_INITIALIZED |
6547 				   FTRACE_OPS_FL_PID |
6548 				   FTRACE_OPS_FL_STUB,
6549 #ifdef FTRACE_GRAPH_TRAMP_ADDR
6550 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
6551 	/* trampoline_size is only needed for dynamically allocated tramps */
6552 #endif
6553 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
6554 };
6555 
6556 void ftrace_graph_sleep_time_control(bool enable)
6557 {
6558 	fgraph_sleep_time = enable;
6559 }
6560 
6561 void ftrace_graph_graph_time_control(bool enable)
6562 {
6563 	fgraph_graph_time = enable;
6564 }
6565 
6566 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
6567 {
6568 	return 0;
6569 }
6570 
6571 /* The callbacks that hook a function */
6572 trace_func_graph_ret_t ftrace_graph_return =
6573 			(trace_func_graph_ret_t)ftrace_stub;
6574 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
6575 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
6576 
6577 /* Try to assign a return stack to each of up to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
6578 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
6579 {
6580 	int i;
6581 	int ret = 0;
6582 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
6583 	struct task_struct *g, *t;
6584 
6585 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
6586 		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
6587 					* sizeof(struct ftrace_ret_stack),
6588 					GFP_KERNEL);
6589 		if (!ret_stack_list[i]) {
6590 			start = 0;
6591 			end = i;
6592 			ret = -ENOMEM;
6593 			goto free;
6594 		}
6595 	}
6596 
6597 	read_lock(&tasklist_lock);
6598 	do_each_thread(g, t) {
6599 		if (start == end) {
6600 			ret = -EAGAIN;
6601 			goto unlock;
6602 		}
6603 
6604 		if (t->ret_stack == NULL) {
6605 			atomic_set(&t->tracing_graph_pause, 0);
6606 			atomic_set(&t->trace_overrun, 0);
6607 			t->curr_ret_stack = -1;
6608 			/* Make sure the tasks see the -1 first: */
6609 			smp_wmb();
6610 			t->ret_stack = ret_stack_list[start++];
6611 		}
6612 	} while_each_thread(g, t);
6613 
6614 unlock:
6615 	read_unlock(&tasklist_lock);
6616 free:
6617 	for (i = start; i < end; i++)
6618 		kfree(ret_stack_list[i]);
6619 	return ret;
6620 }
6621 
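/*
 * sched_switch probe for the graph tracer: when sleep time is not being
 * counted, shift the pending calltime entries of the incoming task
 * forward by the time it spent scheduled out.
 */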
6622 static void
6623 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
6624 			struct task_struct *prev, struct task_struct *next)
6625 {
6626 	unsigned long long timestamp;
6627 	int index;
6628 
6629 	/*
6630 	 * Does the user want to count the time a function was asleep?
6631 	 * If so, do not update the time stamps.
6632 	 */
6633 	if (fgraph_sleep_time)
6634 		return;
6635 
6636 	timestamp = trace_clock_local();
6637 
6638 	prev->ftrace_timestamp = timestamp;
6639 
6640 	/* only process tasks that we timestamped */
6641 	if (!next->ftrace_timestamp)
6642 		return;
6643 
6644 	/*
6645 	 * Update all the counters in next to make up for the
6646 	 * time next was sleeping.
6647 	 */
6648 	timestamp -= next->ftrace_timestamp;
6649 
6650 	for (index = next->curr_ret_stack; index >= 0; index--)
6651 		next->ret_stack[index].calltime += timestamp;
6652 }
6653 
6654 /* Allocate a return stack for each task */
6655 static int start_graph_tracing(void)
6656 {
6657 	struct ftrace_ret_stack **ret_stack_list;
6658 	int ret, cpu;
6659 
6660 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
6661 				sizeof(struct ftrace_ret_stack *),
6662 				GFP_KERNEL);
6663 
6664 	if (!ret_stack_list)
6665 		return -ENOMEM;
6666 
6667 	/* The cpu_boot init_task->ret_stack will never be freed */
6668 	for_each_online_cpu(cpu) {
6669 		if (!idle_task(cpu)->ret_stack)
6670 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
6671 	}
6672 
6673 	do {
6674 		ret = alloc_retstack_tasklist(ret_stack_list);
6675 	} while (ret == -EAGAIN);
6676 
6677 	if (!ret) {
6678 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
6679 		if (ret)
6680 			pr_info("ftrace_graph: Couldn't activate tracepoint"
6681 				" probe to kernel_sched_switch\n");
6682 	}
6683 
6684 	kfree(ret_stack_list);
6685 	return ret;
6686 }
6687 
6688 /*
6689  * Hibernation protection.
6690  * The state of the current task is too unstable during
6691  * suspend/restore to disk. We want to protect against that.
6692  */
6693 static int
6694 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
6695 							void *unused)
6696 {
6697 	switch (state) {
6698 	case PM_HIBERNATION_PREPARE:
6699 		pause_graph_tracing();
6700 		break;
6701 
6702 	case PM_POST_HIBERNATION:
6703 		unpause_graph_tracing();
6704 		break;
6705 	}
6706 	return NOTIFY_DONE;
6707 }
6708 
6709 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
6710 {
6711 	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
6712 		return 0;
6713 	return __ftrace_graph_entry(trace);
6714 }
6715 
6716 /*
6717  * The function graph tracer should only trace the functions defined
6718  * by set_ftrace_filter and set_ftrace_notrace. If another function
6719  * tracer ops is registered, the graph tracer needs to test each
6720  * function against the global ops, rather than trace any function
6721  * that any ftrace_ops has registered.
6722  */
6723 static void update_function_graph_func(void)
6724 {
6725 	struct ftrace_ops *op;
6726 	bool do_test = false;
6727 
6728 	/*
6729 	 * The graph and global ops share the same set of functions
6730 	 * to test. If any other ops is on the list, then
6731 	 * the graph tracing needs to test if it is the function
6732 	 * it should call.
6733 	 */
6734 	do_for_each_ftrace_op(op, ftrace_ops_list) {
6735 		if (op != &global_ops && op != &graph_ops &&
6736 		    op != &ftrace_list_end) {
6737 			do_test = true;
6738 			/* in double loop, break out with goto */
6739 			goto out;
6740 		}
6741 	} while_for_each_ftrace_op(op);
6742  out:
6743 	if (do_test)
6744 		ftrace_graph_entry = ftrace_graph_entry_test;
6745 	else
6746 		ftrace_graph_entry = __ftrace_graph_entry;
6747 }
6748 
6749 static struct notifier_block ftrace_suspend_notifier = {
6750 	.notifier_call = ftrace_suspend_notifier_call,
6751 };
6752 
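/*
 * Enable the function graph tracer: allocate return stacks for existing
 * tasks, install the entry/return callbacks and start the graph ops.
 */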
6753 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
6754 			trace_func_graph_ent_t entryfunc)
6755 {
6756 	int ret = 0;
6757 
6758 	mutex_lock(&ftrace_lock);
6759 
6760 	/* we currently allow only one tracer registered at a time */
6761 	if (ftrace_graph_active) {
6762 		ret = -EBUSY;
6763 		goto out;
6764 	}
6765 
6766 	register_pm_notifier(&ftrace_suspend_notifier);
6767 
6768 	ftrace_graph_active++;
6769 	ret = start_graph_tracing();
6770 	if (ret) {
6771 		ftrace_graph_active--;
6772 		goto out;
6773 	}
6774 
6775 	ftrace_graph_return = retfunc;
6776 
6777 	/*
6778 	 * Update the indirect function to the entryfunc, and the
6779 	 * function that gets called to the entry_test first. Then
6780 	 * call the update fgraph entry function to determine if
6781 	 * the entryfunc should be called directly or not.
6782 	 */
6783 	__ftrace_graph_entry = entryfunc;
6784 	ftrace_graph_entry = ftrace_graph_entry_test;
6785 	update_function_graph_func();
6786 
6787 	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
6788 out:
6789 	mutex_unlock(&ftrace_lock);
6790 	return ret;
6791 }
6792 
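/*
 * Disable the function graph tracer: restore the stub callbacks, shut
 * down the graph ops and remove the suspend notifier and sched_switch
 * probe.
 */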
6793 void unregister_ftrace_graph(void)
6794 {
6795 	mutex_lock(&ftrace_lock);
6796 
6797 	if (unlikely(!ftrace_graph_active))
6798 		goto out;
6799 
6800 	ftrace_graph_active--;
6801 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
6802 	ftrace_graph_entry = ftrace_graph_entry_stub;
6803 	__ftrace_graph_entry = ftrace_graph_entry_stub;
6804 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
6805 	unregister_pm_notifier(&ftrace_suspend_notifier);
6806 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
6807 
6808  out:
6809 	mutex_unlock(&ftrace_lock);
6810 }
6811 
6812 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
6813 
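/* Attach a freshly allocated return stack to @t and reset its graph state. */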
6814 static void
6815 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
6816 {
6817 	atomic_set(&t->tracing_graph_pause, 0);
6818 	atomic_set(&t->trace_overrun, 0);
6819 	t->ftrace_timestamp = 0;
6820 	/* make curr_ret_stack visible before we add the ret_stack */
6821 	smp_wmb();
6822 	t->ret_stack = ret_stack;
6823 }
6824 
6825 /*
6826  * Allocate a return stack for the idle task. May be the first
6827  * time through, or it may be done by CPU hotplug online.
6828  */
6829 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
6830 {
6831 	t->curr_ret_stack = -1;
6832 	/*
6833 	 * The idle task has no parent, it either has its own
6834 	 * stack or no stack at all.
6835 	 */
6836 	if (t->ret_stack)
6837 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
6838 
6839 	if (ftrace_graph_active) {
6840 		struct ftrace_ret_stack *ret_stack;
6841 
6842 		ret_stack = per_cpu(idle_ret_stack, cpu);
6843 		if (!ret_stack) {
6844 			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
6845 					    * sizeof(struct ftrace_ret_stack),
6846 					    GFP_KERNEL);
6847 			if (!ret_stack)
6848 				return;
6849 			per_cpu(idle_ret_stack, cpu) = ret_stack;
6850 		}
6851 		graph_init_task(t, ret_stack);
6852 	}
6853 }
6854 
6855 /* Allocate a return stack for newly created task */
6856 void ftrace_graph_init_task(struct task_struct *t)
6857 {
6858 	/* Make sure we do not use the parent ret_stack */
6859 	t->ret_stack = NULL;
6860 	t->curr_ret_stack = -1;
6861 
6862 	if (ftrace_graph_active) {
6863 		struct ftrace_ret_stack *ret_stack;
6864 
6865 		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
6866 				* sizeof(struct ftrace_ret_stack),
6867 				GFP_KERNEL);
6868 		if (!ret_stack)
6869 			return;
6870 		graph_init_task(t, ret_stack);
6871 	}
6872 }
6873 
6874 void ftrace_graph_exit_task(struct task_struct *t)
6875 {
6876 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
6877 
6878 	t->ret_stack = NULL;
6879 	/* NULL must become visible to IRQs before we free it: */
6880 	barrier();
6881 
6882 	kfree(ret_stack);
6883 }
6884 #endif
6885