xref: /openbmc/linux/kernel/trace/ftrace.c (revision 239480ab)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/sched/task.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/suspend.h>
22 #include <linux/tracefs.h>
23 #include <linux/hardirq.h>
24 #include <linux/kthread.h>
25 #include <linux/uaccess.h>
26 #include <linux/bsearch.h>
27 #include <linux/module.h>
28 #include <linux/ftrace.h>
29 #include <linux/sysctl.h>
30 #include <linux/slab.h>
31 #include <linux/ctype.h>
32 #include <linux/sort.h>
33 #include <linux/list.h>
34 #include <linux/hash.h>
35 #include <linux/rcupdate.h>
36 
37 #include <trace/events/sched.h>
38 
39 #include <asm/sections.h>
40 #include <asm/setup.h>
41 
42 #include "trace_output.h"
43 #include "trace_stat.h"
44 
45 #define FTRACE_WARN_ON(cond)			\
46 	({					\
47 		int ___r = cond;		\
48 		if (WARN_ON(___r))		\
49 			ftrace_kill();		\
50 		___r;				\
51 	})
52 
53 #define FTRACE_WARN_ON_ONCE(cond)		\
54 	({					\
55 		int ___r = cond;		\
56 		if (WARN_ON_ONCE(___r))		\
57 			ftrace_kill();		\
58 		___r;				\
59 	})
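/*
 * Being statement expressions, these evaluate to the tested condition,
 * so a caller can shut ftrace down and branch on the result in one
 * step. An illustrative sketch (mirroring real uses further down in
 * this file):
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return;
 */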
60 
61 /* hash bits for specific function selection */
62 #define FTRACE_HASH_BITS 7
63 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
64 #define FTRACE_HASH_DEFAULT_BITS 10
65 #define FTRACE_HASH_MAX_BITS 12
66 
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_OPS_HASH(opsname)	\
69 	.func_hash		= &opsname.local_hash,			\
70 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71 #define ASSIGN_OPS_HASH(opsname, val) \
72 	.func_hash		= val, \
73 	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
74 #else
75 #define INIT_OPS_HASH(opsname)
76 #define ASSIGN_OPS_HASH(opsname, val)
77 #endif
78 
79 static struct ftrace_ops ftrace_list_end __read_mostly = {
80 	.func		= ftrace_stub,
81 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
82 	INIT_OPS_HASH(ftrace_list_end)
83 };
84 
85 /* ftrace_enabled is a method to turn ftrace on or off */
86 int ftrace_enabled __read_mostly;
87 static int last_ftrace_enabled;
88 
89 /* Current function tracing op */
90 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
91 /* What to set function_trace_op to */
92 static struct ftrace_ops *set_function_trace_op;
93 
94 static bool ftrace_pids_enabled(struct ftrace_ops *ops)
95 {
96 	struct trace_array *tr;
97 
98 	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
99 		return false;
100 
101 	tr = ops->private;
102 
103 	return tr->function_pids != NULL;
104 }
105 
106 static void ftrace_update_trampoline(struct ftrace_ops *ops);
107 
108 /*
109  * ftrace_disabled is set when an anomaly is discovered.
110  * ftrace_disabled is much stronger than ftrace_enabled.
111  */
112 static int ftrace_disabled __read_mostly;
113 
114 static DEFINE_MUTEX(ftrace_lock);
115 
116 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
117 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
118 static struct ftrace_ops global_ops;
119 
120 #if ARCH_SUPPORTS_FTRACE_OPS
121 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
122 				 struct ftrace_ops *op, struct pt_regs *regs);
123 #else
124 /* See comment below, where ftrace_ops_list_func is defined */
125 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
126 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
127 #endif
128 
129 /*
130  * Traverse the ftrace_ops_list, invoking all entries.  The reason that we
131  * can use rcu_dereference_raw_notrace() is that elements removed from this list
132  * are simply leaked, so there is no need to interact with a grace-period
133  * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
134  * concurrent insertions into the ftrace_ops_list.
135  *
136  * Silly Alpha and silly pointer-speculation compiler optimizations!
137  */
138 #define do_for_each_ftrace_op(op, list)			\
139 	op = rcu_dereference_raw_notrace(list);			\
140 	do
141 
142 /*
143  * Optimized for just a single item in the list (as that is the normal case).
144  */
145 #define while_for_each_ftrace_op(op)				\
146 	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
147 	       unlikely((op) != &ftrace_list_end))
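/*
 * An illustrative usage sketch of the pair (see ftrace_update_pid_func()
 * below for a real caller). The two macros expand to a plain do/while
 * loop, so the braces in between form the loop body:
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_ENABLED)
 *			pr_info("ops func: %ps\n", op->func);
 *	} while_for_each_ftrace_op(op);
 *
 * Note that on an empty list the body still runs once with op pointing
 * at the ftrace_list_end stub, which is why callers key off the ops
 * flags or hashes rather than assuming a real entry.
 */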
148 
149 static inline void ftrace_ops_init(struct ftrace_ops *ops)
150 {
151 #ifdef CONFIG_DYNAMIC_FTRACE
152 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
153 		mutex_init(&ops->local_hash.regex_lock);
154 		ops->func_hash = &ops->local_hash;
155 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
156 	}
157 #endif
158 }
159 
160 /**
161  * ftrace_nr_registered_ops - return number of ops registered
162  *
163  * Returns the number of ftrace_ops that are registered and tracing functions
164  */
165 int ftrace_nr_registered_ops(void)
166 {
167 	struct ftrace_ops *ops;
168 	int cnt = 0;
169 
170 	mutex_lock(&ftrace_lock);
171 
172 	for (ops = ftrace_ops_list;
173 	     ops != &ftrace_list_end; ops = ops->next)
174 		cnt++;
175 
176 	mutex_unlock(&ftrace_lock);
177 
178 	return cnt;
179 }
180 
181 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
182 			    struct ftrace_ops *op, struct pt_regs *regs)
183 {
184 	struct trace_array *tr = op->private;
185 
186 	if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
187 		return;
188 
189 	op->saved_func(ip, parent_ip, op, regs);
190 }
191 
192 /**
193  * clear_ftrace_function - reset the ftrace function
194  *
195  * This NULLs the ftrace function and in essence stops
196  * tracing.  There may be a lag before all CPUs see the change.
197  */
198 void clear_ftrace_function(void)
199 {
200 	ftrace_trace_function = ftrace_stub;
201 }
202 
203 static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
204 {
205 	int cpu;
206 
207 	for_each_possible_cpu(cpu)
208 		*per_cpu_ptr(ops->disabled, cpu) = 1;
209 }
210 
211 static int per_cpu_ops_alloc(struct ftrace_ops *ops)
212 {
213 	int __percpu *disabled;
214 
215 	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
216 		return -EINVAL;
217 
218 	disabled = alloc_percpu(int);
219 	if (!disabled)
220 		return -ENOMEM;
221 
222 	ops->disabled = disabled;
223 	per_cpu_ops_disable_all(ops);
224 	return 0;
225 }
226 
227 static void ftrace_sync(struct work_struct *work)
228 {
229 	/*
230 	 * This function is just a stub to implement a hard force
231 	 * of synchronize_sched(). This requires synchronizing
232 	 * tasks even in userspace and idle.
233 	 *
234 	 * Yes, function tracing is rude.
235 	 */
236 }
237 
238 static void ftrace_sync_ipi(void *data)
239 {
240 	/* Probably not needed, but do it anyway */
241 	smp_rmb();
242 }
243 
244 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
245 static void update_function_graph_func(void);
246 
247 /* Both enabled by default (can be cleared by function_graph tracer flags) */
248 static bool fgraph_sleep_time = true;
249 static bool fgraph_graph_time = true;
250 
251 #else
252 static inline void update_function_graph_func(void) { }
253 #endif
254 
255 
256 static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
257 {
258 	/*
259 	 * If this is a dynamic, RCU, or per CPU ops, or we force list func,
260 	 * then it needs to call the list anyway.
261 	 */
262 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
263 			  FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
264 		return ftrace_ops_list_func;
265 
266 	return ftrace_ops_get_func(ops);
267 }
268 
269 static void update_ftrace_function(void)
270 {
271 	ftrace_func_t func;
272 
273 	/*
274 	 * Prepare the ftrace_ops that the arch callback will use.
275 	 * If there's only one ftrace_ops registered, the ftrace_ops_list
276 	 * will point to the ops we want.
277 	 */
278 	set_function_trace_op = ftrace_ops_list;
279 
280 	/* If there's no ftrace_ops registered, just call the stub function */
281 	if (ftrace_ops_list == &ftrace_list_end) {
282 		func = ftrace_stub;
283 
284 	/*
285 	 * If we are at the end of the list and this ops is
286 	 * recursion safe and not dynamic and the arch supports passing ops,
287 	 * then have the mcount trampoline call the function directly.
288 	 */
289 	} else if (ftrace_ops_list->next == &ftrace_list_end) {
290 		func = ftrace_ops_get_list_func(ftrace_ops_list);
291 
292 	} else {
293 		/* Just use the default ftrace_ops */
294 		set_function_trace_op = &ftrace_list_end;
295 		func = ftrace_ops_list_func;
296 	}
297 
298 	update_function_graph_func();
299 
300 	/* If there's no change, then do nothing more here */
301 	if (ftrace_trace_function == func)
302 		return;
303 
304 	/*
305 	 * If we are using the list function, it doesn't care
306 	 * about the function_trace_ops.
307 	 */
308 	if (func == ftrace_ops_list_func) {
309 		ftrace_trace_function = func;
310 		/*
311 		 * Don't even bother setting function_trace_ops,
312 		 * it would be racy to do so anyway.
313 		 */
314 		return;
315 	}
316 
317 #ifndef CONFIG_DYNAMIC_FTRACE
318 	/*
319 	 * For static tracing, we need to be a bit more careful.
320 	 * The function change takes effect immediately. Thus,
321 	 * we need to coordinate the setting of the function_trace_ops
322 	 * with the setting of the ftrace_trace_function.
323 	 *
324 	 * Set the function to the list ops, which will call the
325 	 * function we want, albeit indirectly, but it handles the
326 	 * ftrace_ops and doesn't depend on function_trace_op.
327 	 */
328 	ftrace_trace_function = ftrace_ops_list_func;
329 	/*
330 	 * Make sure all CPUs see this. Yes this is slow, but static
331 	 * tracing is slow and nasty to have enabled.
332 	 */
333 	schedule_on_each_cpu(ftrace_sync);
334 	/* Now all cpus are using the list ops. */
335 	function_trace_op = set_function_trace_op;
336 	/* Make sure the function_trace_op is visible on all CPUs */
337 	smp_wmb();
338 	/* Nasty way to force a rmb on all cpus */
339 	smp_call_function(ftrace_sync_ipi, NULL, 1);
340 	/* OK, we are all set to update the ftrace_trace_function now! */
341 #endif /* !CONFIG_DYNAMIC_FTRACE */
342 
343 	ftrace_trace_function = func;
344 }
345 
346 int using_ftrace_ops_list_func(void)
347 {
348 	return ftrace_trace_function == ftrace_ops_list_func;
349 }
350 
351 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
352 {
353 	ops->next = *list;
354 	/*
355 	 * We are entering ops into the list but another
356 	 * CPU might be walking that list. We need to make sure
357 	 * the ops->next pointer is valid before another CPU sees
358 	 * the ops pointer inserted into the list.
359 	 */
360 	rcu_assign_pointer(*list, ops);
361 }
362 
363 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
364 {
365 	struct ftrace_ops **p;
366 
367 	/*
368 	 * If we are removing the last function, then simply point
369 	 * to the ftrace_stub.
370 	 */
371 	if (*list == ops && ops->next == &ftrace_list_end) {
372 		*list = &ftrace_list_end;
373 		return 0;
374 	}
375 
376 	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
377 		if (*p == ops)
378 			break;
379 
380 	if (*p != ops)
381 		return -1;
382 
383 	*p = (*p)->next;
384 	return 0;
385 }
386 
387 static void ftrace_update_trampoline(struct ftrace_ops *ops);
388 
389 static int __register_ftrace_function(struct ftrace_ops *ops)
390 {
391 	if (ops->flags & FTRACE_OPS_FL_DELETED)
392 		return -EINVAL;
393 
394 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
395 		return -EBUSY;
396 
397 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
398 	/*
399 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
400 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
401 	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
402 	 */
403 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
404 	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
405 		return -EINVAL;
406 
407 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
408 		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
409 #endif
410 
411 	if (!core_kernel_data((unsigned long)ops))
412 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
413 
414 	if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
415 		if (per_cpu_ops_alloc(ops))
416 			return -ENOMEM;
417 	}
418 
419 	add_ftrace_ops(&ftrace_ops_list, ops);
420 
421 	/* Always save the function, and reset at unregistering */
422 	ops->saved_func = ops->func;
423 
424 	if (ftrace_pids_enabled(ops))
425 		ops->func = ftrace_pid_func;
426 
427 	ftrace_update_trampoline(ops);
428 
429 	if (ftrace_enabled)
430 		update_ftrace_function();
431 
432 	return 0;
433 }
434 
435 static int __unregister_ftrace_function(struct ftrace_ops *ops)
436 {
437 	int ret;
438 
439 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
440 		return -EBUSY;
441 
442 	ret = remove_ftrace_ops(&ftrace_ops_list, ops);
443 
444 	if (ret < 0)
445 		return ret;
446 
447 	if (ftrace_enabled)
448 		update_ftrace_function();
449 
450 	ops->func = ops->saved_func;
451 
452 	return 0;
453 }
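/*
 * An illustrative sketch of a client of the registration path above.
 * The callback and ops names here are hypothetical; real callers go
 * through the public register_ftrace_function() wrapper, which takes
 * ftrace_lock and lands in __register_ftrace_function():
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		trace_printk("traced %pS from %pS\n",
 *			     (void *)ip, (void *)parent_ip);
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	ftrace_set_filter(&my_ops, "do_sys_open", strlen("do_sys_open"), 0);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */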
454 
455 static void ftrace_update_pid_func(void)
456 {
457 	struct ftrace_ops *op;
458 
459 	/* Only do something if we are tracing something */
460 	if (ftrace_trace_function == ftrace_stub)
461 		return;
462 
463 	do_for_each_ftrace_op(op, ftrace_ops_list) {
464 		if (op->flags & FTRACE_OPS_FL_PID) {
465 			op->func = ftrace_pids_enabled(op) ?
466 				ftrace_pid_func : op->saved_func;
467 			ftrace_update_trampoline(op);
468 		}
469 	} while_for_each_ftrace_op(op);
470 
471 	update_ftrace_function();
472 }
473 
474 #ifdef CONFIG_FUNCTION_PROFILER
475 struct ftrace_profile {
476 	struct hlist_node		node;
477 	unsigned long			ip;
478 	unsigned long			counter;
479 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
480 	unsigned long long		time;
481 	unsigned long long		time_squared;
482 #endif
483 };
484 
485 struct ftrace_profile_page {
486 	struct ftrace_profile_page	*next;
487 	unsigned long			index;
488 	struct ftrace_profile		records[];
489 };
490 
491 struct ftrace_profile_stat {
492 	atomic_t			disabled;
493 	struct hlist_head		*hash;
494 	struct ftrace_profile_page	*pages;
495 	struct ftrace_profile_page	*start;
496 	struct tracer_stat		stat;
497 };
498 
499 #define PROFILE_RECORDS_SIZE						\
500 	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
501 
502 #define PROFILES_PER_PAGE					\
503 	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
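/*
 * Worked example, assuming a 64-bit build with 4K pages: the page
 * header (next + index) occupies 16 bytes, so PROFILE_RECORDS_SIZE is
 * 4096 - 16 = 4080. With CONFIG_FUNCTION_GRAPH_TRACER a struct
 * ftrace_profile is 48 bytes (16-byte hlist_node plus ip, counter,
 * time and time_squared), giving 4080 / 48 = 85 records per page;
 * without it the struct is 32 bytes, giving 127.
 */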
504 
505 static int ftrace_profile_enabled __read_mostly;
506 
507 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
508 static DEFINE_MUTEX(ftrace_profile_lock);
509 
510 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
511 
512 #define FTRACE_PROFILE_HASH_BITS 10
513 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
514 
515 static void *
516 function_stat_next(void *v, int idx)
517 {
518 	struct ftrace_profile *rec = v;
519 	struct ftrace_profile_page *pg;
520 
521 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
522 
523  again:
524 	if (idx != 0)
525 		rec++;
526 
527 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
528 		pg = pg->next;
529 		if (!pg)
530 			return NULL;
531 		rec = &pg->records[0];
532 		if (!rec->counter)
533 			goto again;
534 	}
535 
536 	return rec;
537 }
538 
539 static void *function_stat_start(struct tracer_stat *trace)
540 {
541 	struct ftrace_profile_stat *stat =
542 		container_of(trace, struct ftrace_profile_stat, stat);
543 
544 	if (!stat || !stat->start)
545 		return NULL;
546 
547 	return function_stat_next(&stat->start->records[0], 0);
548 }
549 
550 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
551 /* function graph compares on total time */
552 static int function_stat_cmp(void *p1, void *p2)
553 {
554 	struct ftrace_profile *a = p1;
555 	struct ftrace_profile *b = p2;
556 
557 	if (a->time < b->time)
558 		return -1;
559 	if (a->time > b->time)
560 		return 1;
561 	else
562 		return 0;
563 }
564 #else
565 /* without function graph, compare against hits */
566 static int function_stat_cmp(void *p1, void *p2)
567 {
568 	struct ftrace_profile *a = p1;
569 	struct ftrace_profile *b = p2;
570 
571 	if (a->counter < b->counter)
572 		return -1;
573 	if (a->counter > b->counter)
574 		return 1;
575 	else
576 		return 0;
577 }
578 #endif
579 
580 static int function_stat_headers(struct seq_file *m)
581 {
582 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
583 	seq_puts(m, "  Function                               "
584 		 "Hit    Time            Avg             s^2\n"
585 		    "  --------                               "
586 		 "---    ----            ---             ---\n");
587 #else
588 	seq_puts(m, "  Function                               Hit\n"
589 		    "  --------                               ---\n");
590 #endif
591 	return 0;
592 }
593 
594 static int function_stat_show(struct seq_file *m, void *v)
595 {
596 	struct ftrace_profile *rec = v;
597 	char str[KSYM_SYMBOL_LEN];
598 	int ret = 0;
599 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
600 	static struct trace_seq s;
601 	unsigned long long avg;
602 	unsigned long long stddev;
603 #endif
604 	mutex_lock(&ftrace_profile_lock);
605 
606 	/* we raced with function_profile_reset() */
607 	if (unlikely(rec->counter == 0)) {
608 		ret = -EBUSY;
609 		goto out;
610 	}
611 
612 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
613 	avg = rec->time;
614 	do_div(avg, rec->counter);
615 	if (tracing_thresh && (avg < tracing_thresh))
616 		goto out;
617 #endif
618 
619 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
620 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
621 
622 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
623 	seq_puts(m, "    ");
624 
625 	/* Sample variance (s^2) */
626 	if (rec->counter <= 1)
627 		stddev = 0;
628 	else {
629 		/*
630 		 * Apply the textbook sample-variance formula:
631 		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
632 		 */
633 		stddev = rec->counter * rec->time_squared -
634 			 rec->time * rec->time;
635 
636 		/*
637 		 * Divide only by 1000 here for the ns^2 -> us^2 conversion;
638 		 * trace_print_graph_duration() will divide by 1000 again.
639 		 */
640 		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
641 	}
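	/*
	 * Worked example with hypothetical numbers: two hits taking
	 * 3000ns and 5000ns give rec->time = 8000 and
	 * rec->time_squared = 34000000, so stddev becomes
	 * 2 * 34000000 - 8000 * 8000 = 4000000 (ns^2). Dividing by
	 * n * (n-1) * 1000 = 2000 leaves 2000, and the second divide
	 * by 1000 in trace_print_graph_duration() prints the true
	 * sample variance of 2 us^2.
	 */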
642 
643 	trace_seq_init(&s);
644 	trace_print_graph_duration(rec->time, &s);
645 	trace_seq_puts(&s, "    ");
646 	trace_print_graph_duration(avg, &s);
647 	trace_seq_puts(&s, "    ");
648 	trace_print_graph_duration(stddev, &s);
649 	trace_print_seq(m, &s);
650 #endif
651 	seq_putc(m, '\n');
652 out:
653 	mutex_unlock(&ftrace_profile_lock);
654 
655 	return ret;
656 }
657 
658 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
659 {
660 	struct ftrace_profile_page *pg;
661 
662 	pg = stat->pages = stat->start;
663 
664 	while (pg) {
665 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
666 		pg->index = 0;
667 		pg = pg->next;
668 	}
669 
670 	memset(stat->hash, 0,
671 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
672 }
673 
674 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
675 {
676 	struct ftrace_profile_page *pg;
677 	int functions;
678 	int pages;
679 	int i;
680 
681 	/* If we already allocated, do nothing */
682 	if (stat->pages)
683 		return 0;
684 
685 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
686 	if (!stat->pages)
687 		return -ENOMEM;
688 
689 #ifdef CONFIG_DYNAMIC_FTRACE
690 	functions = ftrace_update_tot_cnt;
691 #else
692 	/*
693 	 * We do not know the number of functions that exist because
694 	 * dynamic tracing is what counts them. From past experience,
695 	 * we have around 20K functions. That should be more than enough.
696 	 * It is highly unlikely we will execute every function in
697 	 * the kernel.
698 	 */
699 	functions = 20000;
700 #endif
701 
702 	pg = stat->start = stat->pages;
703 
704 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
705 
706 	for (i = 1; i < pages; i++) {
707 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
708 		if (!pg->next)
709 			goto out_free;
710 		pg = pg->next;
711 	}
712 
713 	return 0;
714 
715  out_free:
716 	pg = stat->start;
717 	while (pg) {
718 		unsigned long tmp = (unsigned long)pg;
719 
720 		pg = pg->next;
721 		free_page(tmp);
722 	}
723 
724 	stat->pages = NULL;
725 	stat->start = NULL;
726 
727 	return -ENOMEM;
728 }
729 
730 static int ftrace_profile_init_cpu(int cpu)
731 {
732 	struct ftrace_profile_stat *stat;
733 	int size;
734 
735 	stat = &per_cpu(ftrace_profile_stats, cpu);
736 
737 	if (stat->hash) {
738 		/* If the profile is already created, simply reset it */
739 		ftrace_profile_reset(stat);
740 		return 0;
741 	}
742 
743 	/*
744 	 * We are profiling all functions, but usually only a few thousand
745 	 * functions are hit. We'll make a hash of 1024 items.
746 	 */
747 	size = FTRACE_PROFILE_HASH_SIZE;
748 
749 	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
750 
751 	if (!stat->hash)
752 		return -ENOMEM;
753 
754 	/* Preallocate the function profiling pages */
755 	if (ftrace_profile_pages_init(stat) < 0) {
756 		kfree(stat->hash);
757 		stat->hash = NULL;
758 		return -ENOMEM;
759 	}
760 
761 	return 0;
762 }
763 
764 static int ftrace_profile_init(void)
765 {
766 	int cpu;
767 	int ret = 0;
768 
769 	for_each_possible_cpu(cpu) {
770 		ret = ftrace_profile_init_cpu(cpu);
771 		if (ret)
772 			break;
773 	}
774 
775 	return ret;
776 }
777 
778 /* interrupts must be disabled */
779 static struct ftrace_profile *
780 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
781 {
782 	struct ftrace_profile *rec;
783 	struct hlist_head *hhd;
784 	unsigned long key;
785 
786 	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
787 	hhd = &stat->hash[key];
788 
789 	if (hlist_empty(hhd))
790 		return NULL;
791 
792 	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
793 		if (rec->ip == ip)
794 			return rec;
795 	}
796 
797 	return NULL;
798 }
799 
800 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
801 			       struct ftrace_profile *rec)
802 {
803 	unsigned long key;
804 
805 	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
806 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
807 }
808 
809 /*
810  * The memory is already allocated; this simply finds a new record to use.
811  */
812 static struct ftrace_profile *
813 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
814 {
815 	struct ftrace_profile *rec = NULL;
816 
817 	/* prevent recursion (from NMIs) */
818 	if (atomic_inc_return(&stat->disabled) != 1)
819 		goto out;
820 
821 	/*
822 	 * Try to find the function again since an NMI
823 	 * could have added it
824 	 */
825 	rec = ftrace_find_profiled_func(stat, ip);
826 	if (rec)
827 		goto out;
828 
829 	if (stat->pages->index == PROFILES_PER_PAGE) {
830 		if (!stat->pages->next)
831 			goto out;
832 		stat->pages = stat->pages->next;
833 	}
834 
835 	rec = &stat->pages->records[stat->pages->index++];
836 	rec->ip = ip;
837 	ftrace_add_profile(stat, rec);
838 
839  out:
840 	atomic_dec(&stat->disabled);
841 
842 	return rec;
843 }
844 
845 static void
846 function_profile_call(unsigned long ip, unsigned long parent_ip,
847 		      struct ftrace_ops *ops, struct pt_regs *regs)
848 {
849 	struct ftrace_profile_stat *stat;
850 	struct ftrace_profile *rec;
851 	unsigned long flags;
852 
853 	if (!ftrace_profile_enabled)
854 		return;
855 
856 	local_irq_save(flags);
857 
858 	stat = this_cpu_ptr(&ftrace_profile_stats);
859 	if (!stat->hash || !ftrace_profile_enabled)
860 		goto out;
861 
862 	rec = ftrace_find_profiled_func(stat, ip);
863 	if (!rec) {
864 		rec = ftrace_profile_alloc(stat, ip);
865 		if (!rec)
866 			goto out;
867 	}
868 
869 	rec->counter++;
870  out:
871 	local_irq_restore(flags);
872 }
873 
874 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
875 static int profile_graph_entry(struct ftrace_graph_ent *trace)
876 {
877 	int index = trace->depth;
878 
879 	function_profile_call(trace->func, 0, NULL, NULL);
880 
881 	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
882 		current->ret_stack[index].subtime = 0;
883 
884 	return 1;
885 }
886 
887 static void profile_graph_return(struct ftrace_graph_ret *trace)
888 {
889 	struct ftrace_profile_stat *stat;
890 	unsigned long long calltime;
891 	struct ftrace_profile *rec;
892 	unsigned long flags;
893 
894 	local_irq_save(flags);
895 	stat = this_cpu_ptr(&ftrace_profile_stats);
896 	if (!stat->hash || !ftrace_profile_enabled)
897 		goto out;
898 
899 	/* If the calltime was zero'd, ignore it */
900 	if (!trace->calltime)
901 		goto out;
902 
903 	calltime = trace->rettime - trace->calltime;
904 
905 	if (!fgraph_graph_time) {
906 		int index;
907 
908 		index = trace->depth;
909 
910 		/* Append this call time to the parent time to subtract */
911 		if (index)
912 			current->ret_stack[index - 1].subtime += calltime;
913 
914 		if (current->ret_stack[index].subtime < calltime)
915 			calltime -= current->ret_stack[index].subtime;
916 		else
917 			calltime = 0;
918 	}
919 
920 	rec = ftrace_find_profiled_func(stat, trace->func);
921 	if (rec) {
922 		rec->time += calltime;
923 		rec->time_squared += calltime * calltime;
924 	}
925 
926  out:
927 	local_irq_restore(flags);
928 }
929 
930 static int register_ftrace_profiler(void)
931 {
932 	return register_ftrace_graph(&profile_graph_return,
933 				     &profile_graph_entry);
934 }
935 
936 static void unregister_ftrace_profiler(void)
937 {
938 	unregister_ftrace_graph();
939 }
940 #else
941 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
942 	.func		= function_profile_call,
943 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
944 	INIT_OPS_HASH(ftrace_profile_ops)
945 };
946 
947 static int register_ftrace_profiler(void)
948 {
949 	return register_ftrace_function(&ftrace_profile_ops);
950 }
951 
952 static void unregister_ftrace_profiler(void)
953 {
954 	unregister_ftrace_function(&ftrace_profile_ops);
955 }
956 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
957 
958 static ssize_t
959 ftrace_profile_write(struct file *filp, const char __user *ubuf,
960 		     size_t cnt, loff_t *ppos)
961 {
962 	unsigned long val;
963 	int ret;
964 
965 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
966 	if (ret)
967 		return ret;
968 
969 	val = !!val;
970 
971 	mutex_lock(&ftrace_profile_lock);
972 	if (ftrace_profile_enabled ^ val) {
973 		if (val) {
974 			ret = ftrace_profile_init();
975 			if (ret < 0) {
976 				cnt = ret;
977 				goto out;
978 			}
979 
980 			ret = register_ftrace_profiler();
981 			if (ret < 0) {
982 				cnt = ret;
983 				goto out;
984 			}
985 			ftrace_profile_enabled = 1;
986 		} else {
987 			ftrace_profile_enabled = 0;
988 			/*
989 			 * unregister_ftrace_profiler calls stop_machine
990 			 * so this acts like an synchronize_sched.
991 			 */
992 			unregister_ftrace_profiler();
993 		}
994 	}
995  out:
996 	mutex_unlock(&ftrace_profile_lock);
997 
998 	*ppos += cnt;
999 
1000 	return cnt;
1001 }
1002 
1003 static ssize_t
1004 ftrace_profile_read(struct file *filp, char __user *ubuf,
1005 		     size_t cnt, loff_t *ppos)
1006 {
1007 	char buf[64];		/* big enough to hold a number */
1008 	int r;
1009 
1010 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
1011 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1012 }
1013 
1014 static const struct file_operations ftrace_profile_fops = {
1015 	.open		= tracing_open_generic,
1016 	.read		= ftrace_profile_read,
1017 	.write		= ftrace_profile_write,
1018 	.llseek		= default_llseek,
1019 };
1020 
1021 /* used to initialize the real stat files */
1022 static struct tracer_stat function_stats __initdata = {
1023 	.name		= "functions",
1024 	.stat_start	= function_stat_start,
1025 	.stat_next	= function_stat_next,
1026 	.stat_cmp	= function_stat_cmp,
1027 	.stat_headers	= function_stat_headers,
1028 	.stat_show	= function_stat_show
1029 };
1030 
1031 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1032 {
1033 	struct ftrace_profile_stat *stat;
1034 	struct dentry *entry;
1035 	char *name;
1036 	int ret;
1037 	int cpu;
1038 
1039 	for_each_possible_cpu(cpu) {
1040 		stat = &per_cpu(ftrace_profile_stats, cpu);
1041 
1042 		name = kasprintf(GFP_KERNEL, "function%d", cpu);
1043 		if (!name) {
1044 			/*
1045 			 * The files created are permanent; if something goes
1046 			 * wrong, we still do not free the memory.
1047 			 */
1048 			WARN(1,
1049 			     "Could not allocate stat file for cpu %d\n",
1050 			     cpu);
1051 			return;
1052 		}
1053 		stat->stat = function_stats;
1054 		stat->stat.name = name;
1055 		ret = register_stat_tracer(&stat->stat);
1056 		if (ret) {
1057 			WARN(1,
1058 			     "Could not register function stat for cpu %d\n",
1059 			     cpu);
1060 			kfree(name);
1061 			return;
1062 		}
1063 	}
1064 
1065 	entry = tracefs_create_file("function_profile_enabled", 0644,
1066 				    d_tracer, NULL, &ftrace_profile_fops);
1067 	if (!entry)
1068 		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
1069 }
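/*
 * Typical use from userspace (illustrative; the tracefs mount point
 * may be /sys/kernel/tracing or /sys/kernel/debug/tracing depending
 * on the distribution):
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * where function0 is the per-cpu stat file registered above for CPU 0.
 */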
1070 
1071 #else /* CONFIG_FUNCTION_PROFILER */
1072 static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
1073 {
1074 }
1075 #endif /* CONFIG_FUNCTION_PROFILER */
1076 
1077 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1078 
1079 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1080 static int ftrace_graph_active;
1081 #else
1082 # define ftrace_graph_active 0
1083 #endif
1084 
1085 #ifdef CONFIG_DYNAMIC_FTRACE
1086 
1087 static struct ftrace_ops *removed_ops;
1088 
1089 /*
1090  * Set when doing a global update, like enabling all recs or disabling them.
1091  * It is not set when just updating a single ftrace_ops.
1092  */
1093 static bool update_all_ops;
1094 
1095 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1096 # error Dynamic ftrace depends on MCOUNT_RECORD
1097 #endif
1098 
1099 struct ftrace_func_entry {
1100 	struct hlist_node hlist;
1101 	unsigned long ip;
1102 };
1103 
1104 struct ftrace_func_probe {
1105 	struct ftrace_probe_ops	*probe_ops;
1106 	struct ftrace_ops	ops;
1107 	struct trace_array	*tr;
1108 	struct list_head	list;
1109 	void			*data;
1110 	int			ref;
1111 };
1112 
1113 /*
1114  * We make these constant because no one should touch them,
1115  * but they are used as the default "empty hash", to avoid allocating
1116  * it all the time. These are in a read-only section such that if
1117  * anyone does try to modify them, it will cause an exception.
1118  */
1119 static const struct hlist_head empty_buckets[1];
1120 static const struct ftrace_hash empty_hash = {
1121 	.buckets = (struct hlist_head *)empty_buckets,
1122 };
1123 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1124 
1125 static struct ftrace_ops global_ops = {
1126 	.func				= ftrace_stub,
1127 	.local_hash.notrace_hash	= EMPTY_HASH,
1128 	.local_hash.filter_hash		= EMPTY_HASH,
1129 	INIT_OPS_HASH(global_ops)
1130 	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
1131 					  FTRACE_OPS_FL_INITIALIZED |
1132 					  FTRACE_OPS_FL_PID,
1133 };
1134 
1135 /*
1136  * This is used by __kernel_text_address() to return true if the
1137  * address is on a dynamically allocated trampoline that would
1138  * not return true for either core_kernel_text() or
1139  * is_module_text_address().
1140  */
1141 bool is_ftrace_trampoline(unsigned long addr)
1142 {
1143 	struct ftrace_ops *op;
1144 	bool ret = false;
1145 
1146 	/*
1147 	 * Some of the ops may be dynamically allocated;
1148 	 * they are freed after a synchronize_sched().
1149 	 */
1150 	preempt_disable_notrace();
1151 
1152 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1153 		/*
1154 		 * This is to check for dynamically allocated trampolines.
1155 		 * Trampolines that are in kernel text will have
1156 		 * core_kernel_text() return true.
1157 		 */
1158 		if (op->trampoline && op->trampoline_size)
1159 			if (addr >= op->trampoline &&
1160 			    addr < op->trampoline + op->trampoline_size) {
1161 				ret = true;
1162 				goto out;
1163 			}
1164 	} while_for_each_ftrace_op(op);
1165 
1166  out:
1167 	preempt_enable_notrace();
1168 
1169 	return ret;
1170 }
1171 
1172 struct ftrace_page {
1173 	struct ftrace_page	*next;
1174 	struct dyn_ftrace	*records;
1175 	int			index;
1176 	int			size;
1177 };
1178 
1179 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1180 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1181 
1182 /* estimate from running different kernels */
1183 #define NR_TO_INIT		10000
1184 
1185 static struct ftrace_page	*ftrace_pages_start;
1186 static struct ftrace_page	*ftrace_pages;
1187 
1188 static __always_inline unsigned long
1189 ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
1190 {
1191 	if (hash->size_bits > 0)
1192 		return hash_long(ip, hash->size_bits);
1193 
1194 	return 0;
1195 }
1196 
1197 /* Only use this function if ftrace_hash_empty() has already been tested */
1198 static __always_inline struct ftrace_func_entry *
1199 __ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1200 {
1201 	unsigned long key;
1202 	struct ftrace_func_entry *entry;
1203 	struct hlist_head *hhd;
1204 
1205 	key = ftrace_hash_key(hash, ip);
1206 	hhd = &hash->buckets[key];
1207 
1208 	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1209 		if (entry->ip == ip)
1210 			return entry;
1211 	}
1212 	return NULL;
1213 }
1214 
1215 /**
1216  * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
1217  * @hash: The hash to look at
1218  * @ip: The instruction pointer to test
1219  *
1220  * Search a given @hash to see if a given instruction pointer (@ip)
1221  * exists in it.
1222  *
1223  * Returns the entry that holds the @ip if found. NULL otherwise.
1224  */
1225 struct ftrace_func_entry *
1226 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1227 {
1228 	if (ftrace_hash_empty(hash))
1229 		return NULL;
1230 
1231 	return __ftrace_lookup_ip(hash, ip);
1232 }
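/*
 * An illustrative lookup sketch (caller and variable names are
 * hypothetical). Readers are lockless, so they must run with
 * preemption disabled (or under ftrace_lock), since the hashes are
 * freed with call_rcu_sched(); see ftrace_ops_test() below:
 *
 *	preempt_disable_notrace();
 *	if (ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
 *		nr_filtered++;
 *	preempt_enable_notrace();
 */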
1233 
1234 static void __add_hash_entry(struct ftrace_hash *hash,
1235 			     struct ftrace_func_entry *entry)
1236 {
1237 	struct hlist_head *hhd;
1238 	unsigned long key;
1239 
1240 	key = ftrace_hash_key(hash, entry->ip);
1241 	hhd = &hash->buckets[key];
1242 	hlist_add_head(&entry->hlist, hhd);
1243 	hash->count++;
1244 }
1245 
1246 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1247 {
1248 	struct ftrace_func_entry *entry;
1249 
1250 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1251 	if (!entry)
1252 		return -ENOMEM;
1253 
1254 	entry->ip = ip;
1255 	__add_hash_entry(hash, entry);
1256 
1257 	return 0;
1258 }
1259 
1260 static void
1261 free_hash_entry(struct ftrace_hash *hash,
1262 		  struct ftrace_func_entry *entry)
1263 {
1264 	hlist_del(&entry->hlist);
1265 	kfree(entry);
1266 	hash->count--;
1267 }
1268 
1269 static void
1270 remove_hash_entry(struct ftrace_hash *hash,
1271 		  struct ftrace_func_entry *entry)
1272 {
1273 	hlist_del_rcu(&entry->hlist);
1274 	hash->count--;
1275 }
1276 
1277 static void ftrace_hash_clear(struct ftrace_hash *hash)
1278 {
1279 	struct hlist_head *hhd;
1280 	struct hlist_node *tn;
1281 	struct ftrace_func_entry *entry;
1282 	int size = 1 << hash->size_bits;
1283 	int i;
1284 
1285 	if (!hash->count)
1286 		return;
1287 
1288 	for (i = 0; i < size; i++) {
1289 		hhd = &hash->buckets[i];
1290 		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1291 			free_hash_entry(hash, entry);
1292 	}
1293 	FTRACE_WARN_ON(hash->count);
1294 }
1295 
1296 static void free_ftrace_hash(struct ftrace_hash *hash)
1297 {
1298 	if (!hash || hash == EMPTY_HASH)
1299 		return;
1300 	ftrace_hash_clear(hash);
1301 	kfree(hash->buckets);
1302 	kfree(hash);
1303 }
1304 
1305 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1306 {
1307 	struct ftrace_hash *hash;
1308 
1309 	hash = container_of(rcu, struct ftrace_hash, rcu);
1310 	free_ftrace_hash(hash);
1311 }
1312 
1313 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1314 {
1315 	if (!hash || hash == EMPTY_HASH)
1316 		return;
1317 	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1318 }
1319 
1320 void ftrace_free_filter(struct ftrace_ops *ops)
1321 {
1322 	ftrace_ops_init(ops);
1323 	free_ftrace_hash(ops->func_hash->filter_hash);
1324 	free_ftrace_hash(ops->func_hash->notrace_hash);
1325 }
1326 
1327 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1328 {
1329 	struct ftrace_hash *hash;
1330 	int size;
1331 
1332 	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1333 	if (!hash)
1334 		return NULL;
1335 
1336 	size = 1 << size_bits;
1337 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1338 
1339 	if (!hash->buckets) {
1340 		kfree(hash);
1341 		return NULL;
1342 	}
1343 
1344 	hash->size_bits = size_bits;
1345 
1346 	return hash;
1347 }
1348 
1349 static struct ftrace_hash *
1350 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1351 {
1352 	struct ftrace_func_entry *entry;
1353 	struct ftrace_hash *new_hash;
1354 	int size;
1355 	int ret;
1356 	int i;
1357 
1358 	new_hash = alloc_ftrace_hash(size_bits);
1359 	if (!new_hash)
1360 		return NULL;
1361 
1362 	/* Empty hash? */
1363 	if (ftrace_hash_empty(hash))
1364 		return new_hash;
1365 
1366 	size = 1 << hash->size_bits;
1367 	for (i = 0; i < size; i++) {
1368 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1369 			ret = add_hash_entry(new_hash, entry->ip);
1370 			if (ret < 0)
1371 				goto free_hash;
1372 		}
1373 	}
1374 
1375 	FTRACE_WARN_ON(new_hash->count != hash->count);
1376 
1377 	return new_hash;
1378 
1379  free_hash:
1380 	free_ftrace_hash(new_hash);
1381 	return NULL;
1382 }
1383 
1384 static void
1385 ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
1386 static void
1387 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
1388 
1389 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1390 				       struct ftrace_hash *new_hash);
1391 
1392 static struct ftrace_hash *
1393 __ftrace_hash_move(struct ftrace_hash *src)
1394 {
1395 	struct ftrace_func_entry *entry;
1396 	struct hlist_node *tn;
1397 	struct hlist_head *hhd;
1398 	struct ftrace_hash *new_hash;
1399 	int size = src->count;
1400 	int bits = 0;
1401 	int i;
1402 
1403 	/*
1404 	 * If the new source is empty, just return the empty_hash.
1405 	 */
1406 	if (!src->count)
1407 		return EMPTY_HASH;
1408 
1409 	/*
1410 	 * Make the hash size about 1/2 the # found
1411 	 */
1412 	for (size /= 2; size; size >>= 1)
1413 		bits++;
1414 
1415 	/* Don't allocate too much */
1416 	if (bits > FTRACE_HASH_MAX_BITS)
1417 		bits = FTRACE_HASH_MAX_BITS;
1418 
1419 	new_hash = alloc_ftrace_hash(bits);
1420 	if (!new_hash)
1421 		return NULL;
1422 
1423 	size = 1 << src->size_bits;
1424 	for (i = 0; i < size; i++) {
1425 		hhd = &src->buckets[i];
1426 		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1427 			remove_hash_entry(src, entry);
1428 			__add_hash_entry(new_hash, entry);
1429 		}
1430 	}
1431 
1432 	return new_hash;
1433 }
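/*
 * Sizing example with a hypothetical count: for src->count == 50 the
 * loop above halves 50 to 25 and then counts shifts 25 -> 12 -> 6 ->
 * 3 -> 1 -> 0, ending with bits == 5, i.e. a 32-bucket table of
 * roughly half the entry count. With many thousands of entries the
 * clamp to FTRACE_HASH_MAX_BITS (12) caps the table at 4096 buckets.
 */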
1434 
1435 static int
1436 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1437 		 struct ftrace_hash **dst, struct ftrace_hash *src)
1438 {
1439 	struct ftrace_hash *new_hash;
1440 	int ret;
1441 
1442 	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
1443 	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
1444 		return -EINVAL;
1445 
1446 	new_hash = __ftrace_hash_move(src);
1447 	if (!new_hash)
1448 		return -ENOMEM;
1449 
1450 	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
1451 	if (enable) {
1452 		/* IPMODIFY should be updated only when the filter_hash is updated */
1453 		ret = ftrace_hash_ipmodify_update(ops, new_hash);
1454 		if (ret < 0) {
1455 			free_ftrace_hash(new_hash);
1456 			return ret;
1457 		}
1458 	}
1459 
1460 	/*
1461 	 * Remove the current set, update the hash and add
1462 	 * them back.
1463 	 */
1464 	ftrace_hash_rec_disable_modify(ops, enable);
1465 
1466 	rcu_assign_pointer(*dst, new_hash);
1467 
1468 	ftrace_hash_rec_enable_modify(ops, enable);
1469 
1470 	return 0;
1471 }
1472 
1473 static bool hash_contains_ip(unsigned long ip,
1474 			     struct ftrace_ops_hash *hash)
1475 {
1476 	/*
1477 	 * The function record is a match if it exists in the filter
1478 	 * hash and not in the notrace hash. Note, an empty hash is
1479 	 * considered a match for the filter hash, but an empty
1480 	 * notrace hash is considered not in the notrace hash.
1481 	 */
1482 	return (ftrace_hash_empty(hash->filter_hash) ||
1483 		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
1484 		(ftrace_hash_empty(hash->notrace_hash) ||
1485 		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
1486 }
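/*
 * Concrete cases with hypothetical hashes: given filter_hash = { foo }
 * and notrace_hash = { bar }, foo matches while bar and baz do not
 * (baz fails the filter, bar hits the notrace hash). With an empty
 * filter_hash, everything except bar matches, because an empty filter
 * means "all functions" while an empty notrace hash means "none".
 */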
1487 
1488 /*
1489  * Test the hashes for this ops to see if we want to call
1490  * the ops->func or not.
1491  *
1492  * It's a match if the ip is in the ops->filter_hash or
1493  * the filter_hash does not exist or is empty,
1494  *  AND
1495  * the ip is not in the ops->notrace_hash.
1496  *
1497  * This needs to be called with preemption disabled as
1498  * the hashes are freed with call_rcu_sched().
1499  */
1500 static int
1501 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1502 {
1503 	struct ftrace_ops_hash hash;
1504 	int ret;
1505 
1506 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1507 	/*
1508 	 * There's a small race when adding ops in which the ftrace handler
1509 	 * that wants regs may be called without them. We can not
1510 	 * allow that handler to be called if regs is NULL.
1511 	 */
1512 	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1513 		return 0;
1514 #endif
1515 
1516 	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
1517 	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
1518 
1519 	if (hash_contains_ip(ip, &hash))
1520 		ret = 1;
1521 	else
1522 		ret = 0;
1523 
1524 	return ret;
1525 }
1526 
1527 /*
1528  * This is a double loop. Do not use 'break' to break out of the loop;
1529  * you must use a goto.
1530  */
1531 #define do_for_each_ftrace_rec(pg, rec)					\
1532 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1533 		int _____i;						\
1534 		for (_____i = 0; _____i < pg->index; _____i++) {	\
1535 			rec = &pg->records[_____i];
1536 
1537 #define while_for_each_ftrace_rec()		\
1538 		}				\
1539 	}
1540 
1541 
1542 static int ftrace_cmp_recs(const void *a, const void *b)
1543 {
1544 	const struct dyn_ftrace *key = a;
1545 	const struct dyn_ftrace *rec = b;
1546 
1547 	if (key->flags < rec->ip)
1548 		return -1;
1549 	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1550 		return 1;
1551 	return 0;
1552 }
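/*
 * Example with hypothetical addresses (MCOUNT_INSN_SIZE == 5, as on
 * x86): a record at rec->ip == 0x1000 covers bytes 0x1000..0x1004.
 * A key built by ftrace_location_range() for [0x0ffe, 0x1001] has
 * key->ip == 0x0ffe and key->flags == 0x1001, so neither comparison
 * above fires and bsearch() reports a match even though the range
 * only grazes the call site.
 */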
1553 
1554 /**
1555  * ftrace_location_range - return the first address of a traced location
1556  *	if it touches the given ip range
1557  * @start: start of range to search.
1558  * @end: end of range to search (inclusive). @end points to the last byte
1559  *	to check.
1560  *
1561  * Returns rec->ip if the related ftrace location is at least partly within
1562  * the given address range. That is, the first address of the instruction
1563  * that is either a NOP or call to the function tracer. It checks the ftrace
1564  * internal tables to determine if the address belongs or not.
1565  */
1566 unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1567 {
1568 	struct ftrace_page *pg;
1569 	struct dyn_ftrace *rec;
1570 	struct dyn_ftrace key;
1571 
1572 	key.ip = start;
1573 	key.flags = end;	/* overload flags, as it is unsigned long */
1574 
1575 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1576 		if (end < pg->records[0].ip ||
1577 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1578 			continue;
1579 		rec = bsearch(&key, pg->records, pg->index,
1580 			      sizeof(struct dyn_ftrace),
1581 			      ftrace_cmp_recs);
1582 		if (rec)
1583 			return rec->ip;
1584 	}
1585 
1586 	return 0;
1587 }
1588 
1589 /**
1590  * ftrace_location - return true if the given ip is a traced location
1591  * @ip: the instruction pointer to check
1592  *
1593  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1594  * That is, the instruction that is either a NOP or call to
1595  * the function tracer. It checks the ftrace internal tables to
1596  * determine if the address belongs or not.
1597  */
1598 unsigned long ftrace_location(unsigned long ip)
1599 {
1600 	return ftrace_location_range(ip, ip);
1601 }
1602 
1603 /**
1604  * ftrace_text_reserved - return true if range contains an ftrace location
1605  * @start: start of range to search
1606  * @end: end of range to search (inclusive). @end points to the last byte to check.
1607  *
1608  * Returns 1 if @start and @end contain a ftrace location.
1609  * That is, the instruction that is either a NOP or call to
1610  * the function tracer. It checks the ftrace internal tables to
1611  * determine if the address belongs or not.
1612  */
1613 int ftrace_text_reserved(const void *start, const void *end)
1614 {
1615 	unsigned long ret;
1616 
1617 	ret = ftrace_location_range((unsigned long)start,
1618 				    (unsigned long)end);
1619 
1620 	return (int)!!ret;
1621 }
1622 
1623 /* Test if ops registered to this rec needs regs */
1624 static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
1625 {
1626 	struct ftrace_ops *ops;
1627 	bool keep_regs = false;
1628 
1629 	for (ops = ftrace_ops_list;
1630 	     ops != &ftrace_list_end; ops = ops->next) {
1631 		/* pass rec in as regs to have non-NULL val */
1632 		if (ftrace_ops_test(ops, rec->ip, rec)) {
1633 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1634 				keep_regs = true;
1635 				break;
1636 			}
1637 		}
1638 	}
1639 
1640 	return  keep_regs;
1641 }
1642 
1643 static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
1644 				     int filter_hash,
1645 				     bool inc)
1646 {
1647 	struct ftrace_hash *hash;
1648 	struct ftrace_hash *other_hash;
1649 	struct ftrace_page *pg;
1650 	struct dyn_ftrace *rec;
1651 	bool update = false;
1652 	int count = 0;
1653 	int all = 0;
1654 
1655 	/* Only update if the ops has been registered */
1656 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1657 		return false;
1658 
1659 	/*
1660 	 * In the filter_hash case:
1661 	 *   If the count is zero, we update all records.
1662 	 *   Otherwise we just update the items in the hash.
1663 	 *
1664 	 * In the notrace_hash case:
1665 	 *   We enable the update in the hash.
1666 	 *   As disabling notrace means enabling the tracing,
1667 	 *   and enabling notrace means disabling, the inc variable
1668 	 *   gets inverted.
1669 	 */
1670 	if (filter_hash) {
1671 		hash = ops->func_hash->filter_hash;
1672 		other_hash = ops->func_hash->notrace_hash;
1673 		if (ftrace_hash_empty(hash))
1674 			all = 1;
1675 	} else {
1676 		inc = !inc;
1677 		hash = ops->func_hash->notrace_hash;
1678 		other_hash = ops->func_hash->filter_hash;
1679 		/*
1680 		 * If the notrace hash has no items,
1681 		 * then there's nothing to do.
1682 		 */
1683 		if (ftrace_hash_empty(hash))
1684 			return false;
1685 	}
1686 
1687 	do_for_each_ftrace_rec(pg, rec) {
1688 		int in_other_hash = 0;
1689 		int in_hash = 0;
1690 		int match = 0;
1691 
1692 		if (rec->flags & FTRACE_FL_DISABLED)
1693 			continue;
1694 
1695 		if (all) {
1696 			/*
1697 			 * Only the filter_hash affects all records.
1698 			 * Update if the record is not in the notrace hash.
1699 			 */
1700 			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1701 				match = 1;
1702 		} else {
1703 			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1704 			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1705 
1706 			/*
1707 			 * If filter_hash is set, we want to match all functions
1708 			 * that are in the hash but not in the other hash.
1709 			 *
1710 			 * If filter_hash is not set, then we are decrementing.
1711 			 * That means we match anything that is in the hash
1712 			 * and also in the other_hash. That is, we need to turn
1713 			 * off functions in the other hash because they are disabled
1714 			 * by this hash.
1715 			 */
1716 			if (filter_hash && in_hash && !in_other_hash)
1717 				match = 1;
1718 			else if (!filter_hash && in_hash &&
1719 				 (in_other_hash || ftrace_hash_empty(other_hash)))
1720 				match = 1;
1721 		}
1722 		if (!match)
1723 			continue;
1724 
1725 		if (inc) {
1726 			rec->flags++;
1727 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
1728 				return false;
1729 
1730 			/*
1731 			 * If there's only a single callback registered to a
1732 			 * function, and the ops has a trampoline registered
1733 			 * for it, then we can call it directly.
1734 			 */
1735 			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
1736 				rec->flags |= FTRACE_FL_TRAMP;
1737 			else
1738 				/*
1739 				 * If we are adding another function callback
1740 				 * to this function, and the previous had a
1741 				 * custom trampoline in use, then we need to go
1742 				 * back to the default trampoline.
1743 				 */
1744 				rec->flags &= ~FTRACE_FL_TRAMP;
1745 
1746 			/*
1747 			 * If any ops wants regs saved for this function
1748 			 * then all ops will get saved regs.
1749 			 */
1750 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1751 				rec->flags |= FTRACE_FL_REGS;
1752 		} else {
1753 			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
1754 				return false;
1755 			rec->flags--;
1756 
1757 			/*
1758 			 * If the rec had REGS enabled and the ops that is
1759 			 * being removed had REGS set, then see if there is
1760 			 * still any ops for this record that wants regs.
1761 			 * If not, we can stop recording them.
1762 			 */
1763 			if (ftrace_rec_count(rec) > 0 &&
1764 			    rec->flags & FTRACE_FL_REGS &&
1765 			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
1766 				if (!test_rec_ops_needs_regs(rec))
1767 					rec->flags &= ~FTRACE_FL_REGS;
1768 			}
1769 
1770 			/*
1771 			 * If the rec had TRAMP enabled, then it needs to
1772 			 * be cleared, as TRAMP can only be enabled when
1773 			 * there is a single ops attached to it.
1774 			 * In other words, always disable it on decrementing.
1775 			 * In the future, we may set it if rec count is
1776 			 * decremented to one, and the ops that is left
1777 			 * has a trampoline.
1778 			 */
1779 			rec->flags &= ~FTRACE_FL_TRAMP;
1780 
1781 			/*
1782 			 * flags will be cleared in ftrace_check_record()
1783 			 * if rec count is zero.
1784 			 */
1785 		}
1786 		count++;
1787 
1788 		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
1789 		update |= ftrace_test_record(rec, 1) != FTRACE_UPDATE_IGNORE;
1790 
1791 		/* Shortcut, if we handled all records, we are done. */
1792 		if (!all && count == hash->count)
1793 			return update;
1794 	} while_for_each_ftrace_rec();
1795 
1796 	return update;
1797 }
1798 
1799 static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1800 				    int filter_hash)
1801 {
1802 	return __ftrace_hash_rec_update(ops, filter_hash, 0);
1803 }
1804 
1805 static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1806 				   int filter_hash)
1807 {
1808 	return __ftrace_hash_rec_update(ops, filter_hash, 1);
1809 }
1810 
1811 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1812 					  int filter_hash, int inc)
1813 {
1814 	struct ftrace_ops *op;
1815 
1816 	__ftrace_hash_rec_update(ops, filter_hash, inc);
1817 
1818 	if (ops->func_hash != &global_ops.local_hash)
1819 		return;
1820 
1821 	/*
1822 	 * If the ops shares the global_ops hash, then we need to update
1823 	 * all ops that are enabled and use this hash.
1824 	 */
1825 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1826 		/* Already done */
1827 		if (op == ops)
1828 			continue;
1829 		if (op->func_hash == &global_ops.local_hash)
1830 			__ftrace_hash_rec_update(op, filter_hash, inc);
1831 	} while_for_each_ftrace_op(op);
1832 }
1833 
1834 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1835 					   int filter_hash)
1836 {
1837 	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1838 }
1839 
1840 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1841 					  int filter_hash)
1842 {
1843 	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1844 }
1845 
1846 /*
1847  * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1848  * or no update is needed, -EBUSY if it detects a conflict of the flag
1849  * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1850  * Note that old_hash and new_hash have the following meanings:
1851  *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1852  *  - If the hash is EMPTY_HASH, it hits nothing
1853  *  - Anything else hits the recs which match the hash entries.
1854  */
1855 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1856 					 struct ftrace_hash *old_hash,
1857 					 struct ftrace_hash *new_hash)
1858 {
1859 	struct ftrace_page *pg;
1860 	struct dyn_ftrace *rec, *end = NULL;
1861 	int in_old, in_new;
1862 
1863 	/* Only update if the ops has been registered */
1864 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1865 		return 0;
1866 
1867 	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1868 		return 0;
1869 
1870 	/*
1871 	 * Since IPMODIFY is a very address-sensitive action, we do not
1872 	 * allow ftrace_ops to set all functions to new hash.
1873 	 */
1874 	if (!new_hash || !old_hash)
1875 		return -EINVAL;
1876 
1877 	/* Update rec->flags */
1878 	do_for_each_ftrace_rec(pg, rec) {
1879 
1880 		if (rec->flags & FTRACE_FL_DISABLED)
1881 			continue;
1882 
1883 		/* We need to update only the differences between the hashes */
1884 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1885 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1886 		if (in_old == in_new)
1887 			continue;
1888 
1889 		if (in_new) {
1890 			/* New entries must ensure no others are using it */
1891 			if (rec->flags & FTRACE_FL_IPMODIFY)
1892 				goto rollback;
1893 			rec->flags |= FTRACE_FL_IPMODIFY;
1894 		} else /* Removed entry */
1895 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1896 	} while_for_each_ftrace_rec();
1897 
1898 	return 0;
1899 
1900 rollback:
1901 	end = rec;
1902 
1903 	/* Roll back what we did above */
1904 	do_for_each_ftrace_rec(pg, rec) {
1905 
1906 		if (rec->flags & FTRACE_FL_DISABLED)
1907 			continue;
1908 
1909 		if (rec == end)
1910 			goto err_out;
1911 
1912 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1913 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1914 		if (in_old == in_new)
1915 			continue;
1916 
1917 		if (in_new)
1918 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1919 		else
1920 			rec->flags |= FTRACE_FL_IPMODIFY;
1921 	} while_for_each_ftrace_rec();
1922 
1923 err_out:
1924 	return -EBUSY;
1925 }
1926 
1927 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1928 {
1929 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1930 
1931 	if (ftrace_hash_empty(hash))
1932 		hash = NULL;
1933 
1934 	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1935 }
1936 
1937 /* Disabling always succeeds */
1938 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1939 {
1940 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1941 
1942 	if (ftrace_hash_empty(hash))
1943 		hash = NULL;
1944 
1945 	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1946 }
1947 
1948 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1949 				       struct ftrace_hash *new_hash)
1950 {
1951 	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1952 
1953 	if (ftrace_hash_empty(old_hash))
1954 		old_hash = NULL;
1955 
1956 	if (ftrace_hash_empty(new_hash))
1957 		new_hash = NULL;
1958 
1959 	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1960 }
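/*
 * Example transitions (illustrative): enabling an IPMODIFY ops whose
 * filter is { foo } calls __ftrace_hash_update_ipmodify(ops,
 * EMPTY_HASH, { foo }) and sets FTRACE_FL_IPMODIFY on foo's record.
 * If a second IPMODIFY ops later tries to claim foo, the
 * FTRACE_FL_IPMODIFY test above fires and the whole update is rolled
 * back with -EBUSY.
 */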
1961 
1962 static void print_ip_ins(const char *fmt, const unsigned char *p)
1963 {
1964 	int i;
1965 
1966 	printk(KERN_CONT "%s", fmt);
1967 
1968 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1969 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1970 }
1971 
1972 static struct ftrace_ops *
1973 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1974 static struct ftrace_ops *
1975 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
1976 
1977 enum ftrace_bug_type ftrace_bug_type;
1978 const void *ftrace_expected;
1979 
1980 static void print_bug_type(void)
1981 {
1982 	switch (ftrace_bug_type) {
1983 	case FTRACE_BUG_UNKNOWN:
1984 		break;
1985 	case FTRACE_BUG_INIT:
1986 		pr_info("Initializing ftrace call sites\n");
1987 		break;
1988 	case FTRACE_BUG_NOP:
1989 		pr_info("Setting ftrace call site to NOP\n");
1990 		break;
1991 	case FTRACE_BUG_CALL:
1992 		pr_info("Setting ftrace call site to call ftrace function\n");
1993 		break;
1994 	case FTRACE_BUG_UPDATE:
1995 		pr_info("Updating ftrace call site to call a different ftrace function\n");
1996 		break;
1997 	}
1998 }
1999 
2000 /**
2001  * ftrace_bug - report and shutdown function tracer
2002  * @failed: The failed type (EFAULT, EINVAL, EPERM)
2003  * @rec: The record that failed
2004  *
2005  * The arch code that enables or disables the function tracing
2006  * can call ftrace_bug() when it has detected a problem in
2007  * modifying the code. @failed should be one of:
2008  * EFAULT - if the problem happens on reading the @ip address
2009  * EINVAL - if what is read at @ip is not what was expected
2010  * EPERM - if the problem happens on writing to the @ip address
2011  */
2012 void ftrace_bug(int failed, struct dyn_ftrace *rec)
2013 {
2014 	unsigned long ip = rec ? rec->ip : 0;
2015 
2016 	switch (failed) {
2017 	case -EFAULT:
2018 		FTRACE_WARN_ON_ONCE(1);
2019 		pr_info("ftrace faulted on modifying ");
2020 		print_ip_sym(ip);
2021 		break;
2022 	case -EINVAL:
2023 		FTRACE_WARN_ON_ONCE(1);
2024 		pr_info("ftrace failed to modify ");
2025 		print_ip_sym(ip);
2026 		print_ip_ins(" actual:   ", (unsigned char *)ip);
2027 		pr_cont("\n");
2028 		if (ftrace_expected) {
2029 			print_ip_ins(" expected: ", ftrace_expected);
2030 			pr_cont("\n");
2031 		}
2032 		break;
2033 	case -EPERM:
2034 		FTRACE_WARN_ON_ONCE(1);
2035 		pr_info("ftrace faulted on writing ");
2036 		print_ip_sym(ip);
2037 		break;
2038 	default:
2039 		FTRACE_WARN_ON_ONCE(1);
2040 		pr_info("ftrace faulted on unknown error ");
2041 		print_ip_sym(ip);
2042 	}
2043 	print_bug_type();
2044 	if (rec) {
2045 		struct ftrace_ops *ops = NULL;
2046 
2047 		pr_info("ftrace record flags: %lx\n", rec->flags);
2048 		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2049 			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2050 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
2051 			ops = ftrace_find_tramp_ops_any(rec);
2052 			if (ops) {
2053 				do {
2054 					pr_cont("\ttramp: %pS (%pS)",
2055 						(void *)ops->trampoline,
2056 						(void *)ops->func);
2057 					ops = ftrace_find_tramp_ops_next(rec, ops);
2058 				} while (ops);
2059 			} else
2060 				pr_cont("\ttramp: ERROR!");
2061 
2062 		}
2063 		ip = ftrace_get_addr_curr(rec);
2064 		pr_cont("\n expected tramp: %lx\n", ip);
2065 	}
2066 }
2067 
2068 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
2069 {
2070 	unsigned long flag = 0UL;
2071 
2072 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2073 
2074 	if (rec->flags & FTRACE_FL_DISABLED)
2075 		return FTRACE_UPDATE_IGNORE;
2076 
2077 	/*
2078 	 * If we are updating calls:
2079 	 *
2080 	 *   If the record has a ref count, then we need to enable it
2081 	 *   because someone is using it.
2082 	 *
2083 	 *   Otherwise we make sure it's disabled.
2084 	 *
2085 	 * If we are disabling calls, then disable all records that
2086 	 * are enabled.
2087 	 */
2088 	if (enable && ftrace_rec_count(rec))
2089 		flag = FTRACE_FL_ENABLED;
2090 
2091 	/*
2092 	 * If enabling and the REGS flag does not match the REGS_EN, or
2093 	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2094 	 * this record. Set flags to fail the compare against ENABLED.
2095 	 */
2096 	if (flag) {
2097 		if (!(rec->flags & FTRACE_FL_REGS) !=
2098 		    !(rec->flags & FTRACE_FL_REGS_EN))
2099 			flag |= FTRACE_FL_REGS;
2100 
2101 		if (!(rec->flags & FTRACE_FL_TRAMP) !=
2102 		    !(rec->flags & FTRACE_FL_TRAMP_EN))
2103 			flag |= FTRACE_FL_TRAMP;
2104 	}
2105 
2106 	/* If the state of this record hasn't changed, then do nothing */
2107 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2108 		return FTRACE_UPDATE_IGNORE;
2109 
2110 	if (flag) {
2111 		/* Save off if rec is being enabled (for return value) */
2112 		flag ^= rec->flags & FTRACE_FL_ENABLED;
2113 
2114 		if (update) {
2115 			rec->flags |= FTRACE_FL_ENABLED;
2116 			if (flag & FTRACE_FL_REGS) {
2117 				if (rec->flags & FTRACE_FL_REGS)
2118 					rec->flags |= FTRACE_FL_REGS_EN;
2119 				else
2120 					rec->flags &= ~FTRACE_FL_REGS_EN;
2121 			}
2122 			if (flag & FTRACE_FL_TRAMP) {
2123 				if (rec->flags & FTRACE_FL_TRAMP)
2124 					rec->flags |= FTRACE_FL_TRAMP_EN;
2125 				else
2126 					rec->flags &= ~FTRACE_FL_TRAMP_EN;
2127 			}
2128 		}
2129 
2130 		/*
2131 		 * If this record is being updated from a nop, then
2132 		 *   return UPDATE_MAKE_CALL.
2133 		 * Otherwise,
2134 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2135 		 *   from the save regs, to a non-save regs function or
2136 		 *   vice versa, or from a trampoline call.
2137 		 */
2138 		if (flag & FTRACE_FL_ENABLED) {
2139 			ftrace_bug_type = FTRACE_BUG_CALL;
2140 			return FTRACE_UPDATE_MAKE_CALL;
2141 		}
2142 
2143 		ftrace_bug_type = FTRACE_BUG_UPDATE;
2144 		return FTRACE_UPDATE_MODIFY_CALL;
2145 	}
2146 
2147 	if (update) {
2148 		/* If there's no more users, clear all flags */
2149 		if (!ftrace_rec_count(rec))
2150 			rec->flags = 0;
2151 		else
2152 			/*
2153 			 * Just disable the record, but keep the ops TRAMP
2154 			 * and REGS states. The _EN flags must be disabled though.
2155 			 */
2156 			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2157 					FTRACE_FL_REGS_EN);
2158 	}
2159 
2160 	ftrace_bug_type = FTRACE_BUG_NOP;
2161 	return FTRACE_UPDATE_MAKE_NOP;
2162 }
2163 
2164 /**
2165  * ftrace_update_record - set a record that now is tracing or not
2166  * @rec: the record to update
2167  * @enable: set to 1 if the record is tracing, zero to force disable
2168  *
2169  * The records that represent all functions that can be traced need
2170  * to be updated when tracing has been enabled.
2171  */
2172 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2173 {
2174 	return ftrace_check_record(rec, enable, 1);
2175 }
2176 
2177 /**
2178  * ftrace_test_record - check if the record has been enabled or not
2179  * @rec: the record to test
2180  * @enable: set to 1 to check if enabled, 0 if it is disabled
2181  *
2182  * The arch code may need to test if a record is already set to
2183  * tracing to determine how to modify the function code that it
2184  * represents.
2185  */
2186 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2187 {
2188 	return ftrace_check_record(rec, enable, 0);
2189 }
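
/*
 * An illustrative sketch (not part of this file) of how arch code
 * typically dispatches on the value these two helpers return; it
 * mirrors __ftrace_replace_code() below:
 *
 *	switch (ftrace_test_record(rec, enable)) {
 *	case FTRACE_UPDATE_IGNORE:
 *		break;
 *	case FTRACE_UPDATE_MAKE_CALL:
 *		... patch the site to call the ftrace function ...
 *		break;
 *	case FTRACE_UPDATE_MAKE_NOP:
 *		... patch the site back to a nop ...
 *		break;
 *	case FTRACE_UPDATE_MODIFY_CALL:
 *		... switch regs/non-regs or trampoline variants ...
 *		break;
 *	}
 */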
2190 
2191 static struct ftrace_ops *
2192 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2193 {
2194 	struct ftrace_ops *op;
2195 	unsigned long ip = rec->ip;
2196 
2197 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2198 
2199 		if (!op->trampoline)
2200 			continue;
2201 
2202 		if (hash_contains_ip(ip, op->func_hash))
2203 			return op;
2204 	} while_for_each_ftrace_op(op);
2205 
2206 	return NULL;
2207 }
2208 
2209 static struct ftrace_ops *
2210 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2211 			   struct ftrace_ops *op)
2212 {
2213 	unsigned long ip = rec->ip;
2214 
2215 	while_for_each_ftrace_op(op) {
2216 
2217 		if (!op->trampoline)
2218 			continue;
2219 
2220 		if (hash_contains_ip(ip, op->func_hash))
2221 			return op;
2222 	}
2223 
2224 	return NULL;
2225 }
2226 
2227 static struct ftrace_ops *
2228 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2229 {
2230 	struct ftrace_ops *op;
2231 	unsigned long ip = rec->ip;
2232 
2233 	/*
2234 	 * Need to check removed ops first.
2235 	 * If they are being removed, and this rec has a tramp,
2236 	 * and this rec is in the ops list, then it would be the
2237 	 * one with the tramp.
2238 	 */
2239 	if (removed_ops) {
2240 		if (hash_contains_ip(ip, &removed_ops->old_hash))
2241 			return removed_ops;
2242 	}
2243 
2244 	/*
2245 	 * Need to find the current trampoline for a rec.
2246 	 * Now, a trampoline is only attached to a rec if there
2247 	 * was a single 'ops' attached to it. But this can be called
2248 	 * when we are adding another op to the rec or removing the
2249 	 * current one. Thus, if the op is being added, we can
2250 	 * ignore it because it hasn't attached itself to the rec
2251 	 * yet.
2252 	 *
2253 	 * If an ops is being modified (hooking to different functions)
2254 	 * then we don't care about the new functions that are being
2255 	 * added, just the old ones (that are probably being removed).
2256 	 *
2257 	 * If we are adding an ops to a function that already is using
2258 	 * a trampoline, it needs to be removed (trampolines are only
2259 	 * used when a single ops is attached), then an ops that is not
2260 	 * being modified also needs to be checked.
2261 	 */
2262 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2263 
2264 		if (!op->trampoline)
2265 			continue;
2266 
2267 		/*
2268 		 * If the ops is being added, it hasn't gotten to
2269 		 * the point to be removed from this tree yet.
2270 		 */
2271 		if (op->flags & FTRACE_OPS_FL_ADDING)
2272 			continue;
2273 
2274 
2275 		/*
2276 		 * If the ops is being modified and is in the old
2277 		 * hash, then it is probably being removed from this
2278 		 * function.
2279 		 */
2280 		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2281 		    hash_contains_ip(ip, &op->old_hash))
2282 			return op;
2283 		/*
2284 		 * If the ops is not being added or modified, and it's
2285 		 * in its normal filter hash, then this must be the one
2286 		 * we want!
2287 		 */
2288 		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2289 		    hash_contains_ip(ip, op->func_hash))
2290 			return op;
2291 
2292 	} while_for_each_ftrace_op(op);
2293 
2294 	return NULL;
2295 }
2296 
2297 static struct ftrace_ops *
2298 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2299 {
2300 	struct ftrace_ops *op;
2301 	unsigned long ip = rec->ip;
2302 
2303 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2304 		/* pass rec in as regs to have non-NULL val */
2305 		if (hash_contains_ip(ip, op->func_hash))
2306 			return op;
2307 	} while_for_each_ftrace_op(op);
2308 
2309 	return NULL;
2310 }
2311 
2312 /**
2313  * ftrace_get_addr_new - Get the call address to set to
2314  * @rec:  The ftrace record descriptor
2315  *
2316  * If the record has the FTRACE_FL_REGS set, that means that it
2317  * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
2318  * is not set, then it wants to convert to the normal callback.
2319  *
2320  * Returns the address of the trampoline to set to
2321  */
2322 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2323 {
2324 	struct ftrace_ops *ops;
2325 
2326 	/* Trampolines take precedence over regs */
2327 	if (rec->flags & FTRACE_FL_TRAMP) {
2328 		ops = ftrace_find_tramp_ops_new(rec);
2329 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2330 			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2331 				(void *)rec->ip, (void *)rec->ip, rec->flags);
2332 			/* Ftrace is shutting down, return anything */
2333 			return (unsigned long)FTRACE_ADDR;
2334 		}
2335 		return ops->trampoline;
2336 	}
2337 
2338 	if (rec->flags & FTRACE_FL_REGS)
2339 		return (unsigned long)FTRACE_REGS_ADDR;
2340 	else
2341 		return (unsigned long)FTRACE_ADDR;
2342 }
2343 
2344 /**
2345  * ftrace_get_addr_curr - Get the call address that is already there
2346  * @rec:  The ftrace record descriptor
2347  *
2348  * The FTRACE_FL_REGS_EN is set when the record already points to
2349  * a function that saves all the regs. Basically the '_EN' version
2350  * represents the current state of the function.
2351  *
2352  * Returns the address of the trampoline that is currently being called
2353  */
2354 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2355 {
2356 	struct ftrace_ops *ops;
2357 
2358 	/* Trampolines take precedence over regs */
2359 	if (rec->flags & FTRACE_FL_TRAMP_EN) {
2360 		ops = ftrace_find_tramp_ops_curr(rec);
2361 		if (FTRACE_WARN_ON(!ops)) {
2362 			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2363 				(void *)rec->ip, (void *)rec->ip);
2364 			/* Ftrace is shutting down, return anything */
2365 			return (unsigned long)FTRACE_ADDR;
2366 		}
2367 		return ops->trampoline;
2368 	}
2369 
2370 	if (rec->flags & FTRACE_FL_REGS_EN)
2371 		return (unsigned long)FTRACE_REGS_ADDR;
2372 	else
2373 		return (unsigned long)FTRACE_ADDR;
2374 }
2375 
2376 static int
2377 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2378 {
2379 	unsigned long ftrace_old_addr;
2380 	unsigned long ftrace_addr;
2381 	int ret;
2382 
2383 	ftrace_addr = ftrace_get_addr_new(rec);
2384 
2385 	/* This needs to be done before we call ftrace_update_record */
2386 	ftrace_old_addr = ftrace_get_addr_curr(rec);
2387 
2388 	ret = ftrace_update_record(rec, enable);
2389 
2390 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2391 
2392 	switch (ret) {
2393 	case FTRACE_UPDATE_IGNORE:
2394 		return 0;
2395 
2396 	case FTRACE_UPDATE_MAKE_CALL:
2397 		ftrace_bug_type = FTRACE_BUG_CALL;
2398 		return ftrace_make_call(rec, ftrace_addr);
2399 
2400 	case FTRACE_UPDATE_MAKE_NOP:
2401 		ftrace_bug_type = FTRACE_BUG_NOP;
2402 		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2403 
2404 	case FTRACE_UPDATE_MODIFY_CALL:
2405 		ftrace_bug_type = FTRACE_BUG_UPDATE;
2406 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2407 	}
2408 
2409 	return -1; /* unknown ftrace bug */
2410 }
2411 
2412 void __weak ftrace_replace_code(int enable)
2413 {
2414 	struct dyn_ftrace *rec;
2415 	struct ftrace_page *pg;
2416 	int failed;
2417 
2418 	if (unlikely(ftrace_disabled))
2419 		return;
2420 
2421 	do_for_each_ftrace_rec(pg, rec) {
2422 
2423 		if (rec->flags & FTRACE_FL_DISABLED)
2424 			continue;
2425 
2426 		failed = __ftrace_replace_code(rec, enable);
2427 		if (failed) {
2428 			ftrace_bug(failed, rec);
2429 			/* Stop processing */
2430 			return;
2431 		}
2432 	} while_for_each_ftrace_rec();
2433 }
2434 
2435 struct ftrace_rec_iter {
2436 	struct ftrace_page	*pg;
2437 	int			index;
2438 };
2439 
2440 /**
2441  * ftrace_rec_iter_start - start up iterating over traced functions
2442  *
2443  * Returns an iterator handle that is used to iterate over all
2444  * the records that represent address locations where functions
2445  * are traced.
2446  *
2447  * May return NULL if no records are available.
2448  */
2449 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2450 {
2451 	/*
2452 	 * We only use a single iterator.
2453 	 * Protected by the ftrace_lock mutex.
2454 	 */
2455 	static struct ftrace_rec_iter ftrace_rec_iter;
2456 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2457 
2458 	iter->pg = ftrace_pages_start;
2459 	iter->index = 0;
2460 
2461 	/* Could have empty pages */
2462 	while (iter->pg && !iter->pg->index)
2463 		iter->pg = iter->pg->next;
2464 
2465 	if (!iter->pg)
2466 		return NULL;
2467 
2468 	return iter;
2469 }
2470 
2471 /**
2472  * ftrace_rec_iter_next - get the next record to process.
2473  * @iter: The handle to the iterator.
2474  *
2475  * Returns the next iterator after the given iterator @iter.
2476  */
2477 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2478 {
2479 	iter->index++;
2480 
2481 	if (iter->index >= iter->pg->index) {
2482 		iter->pg = iter->pg->next;
2483 		iter->index = 0;
2484 
2485 		/* Could have empty pages */
2486 		while (iter->pg && !iter->pg->index)
2487 			iter->pg = iter->pg->next;
2488 	}
2489 
2490 	if (!iter->pg)
2491 		return NULL;
2492 
2493 	return iter;
2494 }
2495 
2496 /**
2497  * ftrace_rec_iter_record - get the record at the iterator location
2498  * @iter: The current iterator location
2499  *
2500  * Returns the record that the current @iter is at.
2501  */
2502 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2503 {
2504 	return &iter->pg->records[iter->index];
2505 }
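
/*
 * A minimal usage sketch of the iterator API above (assuming the
 * caller holds ftrace_lock, which these iterators require):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... act on rec ...
 *	}
 */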
2506 
2507 static int
2508 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
2509 {
2510 	int ret;
2511 
2512 	if (unlikely(ftrace_disabled))
2513 		return 0;
2514 
2515 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
2516 	if (ret) {
2517 		ftrace_bug_type = FTRACE_BUG_INIT;
2518 		ftrace_bug(ret, rec);
2519 		return 0;
2520 	}
2521 	return 1;
2522 }
2523 
2524 /*
2525  * archs can override this function if they must do something
2526  * before the code modification is performed.
2527  */
2528 int __weak ftrace_arch_code_modify_prepare(void)
2529 {
2530 	return 0;
2531 }
2532 
2533 /*
2534  * archs can override this function if they must do something
2535  * after the code modification is performed.
2536  */
2537 int __weak ftrace_arch_code_modify_post_process(void)
2538 {
2539 	return 0;
2540 }
2541 
2542 void ftrace_modify_all_code(int command)
2543 {
2544 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2545 	int err = 0;
2546 
2547 	/*
2548 	 * If the ftrace_caller calls a ftrace_ops func directly,
2549 	 * we need to make sure that it only traces functions it
2550 	 * expects to trace. When doing the switch of functions,
2551 	 * we need to update to the ftrace_ops_list_func first
2552 	 * before the transition between old and new calls is set,
2553 	 * as the ftrace_ops_list_func will check the ops hashes
2554 	 * to make sure the ops have the right functions
2555 	 * traced.
2556 	 */
2557 	if (update) {
2558 		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2559 		if (FTRACE_WARN_ON(err))
2560 			return;
2561 	}
2562 
2563 	if (command & FTRACE_UPDATE_CALLS)
2564 		ftrace_replace_code(1);
2565 	else if (command & FTRACE_DISABLE_CALLS)
2566 		ftrace_replace_code(0);
2567 
2568 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2569 		function_trace_op = set_function_trace_op;
2570 		smp_wmb();
2571 		/* If irqs are disabled, we are in stop machine */
2572 		if (!irqs_disabled())
2573 			smp_call_function(ftrace_sync_ipi, NULL, 1);
2574 		err = ftrace_update_ftrace_func(ftrace_trace_function);
2575 		if (FTRACE_WARN_ON(err))
2576 			return;
2577 	}
2578 
2579 	if (command & FTRACE_START_FUNC_RET)
2580 		err = ftrace_enable_ftrace_graph_caller();
2581 	else if (command & FTRACE_STOP_FUNC_RET)
2582 		err = ftrace_disable_ftrace_graph_caller();
2583 	FTRACE_WARN_ON(err);
2584 }
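
/*
 * For example (an illustrative sketch, not a call site in this file),
 * enabling the call sites and switching the trace function in one
 * transition would combine command bits:
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC);
 */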
2585 
2586 static int __ftrace_modify_code(void *data)
2587 {
2588 	int *command = data;
2589 
2590 	ftrace_modify_all_code(*command);
2591 
2592 	return 0;
2593 }
2594 
2595 /**
2596  * ftrace_run_stop_machine - go back to the stop machine method
2597  * @command: The command to tell ftrace what to do
2598  *
2599  * If an arch needs to fall back to the stop machine method, then
2600  * it can call this function.
2601  */
2602 void ftrace_run_stop_machine(int command)
2603 {
2604 	stop_machine(__ftrace_modify_code, &command, NULL);
2605 }
2606 
2607 /**
2608  * arch_ftrace_update_code - modify the code to trace or not trace
2609  * @command: The command that needs to be done
2610  *
2611  * Archs can override this function if they do not need to
2612  * run stop_machine() to modify code.
2613  */
2614 void __weak arch_ftrace_update_code(int command)
2615 {
2616 	ftrace_run_stop_machine(command);
2617 }
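
/*
 * A hedged sketch of an arch override (real implementations live in
 * arch code; my_arch_can_patch_live() is a hypothetical predicate
 * used only for illustration):
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		if (my_arch_can_patch_live())
 *			ftrace_modify_all_code(command);
 *		else
 *			ftrace_run_stop_machine(command);
 *	}
 */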
2618 
2619 static void ftrace_run_update_code(int command)
2620 {
2621 	int ret;
2622 
2623 	ret = ftrace_arch_code_modify_prepare();
2624 	FTRACE_WARN_ON(ret);
2625 	if (ret)
2626 		return;
2627 
2628 	/*
2629 	 * By default we use stop_machine() to modify the code.
2630 	 * But archs can do whatever they want as long as it
2631 	 * is safe. The stop_machine() is the safest, but also
2632 	 * produces the most overhead.
2633 	 */
2634 	arch_ftrace_update_code(command);
2635 
2636 	ret = ftrace_arch_code_modify_post_process();
2637 	FTRACE_WARN_ON(ret);
2638 }
2639 
2640 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2641 				   struct ftrace_ops_hash *old_hash)
2642 {
2643 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2644 	ops->old_hash.filter_hash = old_hash->filter_hash;
2645 	ops->old_hash.notrace_hash = old_hash->notrace_hash;
2646 	ftrace_run_update_code(command);
2647 	ops->old_hash.filter_hash = NULL;
2648 	ops->old_hash.notrace_hash = NULL;
2649 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2650 }
2651 
2652 static ftrace_func_t saved_ftrace_func;
2653 static int ftrace_start_up;
2654 
2655 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2656 {
2657 }
2658 
2659 static void per_cpu_ops_free(struct ftrace_ops *ops)
2660 {
2661 	free_percpu(ops->disabled);
2662 }
2663 
2664 static void ftrace_startup_enable(int command)
2665 {
2666 	if (saved_ftrace_func != ftrace_trace_function) {
2667 		saved_ftrace_func = ftrace_trace_function;
2668 		command |= FTRACE_UPDATE_TRACE_FUNC;
2669 	}
2670 
2671 	if (!command || !ftrace_enabled)
2672 		return;
2673 
2674 	ftrace_run_update_code(command);
2675 }
2676 
2677 static void ftrace_startup_all(int command)
2678 {
2679 	update_all_ops = true;
2680 	ftrace_startup_enable(command);
2681 	update_all_ops = false;
2682 }
2683 
2684 static int ftrace_startup(struct ftrace_ops *ops, int command)
2685 {
2686 	int ret;
2687 
2688 	if (unlikely(ftrace_disabled))
2689 		return -ENODEV;
2690 
2691 	ret = __register_ftrace_function(ops);
2692 	if (ret)
2693 		return ret;
2694 
2695 	ftrace_start_up++;
2696 
2697 	/*
2698 	 * Note that ftrace probes use this to start up
2699 	 * and modify functions they will probe. But we still
2700 	 * set the ADDING flag for modification, as probes
2701 	 * do not have trampolines. If they add them in the
2702 	 * future, then the probes will need to distinguish
2703 	 * between adding and updating probes.
2704 	 */
2705 	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2706 
2707 	ret = ftrace_hash_ipmodify_enable(ops);
2708 	if (ret < 0) {
2709 		/* Rollback registration process */
2710 		__unregister_ftrace_function(ops);
2711 		ftrace_start_up--;
2712 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2713 		return ret;
2714 	}
2715 
2716 	if (ftrace_hash_rec_enable(ops, 1))
2717 		command |= FTRACE_UPDATE_CALLS;
2718 
2719 	ftrace_startup_enable(command);
2720 
2721 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
2722 
2723 	return 0;
2724 }
2725 
2726 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2727 {
2728 	int ret;
2729 
2730 	if (unlikely(ftrace_disabled))
2731 		return -ENODEV;
2732 
2733 	ret = __unregister_ftrace_function(ops);
2734 	if (ret)
2735 		return ret;
2736 
2737 	ftrace_start_up--;
2738 	/*
2739 	 * Just warn in case of imbalance; no need to kill ftrace, it's not
2740 	 * critical, but the ftrace_call callers may never be nopped again after
2741 	 * further ftrace uses.
2742 	 */
2743 	WARN_ON_ONCE(ftrace_start_up < 0);
2744 
2745 	/* Disabling ipmodify never fails */
2746 	ftrace_hash_ipmodify_disable(ops);
2747 
2748 	if (ftrace_hash_rec_disable(ops, 1))
2749 		command |= FTRACE_UPDATE_CALLS;
2750 
2751 	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2752 
2753 	if (saved_ftrace_func != ftrace_trace_function) {
2754 		saved_ftrace_func = ftrace_trace_function;
2755 		command |= FTRACE_UPDATE_TRACE_FUNC;
2756 	}
2757 
2758 	if (!command || !ftrace_enabled) {
2759 		/*
2760 		 * If these are per_cpu ops, they still need their
2761 	 * per_cpu field freed. Since function tracing is
2762 		 * not currently active, we can just free them
2763 		 * without synchronizing all CPUs.
2764 		 */
2765 		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
2766 			per_cpu_ops_free(ops);
2767 		return 0;
2768 	}
2769 
2770 	/*
2771 	 * If the ops uses a trampoline, then it needs to be
2772 	 * tested first on update.
2773 	 */
2774 	ops->flags |= FTRACE_OPS_FL_REMOVING;
2775 	removed_ops = ops;
2776 
2777 	/* The trampoline logic checks the old hashes */
2778 	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2779 	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2780 
2781 	ftrace_run_update_code(command);
2782 
2783 	/*
2784 	 * If there are no more ops registered with ftrace, run a
2785 	 * sanity check to make sure all rec flags are cleared.
2786 	 */
2787 	if (ftrace_ops_list == &ftrace_list_end) {
2788 		struct ftrace_page *pg;
2789 		struct dyn_ftrace *rec;
2790 
2791 		do_for_each_ftrace_rec(pg, rec) {
2792 			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
2793 				pr_warn("  %pS flags:%lx\n",
2794 					(void *)rec->ip, rec->flags);
2795 		} while_for_each_ftrace_rec();
2796 	}
2797 
2798 	ops->old_hash.filter_hash = NULL;
2799 	ops->old_hash.notrace_hash = NULL;
2800 
2801 	removed_ops = NULL;
2802 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
2803 
2804 	/*
2805 	 * Dynamic ops may be freed; we must make sure that all
2806 	 * callers are done before leaving this function.
2807 	 * The same goes for freeing the per_cpu data of the per_cpu
2808 	 * ops.
2809 	 */
2810 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
2811 		/*
2812 		 * We need to do a hard force of sched synchronization.
2813 		 * This is because we use preempt_disable() to do RCU, but
2814 		 * the function tracers can be called where RCU is not watching
2815 		 * (like before user_exit()). We can not rely on the RCU
2816 		 * infrastructure to do the synchronization, thus we must do it
2817 		 * ourselves.
2818 		 */
2819 		schedule_on_each_cpu(ftrace_sync);
2820 
2821 		/*
2822 	 * When the kernel is preemptive, tasks can be preempted
2823 	 * while on a ftrace trampoline. Just scheduling a task on
2824 	 * a CPU is not good enough to flush them. Calling
2825 	 * synchronize_rcu_tasks() will wait for those tasks to
2826 		 * execute and either schedule voluntarily or enter user space.
2827 		 */
2828 		if (IS_ENABLED(CONFIG_PREEMPT))
2829 			synchronize_rcu_tasks();
2830 
2831 		arch_ftrace_trampoline_free(ops);
2832 
2833 		if (ops->flags & FTRACE_OPS_FL_PER_CPU)
2834 			per_cpu_ops_free(ops);
2835 	}
2836 
2837 	return 0;
2838 }
2839 
2840 static void ftrace_startup_sysctl(void)
2841 {
2842 	int command;
2843 
2844 	if (unlikely(ftrace_disabled))
2845 		return;
2846 
2847 	/* Force update next time */
2848 	saved_ftrace_func = NULL;
2849 	/* ftrace_start_up is true if we want ftrace running */
2850 	if (ftrace_start_up) {
2851 		command = FTRACE_UPDATE_CALLS;
2852 		if (ftrace_graph_active)
2853 			command |= FTRACE_START_FUNC_RET;
2854 		ftrace_startup_enable(command);
2855 	}
2856 }
2857 
2858 static void ftrace_shutdown_sysctl(void)
2859 {
2860 	int command;
2861 
2862 	if (unlikely(ftrace_disabled))
2863 		return;
2864 
2865 	/* ftrace_start_up is true if ftrace is running */
2866 	if (ftrace_start_up) {
2867 		command = FTRACE_DISABLE_CALLS;
2868 		if (ftrace_graph_active)
2869 			command |= FTRACE_STOP_FUNC_RET;
2870 		ftrace_run_update_code(command);
2871 	}
2872 }
2873 
2874 static u64		ftrace_update_time;
2875 unsigned long		ftrace_update_tot_cnt;
2876 
2877 static inline int ops_traces_mod(struct ftrace_ops *ops)
2878 {
2879 	/*
2880 	 * An empty filter_hash will default to tracing the module.
2881 	 * But the notrace hash requires a test of individual module functions.
2882 	 */
2883 	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2884 		ftrace_hash_empty(ops->func_hash->notrace_hash);
2885 }
2886 
2887 /*
2888  * Check if the current ops references the record.
2889  *
2890  * If the ops traces all functions, then it was already accounted for.
2891  * If the ops does not trace the current record function, skip it.
2892  * If the ops ignores the function via notrace filter, skip it.
2893  */
2894 static inline bool
2895 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2896 {
2897 	/* If ops isn't enabled, ignore it */
2898 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2899 		return 0;
2900 
2901 	/* If ops traces all then it includes this function */
2902 	if (ops_traces_mod(ops))
2903 		return 1;
2904 
2905 	/* The function must be in the filter */
2906 	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2907 	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
2908 		return 0;
2909 
2910 	/* If in notrace hash, we ignore it too */
2911 	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
2912 		return 0;
2913 
2914 	return 1;
2915 }
2916 
2917 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2918 {
2919 	struct ftrace_page *pg;
2920 	struct dyn_ftrace *p;
2921 	u64 start, stop;
2922 	unsigned long update_cnt = 0;
2923 	unsigned long rec_flags = 0;
2924 	int i;
2925 
2926 	start = ftrace_now(raw_smp_processor_id());
2927 
2928 	/*
2929 	 * When a module is loaded, this function is called to convert
2930 	 * the calls to mcount in its text to nops, and also to create
2931 	 * an entry in the ftrace data. Now, if ftrace is activated
2932 	 * after this call, but before the module sets its text to
2933 	 * read-only, the modification of enabling ftrace can fail if
2934 	 * the read-only is done while ftrace is converting the calls.
2935 	 * To prevent this, the module's records are set as disabled
2936 	 * and will be enabled after the call to set the module's text
2937 	 * to read-only.
2938 	 */
2939 	if (mod)
2940 		rec_flags |= FTRACE_FL_DISABLED;
2941 
2942 	for (pg = new_pgs; pg; pg = pg->next) {
2943 
2944 		for (i = 0; i < pg->index; i++) {
2945 
2946 			/* If something went wrong, bail without enabling anything */
2947 			if (unlikely(ftrace_disabled))
2948 				return -1;
2949 
2950 			p = &pg->records[i];
2951 			p->flags = rec_flags;
2952 
2953 			/*
2954 			 * Do the initial record conversion from mcount jump
2955 			 * to the NOP instructions.
2956 			 */
2957 			if (!ftrace_code_disable(mod, p))
2958 				break;
2959 
2960 			update_cnt++;
2961 		}
2962 	}
2963 
2964 	stop = ftrace_now(raw_smp_processor_id());
2965 	ftrace_update_time = stop - start;
2966 	ftrace_update_tot_cnt += update_cnt;
2967 
2968 	return 0;
2969 }
2970 
2971 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2972 {
2973 	int order;
2974 	int cnt;
2975 
2976 	if (WARN_ON(!count))
2977 		return -EINVAL;
2978 
2979 	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2980 
2981 	/*
2982 	 * We want to fill as much as possible. No more than a page
2983 	 * may be empty.
2984 	 */
2985 	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2986 		order--;
2987 
2988  again:
2989 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2990 
2991 	if (!pg->records) {
2992 		/* if we can't allocate this size, try something smaller */
2993 		if (!order)
2994 			return -ENOMEM;
2995 		order >>= 1;
2996 		goto again;
2997 	}
2998 
2999 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3000 	pg->size = cnt;
3001 
3002 	if (cnt > count)
3003 		cnt = count;
3004 
3005 	return cnt;
3006 }
3007 
3008 static struct ftrace_page *
3009 ftrace_allocate_pages(unsigned long num_to_init)
3010 {
3011 	struct ftrace_page *start_pg;
3012 	struct ftrace_page *pg;
3013 	int order;
3014 	int cnt;
3015 
3016 	if (!num_to_init)
3017 		return NULL;
3018 
3019 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3020 	if (!pg)
3021 		return NULL;
3022 
3023 	/*
3024 	 * Try to allocate as much as possible in one contiguous
3025 	 * location that fills in all of the space. We want to
3026 	 * waste as little space as possible.
3027 	 */
3028 	for (;;) {
3029 		cnt = ftrace_allocate_records(pg, num_to_init);
3030 		if (cnt < 0)
3031 			goto free_pages;
3032 
3033 		num_to_init -= cnt;
3034 		if (!num_to_init)
3035 			break;
3036 
3037 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3038 		if (!pg->next)
3039 			goto free_pages;
3040 
3041 		pg = pg->next;
3042 	}
3043 
3044 	return start_pg;
3045 
3046  free_pages:
3047 	pg = start_pg;
3048 	while (pg) {
3049 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3050 		free_pages((unsigned long)pg->records, order);
3051 		start_pg = pg->next;
3052 		kfree(pg);
3053 		pg = start_pg;
3054 	}
3055 	pr_info("ftrace: FAILED to allocate memory for functions\n");
3056 	return NULL;
3057 }
3058 
3059 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3060 
3061 struct ftrace_iterator {
3062 	loff_t				pos;
3063 	loff_t				func_pos;
3064 	struct ftrace_page		*pg;
3065 	struct dyn_ftrace		*func;
3066 	struct ftrace_func_probe	*probe;
3067 	struct ftrace_func_entry	*probe_entry;
3068 	struct trace_parser		parser;
3069 	struct ftrace_hash		*hash;
3070 	struct ftrace_ops		*ops;
3071 	int				pidx;
3072 	int				idx;
3073 	unsigned			flags;
3074 };
3075 
3076 static void *
3077 t_probe_next(struct seq_file *m, loff_t *pos)
3078 {
3079 	struct ftrace_iterator *iter = m->private;
3080 	struct trace_array *tr = iter->ops->private;
3081 	struct list_head *func_probes;
3082 	struct ftrace_hash *hash;
3083 	struct list_head *next;
3084 	struct hlist_node *hnd = NULL;
3085 	struct hlist_head *hhd;
3086 	int size;
3087 
3088 	(*pos)++;
3089 	iter->pos = *pos;
3090 
3091 	if (!tr)
3092 		return NULL;
3093 
3094 	func_probes = &tr->func_probes;
3095 	if (list_empty(func_probes))
3096 		return NULL;
3097 
3098 	if (!iter->probe) {
3099 		next = func_probes->next;
3100 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3101 	}
3102 
3103 	if (iter->probe_entry)
3104 		hnd = &iter->probe_entry->hlist;
3105 
3106 	hash = iter->probe->ops.func_hash->filter_hash;
3107 	size = 1 << hash->size_bits;
3108 
3109  retry:
3110 	if (iter->pidx >= size) {
3111 		if (iter->probe->list.next == func_probes)
3112 			return NULL;
3113 		next = iter->probe->list.next;
3114 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3115 		hash = iter->probe->ops.func_hash->filter_hash;
3116 		size = 1 << hash->size_bits;
3117 		iter->pidx = 0;
3118 	}
3119 
3120 	hhd = &hash->buckets[iter->pidx];
3121 
3122 	if (hlist_empty(hhd)) {
3123 		iter->pidx++;
3124 		hnd = NULL;
3125 		goto retry;
3126 	}
3127 
3128 	if (!hnd)
3129 		hnd = hhd->first;
3130 	else {
3131 		hnd = hnd->next;
3132 		if (!hnd) {
3133 			iter->pidx++;
3134 			goto retry;
3135 		}
3136 	}
3137 
3138 	if (WARN_ON_ONCE(!hnd))
3139 		return NULL;
3140 
3141 	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3142 
3143 	return iter;
3144 }
3145 
3146 static void *t_probe_start(struct seq_file *m, loff_t *pos)
3147 {
3148 	struct ftrace_iterator *iter = m->private;
3149 	void *p = NULL;
3150 	loff_t l;
3151 
3152 	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3153 		return NULL;
3154 
3155 	if (iter->func_pos > *pos)
3156 		return NULL;
3157 
3158 	iter->probe = NULL;
3159 	iter->probe_entry = NULL;
3160 	iter->pidx = 0;
3161 	for (l = 0; l <= (*pos - iter->func_pos); ) {
3162 		p = t_probe_next(m, &l);
3163 		if (!p)
3164 			break;
3165 	}
3166 	if (!p)
3167 		return NULL;
3168 
3169 	/* Only set this if we have an item */
3170 	iter->flags |= FTRACE_ITER_PROBE;
3171 
3172 	return iter;
3173 }
3174 
3175 static int
3176 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3177 {
3178 	struct ftrace_func_entry *probe_entry;
3179 	struct ftrace_probe_ops *probe_ops;
3180 	struct ftrace_func_probe *probe;
3181 
3182 	probe = iter->probe;
3183 	probe_entry = iter->probe_entry;
3184 
3185 	if (WARN_ON_ONCE(!probe || !probe_entry))
3186 		return -EIO;
3187 
3188 	probe_ops = probe->probe_ops;
3189 
3190 	if (probe_ops->print)
3191 		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3192 
3193 	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3194 		   (void *)probe_ops->func);
3195 
3196 	return 0;
3197 }
3198 
3199 static void *
3200 t_func_next(struct seq_file *m, loff_t *pos)
3201 {
3202 	struct ftrace_iterator *iter = m->private;
3203 	struct dyn_ftrace *rec = NULL;
3204 
3205 	(*pos)++;
3206 
3207  retry:
3208 	if (iter->idx >= iter->pg->index) {
3209 		if (iter->pg->next) {
3210 			iter->pg = iter->pg->next;
3211 			iter->idx = 0;
3212 			goto retry;
3213 		}
3214 	} else {
3215 		rec = &iter->pg->records[iter->idx++];
3216 		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3217 		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3218 
3219 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
3220 		     !(rec->flags & FTRACE_FL_ENABLED))) {
3221 
3222 			rec = NULL;
3223 			goto retry;
3224 		}
3225 	}
3226 
3227 	if (!rec)
3228 		return NULL;
3229 
3230 	iter->pos = iter->func_pos = *pos;
3231 	iter->func = rec;
3232 
3233 	return iter;
3234 }
3235 
3236 static void *
3237 t_next(struct seq_file *m, void *v, loff_t *pos)
3238 {
3239 	struct ftrace_iterator *iter = m->private;
3240 	loff_t l = *pos; /* t_probe_start() must use original pos */
3241 	void *ret;
3242 
3243 	if (unlikely(ftrace_disabled))
3244 		return NULL;
3245 
3246 	if (iter->flags & FTRACE_ITER_PROBE)
3247 		return t_probe_next(m, pos);
3248 
3249 	if (iter->flags & FTRACE_ITER_PRINTALL) {
3250 		/* next must increment pos, and t_probe_start does not */
3251 		(*pos)++;
3252 		return t_probe_start(m, &l);
3253 	}
3254 
3255 	ret = t_func_next(m, pos);
3256 
3257 	if (!ret)
3258 		return t_probe_start(m, &l);
3259 
3260 	return ret;
3261 }
3262 
3263 static void reset_iter_read(struct ftrace_iterator *iter)
3264 {
3265 	iter->pos = 0;
3266 	iter->func_pos = 0;
3267 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE);
3268 }
3269 
3270 static void *t_start(struct seq_file *m, loff_t *pos)
3271 {
3272 	struct ftrace_iterator *iter = m->private;
3273 	void *p = NULL;
3274 	loff_t l;
3275 
3276 	mutex_lock(&ftrace_lock);
3277 
3278 	if (unlikely(ftrace_disabled))
3279 		return NULL;
3280 
3281 	/*
3282 	 * If an lseek was done, then reset and start from beginning.
3283 	 */
3284 	if (*pos < iter->pos)
3285 		reset_iter_read(iter);
3286 
3287 	/*
3288 	 * For set_ftrace_filter reading, if we have the filter
3289 	 * off, we can short cut and just print out that all
3290 	 * functions are enabled.
3291 	 */
3292 	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3293 	    ftrace_hash_empty(iter->hash)) {
3294 		iter->func_pos = 1; /* Account for the message */
3295 		if (*pos > 0)
3296 			return t_probe_start(m, pos);
3297 		iter->flags |= FTRACE_ITER_PRINTALL;
3298 		/* reset in case of seek/pread */
3299 		iter->flags &= ~FTRACE_ITER_PROBE;
3300 		return iter;
3301 	}
3302 
3303 	if (iter->flags & FTRACE_ITER_PROBE)
3304 		return t_probe_start(m, pos);
3305 
3306 	/*
3307 	 * Unfortunately, we need to restart at ftrace_pages_start
3308 	 * every time we let go of the ftrace_lock. This is because
3309 	 * those pointers can change without the lock.
3310 	 */
3311 	iter->pg = ftrace_pages_start;
3312 	iter->idx = 0;
3313 	for (l = 0; l <= *pos; ) {
3314 		p = t_func_next(m, &l);
3315 		if (!p)
3316 			break;
3317 	}
3318 
3319 	if (!p)
3320 		return t_probe_start(m, pos);
3321 
3322 	return iter;
3323 }
3324 
3325 static void t_stop(struct seq_file *m, void *p)
3326 {
3327 	mutex_unlock(&ftrace_lock);
3328 }
3329 
3330 void * __weak
3331 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3332 {
3333 	return NULL;
3334 }
3335 
3336 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3337 				struct dyn_ftrace *rec)
3338 {
3339 	void *ptr;
3340 
3341 	ptr = arch_ftrace_trampoline_func(ops, rec);
3342 	if (ptr)
3343 		seq_printf(m, " ->%pS", ptr);
3344 }
3345 
3346 static int t_show(struct seq_file *m, void *v)
3347 {
3348 	struct ftrace_iterator *iter = m->private;
3349 	struct dyn_ftrace *rec;
3350 
3351 	if (iter->flags & FTRACE_ITER_PROBE)
3352 		return t_probe_show(m, iter);
3353 
3354 	if (iter->flags & FTRACE_ITER_PRINTALL) {
3355 		if (iter->flags & FTRACE_ITER_NOTRACE)
3356 			seq_puts(m, "#### no functions disabled ####\n");
3357 		else
3358 			seq_puts(m, "#### all functions enabled ####\n");
3359 		return 0;
3360 	}
3361 
3362 	rec = iter->func;
3363 
3364 	if (!rec)
3365 		return 0;
3366 
3367 	seq_printf(m, "%ps", (void *)rec->ip);
3368 	if (iter->flags & FTRACE_ITER_ENABLED) {
3369 		struct ftrace_ops *ops;
3370 
3371 		seq_printf(m, " (%ld)%s%s",
3372 			   ftrace_rec_count(rec),
3373 			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3374 			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ");
3375 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3376 			ops = ftrace_find_tramp_ops_any(rec);
3377 			if (ops) {
3378 				do {
3379 					seq_printf(m, "\ttramp: %pS (%pS)",
3380 						   (void *)ops->trampoline,
3381 						   (void *)ops->func);
3382 					add_trampoline_func(m, ops, rec);
3383 					ops = ftrace_find_tramp_ops_next(rec, ops);
3384 				} while (ops);
3385 			} else
3386 				seq_puts(m, "\ttramp: ERROR!");
3387 		} else {
3388 			add_trampoline_func(m, NULL, rec);
3389 		}
3390 	}
3391 
3392 	seq_putc(m, '\n');
3393 
3394 	return 0;
3395 }
3396 
3397 static const struct seq_operations show_ftrace_seq_ops = {
3398 	.start = t_start,
3399 	.next = t_next,
3400 	.stop = t_stop,
3401 	.show = t_show,
3402 };
3403 
3404 static int
3405 ftrace_avail_open(struct inode *inode, struct file *file)
3406 {
3407 	struct ftrace_iterator *iter;
3408 
3409 	if (unlikely(ftrace_disabled))
3410 		return -ENODEV;
3411 
3412 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3413 	if (!iter)
3414 		return -ENOMEM;
3415 
3416 	iter->pg = ftrace_pages_start;
3417 	iter->ops = &global_ops;
3418 
3419 	return 0;
3420 }
3421 
3422 static int
3423 ftrace_enabled_open(struct inode *inode, struct file *file)
3424 {
3425 	struct ftrace_iterator *iter;
3426 
3427 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3428 	if (!iter)
3429 		return -ENOMEM;
3430 
3431 	iter->pg = ftrace_pages_start;
3432 	iter->flags = FTRACE_ITER_ENABLED;
3433 	iter->ops = &global_ops;
3434 
3435 	return 0;
3436 }
3437 
3438 /**
3439  * ftrace_regex_open - initialize function tracer filter files
3440  * @ops: The ftrace_ops that hold the hash filters
3441  * @flag: The type of filter to process
3442  * @inode: The inode, usually passed in to your open routine
3443  * @file: The file, usually passed in to your open routine
3444  *
3445  * ftrace_regex_open() initializes the filter files for the
3446  * @ops. Depending on @flag it may process the filter hash or
3447  * the notrace hash of @ops. With this called from the open
3448  * routine, you can use ftrace_filter_write() for the write
3449  * routine if @flag has FTRACE_ITER_FILTER set, or
3450  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3451  * tracing_lseek() should be used as the lseek routine, and
3452  * release must call ftrace_regex_release().
3453  */
3454 int
3455 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3456 		  struct inode *inode, struct file *file)
3457 {
3458 	struct ftrace_iterator *iter;
3459 	struct ftrace_hash *hash;
3460 	int ret = 0;
3461 
3462 	ftrace_ops_init(ops);
3463 
3464 	if (unlikely(ftrace_disabled))
3465 		return -ENODEV;
3466 
3467 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3468 	if (!iter)
3469 		return -ENOMEM;
3470 
3471 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3472 		kfree(iter);
3473 		return -ENOMEM;
3474 	}
3475 
3476 	iter->ops = ops;
3477 	iter->flags = flag;
3478 
3479 	mutex_lock(&ops->func_hash->regex_lock);
3480 
3481 	if (flag & FTRACE_ITER_NOTRACE)
3482 		hash = ops->func_hash->notrace_hash;
3483 	else
3484 		hash = ops->func_hash->filter_hash;
3485 
3486 	if (file->f_mode & FMODE_WRITE) {
3487 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3488 
3489 		if (file->f_flags & O_TRUNC)
3490 			iter->hash = alloc_ftrace_hash(size_bits);
3491 		else
3492 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3493 
3494 		if (!iter->hash) {
3495 			trace_parser_put(&iter->parser);
3496 			kfree(iter);
3497 			ret = -ENOMEM;
3498 			goto out_unlock;
3499 		}
3500 	} else
3501 		iter->hash = hash;
3502 
3503 	if (file->f_mode & FMODE_READ) {
3504 		iter->pg = ftrace_pages_start;
3505 
3506 		ret = seq_open(file, &show_ftrace_seq_ops);
3507 		if (!ret) {
3508 			struct seq_file *m = file->private_data;
3509 			m->private = iter;
3510 		} else {
3511 			/* Failed */
3512 			free_ftrace_hash(iter->hash);
3513 			trace_parser_put(&iter->parser);
3514 			kfree(iter);
3515 		}
3516 	} else
3517 		file->private_data = iter;
3518 
3519  out_unlock:
3520 	mutex_unlock(&ops->func_hash->regex_lock);
3521 
3522 	return ret;
3523 }
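
/*
 * A sketch of the file_operations wiring described above
 * (my_filter_fops is a hypothetical name; the helpers are the real
 * ones meant to be used together):
 *
 *	static const struct file_operations my_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */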
3524 
3525 static int
3526 ftrace_filter_open(struct inode *inode, struct file *file)
3527 {
3528 	struct ftrace_ops *ops = inode->i_private;
3529 
3530 	return ftrace_regex_open(ops,
3531 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3532 			inode, file);
3533 }
3534 
3535 static int
3536 ftrace_notrace_open(struct inode *inode, struct file *file)
3537 {
3538 	struct ftrace_ops *ops = inode->i_private;
3539 
3540 	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3541 				 inode, file);
3542 }
3543 
3544 /* Type for quick searching of ftrace basic regexes (globs) from filter_parse_regex */
3545 struct ftrace_glob {
3546 	char *search;
3547 	unsigned len;
3548 	int type;
3549 };
3550 
3551 /*
3552  * If symbols in an architecture don't correspond exactly to the user-visible
3553  * name of what they represent, it is possible to define this function to
3554  * perform the necessary adjustments.
3555  */
3556 char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3557 {
3558 	return str;
3559 }
3560 
3561 static int ftrace_match(char *str, struct ftrace_glob *g)
3562 {
3563 	int matched = 0;
3564 	int slen;
3565 
3566 	str = arch_ftrace_match_adjust(str, g->search);
3567 
3568 	switch (g->type) {
3569 	case MATCH_FULL:
3570 		if (strcmp(str, g->search) == 0)
3571 			matched = 1;
3572 		break;
3573 	case MATCH_FRONT_ONLY:
3574 		if (strncmp(str, g->search, g->len) == 0)
3575 			matched = 1;
3576 		break;
3577 	case MATCH_MIDDLE_ONLY:
3578 		if (strstr(str, g->search))
3579 			matched = 1;
3580 		break;
3581 	case MATCH_END_ONLY:
3582 		slen = strlen(str);
3583 		if (slen >= g->len &&
3584 		    memcmp(str + slen - g->len, g->search, g->len) == 0)
3585 			matched = 1;
3586 		break;
3587 	case MATCH_GLOB:
3588 		if (glob_match(g->search, str))
3589 			matched = 1;
3590 		break;
3591 	}
3592 
3593 	return matched;
3594 }
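
/*
 * For reference, filter_parse_regex() maps user input to the glob
 * types handled above roughly as follows (illustrative examples):
 *
 *	"foo"	-> MATCH_FULL
 *	"foo*"	-> MATCH_FRONT_ONLY
 *	"*foo*"	-> MATCH_MIDDLE_ONLY
 *	"*foo"	-> MATCH_END_ONLY
 *	"f*o"	-> MATCH_GLOB
 */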
3595 
3596 static int
3597 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3598 {
3599 	struct ftrace_func_entry *entry;
3600 	int ret = 0;
3601 
3602 	entry = ftrace_lookup_ip(hash, rec->ip);
3603 	if (clear_filter) {
3604 		/* Do nothing if it doesn't exist */
3605 		if (!entry)
3606 			return 0;
3607 
3608 		free_hash_entry(hash, entry);
3609 	} else {
3610 		/* Do nothing if it exists */
3611 		if (entry)
3612 			return 0;
3613 
3614 		ret = add_hash_entry(hash, rec->ip);
3615 	}
3616 	return ret;
3617 }
3618 
3619 static int
3620 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3621 		struct ftrace_glob *mod_g, int exclude_mod)
3622 {
3623 	char str[KSYM_SYMBOL_LEN];
3624 	char *modname;
3625 
3626 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3627 
3628 	if (mod_g) {
3629 		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3630 
3631 		/* blank module name to match all modules */
3632 		if (!mod_g->len) {
3633 			/* blank module globbing: modname xor exclude_mod */
3634 			if (!exclude_mod != !modname)
3635 				goto func_match;
3636 			return 0;
3637 		}
3638 
3639 		/*
3640 		 * exclude_mod is set to trace everything but the given
3641 		 * module. If it is set and the module matches, then
3642 		 * return 0. If it is not set, and the module doesn't match
3643 		 * return 0. If it is not set and the module doesn't match,
3644 		 * that matches.
3645 		 */
3646 		if (!mod_matches == !exclude_mod)
3647 			return 0;
3648 func_match:
3649 		/* blank search means to match all funcs in the mod */
3650 		if (!func_g->len)
3651 			return 1;
3652 	}
3653 
3654 	return ftrace_match(str, func_g);
3655 }
3656 
3657 static int
3658 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
3659 {
3660 	struct ftrace_page *pg;
3661 	struct dyn_ftrace *rec;
3662 	struct ftrace_glob func_g = { .type = MATCH_FULL };
3663 	struct ftrace_glob mod_g = { .type = MATCH_FULL };
3664 	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3665 	int exclude_mod = 0;
3666 	int found = 0;
3667 	int ret;
3668 	int clear_filter;
3669 
3670 	if (func) {
3671 		func_g.type = filter_parse_regex(func, len, &func_g.search,
3672 						 &clear_filter);
3673 		func_g.len = strlen(func_g.search);
3674 	}
3675 
3676 	if (mod) {
3677 		mod_g.type = filter_parse_regex(mod, strlen(mod),
3678 				&mod_g.search, &exclude_mod);
3679 		mod_g.len = strlen(mod_g.search);
3680 	}
3681 
3682 	mutex_lock(&ftrace_lock);
3683 
3684 	if (unlikely(ftrace_disabled))
3685 		goto out_unlock;
3686 
3687 	do_for_each_ftrace_rec(pg, rec) {
3688 
3689 		if (rec->flags & FTRACE_FL_DISABLED)
3690 			continue;
3691 
3692 		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
3693 			ret = enter_record(hash, rec, clear_filter);
3694 			if (ret < 0) {
3695 				found = ret;
3696 				goto out_unlock;
3697 			}
3698 			found = 1;
3699 		}
3700 	} while_for_each_ftrace_rec();
3701  out_unlock:
3702 	mutex_unlock(&ftrace_lock);
3703 
3704 	return found;
3705 }
3706 
3707 static int
3708 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
3709 {
3710 	return match_records(hash, buff, len, NULL);
3711 }
3712 
3713 static void ftrace_ops_update_code(struct ftrace_ops *ops,
3714 				   struct ftrace_ops_hash *old_hash)
3715 {
3716 	struct ftrace_ops *op;
3717 
3718 	if (!ftrace_enabled)
3719 		return;
3720 
3721 	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
3722 		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3723 		return;
3724 	}
3725 
3726 	/*
3727 	 * If this is the shared global_ops filter, then we need to
3728 	 * check if another ops that shares it is enabled.
3729 	 * If so, we still need to run the modify code.
3730 	 */
3731 	if (ops->func_hash != &global_ops.local_hash)
3732 		return;
3733 
3734 	do_for_each_ftrace_op(op, ftrace_ops_list) {
3735 		if (op->func_hash == &global_ops.local_hash &&
3736 		    op->flags & FTRACE_OPS_FL_ENABLED) {
3737 			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
3738 			/* Only need to do this once */
3739 			return;
3740 		}
3741 	} while_for_each_ftrace_op(op);
3742 }
3743 
3744 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
3745 					   struct ftrace_hash **orig_hash,
3746 					   struct ftrace_hash *hash,
3747 					   int enable)
3748 {
3749 	struct ftrace_ops_hash old_hash_ops;
3750 	struct ftrace_hash *old_hash;
3751 	int ret;
3752 
3753 	old_hash = *orig_hash;
3754 	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
3755 	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
3756 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3757 	if (!ret) {
3758 		ftrace_ops_update_code(ops, &old_hash_ops);
3759 		free_ftrace_hash_rcu(old_hash);
3760 	}
3761 	return ret;
3762 }
3763 
3764 /*
3765  * We register the module command as a template to show others how
3766  * to register a command as well (an illustrative sketch follows below).
3767  */
3768 
3769 static int
3770 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
3771 		    char *func, char *cmd, char *module, int enable)
3772 {
3773 	int ret;
3774 
3775 	/*
3776 	 * cmd == 'mod' because we only registered this func
3777 	 * for the 'mod' ftrace_func_command.
3778 	 * But if you register one func with multiple commands,
3779 	 * you can tell which command was used by the cmd
3780 	 * parameter.
3781 	 */
3782 	ret = match_records(hash, func, strlen(func), module);
3783 	if (!ret)
3784 		return -EINVAL;
3785 	if (ret < 0)
3786 		return ret;
3787 	return 0;
3788 }
3789 
3790 static struct ftrace_func_command ftrace_mod_cmd = {
3791 	.name			= "mod",
3792 	.func			= ftrace_mod_callback,
3793 };
3794 
3795 static int __init ftrace_mod_cmd_init(void)
3796 {
3797 	return register_ftrace_command(&ftrace_mod_cmd);
3798 }
3799 core_initcall(ftrace_mod_cmd_init);
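
/*
 * An illustrative sketch of registering another command, following
 * the 'mod' template above (my_cmd_callback and "mycmd" are
 * hypothetical):
 *
 *	static int
 *	my_cmd_callback(struct trace_array *tr, struct ftrace_hash *hash,
 *			char *func, char *cmd, char *param, int enable)
 *	{
 *		... update @hash based on @func and @param, return 0 ...
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */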
3800 
3801 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3802 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
3803 {
3804 	struct ftrace_probe_ops *probe_ops;
3805 	struct ftrace_func_probe *probe;
3806 
3807 	probe = container_of(op, struct ftrace_func_probe, ops);
3808 	probe_ops = probe->probe_ops;
3809 
3810 	/*
3811 	 * Disable preemption for these calls to prevent an RCU grace
3812 	 * period. This syncs the hash iteration and freeing of items
3813 	 * on the hash. rcu_read_lock is too dangerous here.
3814 	 */
3815 	preempt_disable_notrace();
3816 	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
3817 	preempt_enable_notrace();
3818 }
3819 
3820 struct ftrace_func_map {
3821 	struct ftrace_func_entry	entry;
3822 	void				*data;
3823 };
3824 
3825 struct ftrace_func_mapper {
3826 	struct ftrace_hash		hash;
3827 };
3828 
3829 /**
3830  * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
3831  *
3832  * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
3833  */
3834 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
3835 {
3836 	struct ftrace_hash *hash;
3837 
3838 	/*
3839 	 * The mapper is simply a ftrace_hash, but since the entries
3840 	 * in the hash are not ftrace_func_entry type, we define it
3841 	 * as a separate structure.
3842 	 */
3843 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
3844 	return (struct ftrace_func_mapper *)hash;
3845 }
3846 
3847 /**
3848  * ftrace_func_mapper_find_ip - Find some data mapped to an ip
3849  * @mapper: The mapper that has the ip maps
3850  * @ip: the instruction pointer to find the data for
3851  *
3852  * Returns the data mapped to @ip if found otherwise NULL. The return
3853  * is actually the address of the mapper data pointer. The address is
3854  * returned for use cases where the data is no bigger than a long, and
3855  * the user can use the data pointer as its data instead of having to
3856  * allocate more memory for the reference.
3857  */
3858 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
3859 				  unsigned long ip)
3860 {
3861 	struct ftrace_func_entry *entry;
3862 	struct ftrace_func_map *map;
3863 
3864 	entry = ftrace_lookup_ip(&mapper->hash, ip);
3865 	if (!entry)
3866 		return NULL;
3867 
3868 	map = (struct ftrace_func_map *)entry;
3869 	return &map->data;
3870 }
3871 
3872 /**
3873  * ftrace_func_mapper_add_ip - Map some data to an ip
3874  * @mapper: The mapper that has the ip maps
3875  * @ip: The instruction pointer address to map @data to
3876  * @data: The data to map to @ip
3877  *
3878  * Returns 0 on succes otherwise an error.
3879  */
3880 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
3881 			      unsigned long ip, void *data)
3882 {
3883 	struct ftrace_func_entry *entry;
3884 	struct ftrace_func_map *map;
3885 
3886 	entry = ftrace_lookup_ip(&mapper->hash, ip);
3887 	if (entry)
3888 		return -EBUSY;
3889 
3890 	map = kmalloc(sizeof(*map), GFP_KERNEL);
3891 	if (!map)
3892 		return -ENOMEM;
3893 
3894 	map->entry.ip = ip;
3895 	map->data = data;
3896 
3897 	__add_hash_entry(&mapper->hash, &map->entry);
3898 
3899 	return 0;
3900 }
3901 
3902 /**
3903  * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
3904  * @mapper: The mapper that has the ip maps
3905  * @ip: The instruction pointer address to remove the data from
3906  *
3907  * Returns the data if it is found, otherwise NULL.
3908  * Note, if the data pointer is used as the data itself (see
3909  * ftrace_func_mapper_find_ip()), then the return value may be meaningless
3910  * if the data pointer was set to zero.
3911  */
3912 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
3913 				   unsigned long ip)
3914 {
3915 	struct ftrace_func_entry *entry;
3916 	struct ftrace_func_map *map;
3917 	void *data;
3918 
3919 	entry = ftrace_lookup_ip(&mapper->hash, ip);
3920 	if (!entry)
3921 		return NULL;
3922 
3923 	map = (struct ftrace_func_map *)entry;
3924 	data = map->data;
3925 
3926 	remove_hash_entry(&mapper->hash, entry);
3927 	kfree(entry);
3928 
3929 	return data;
3930 }
3931 
3932 /**
3933  * free_ftrace_func_mapper - free a mapping of ips and data
3934  * @mapper: The mapper that has the ip maps
3935  * @free_func: A function to be called on each data item.
3936  *
3937  * This is used to free the function mapper. The @free_func is optional
3938  * and can be used if the data needs to be freed as well.
3939  */
3940 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
3941 			     ftrace_mapper_func free_func)
3942 {
3943 	struct ftrace_func_entry *entry;
3944 	struct ftrace_func_map *map;
3945 	struct hlist_head *hhd;
3946 	int size = 1 << mapper->hash.size_bits;
3947 	int i;
3948 
3949 	if (free_func && mapper->hash.count) {
3950 		for (i = 0; i < size; i++) {
3951 			hhd = &mapper->hash.buckets[i];
3952 			hlist_for_each_entry(entry, hhd, hlist) {
3953 				map = (struct ftrace_func_map *)entry;
3954 				free_func(map);
3955 			}
3956 		}
3957 	}
3958 	free_ftrace_hash(&mapper->hash);
3959 }
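
/*
 * Minimal usage sketch of the mapper API above (error handling
 * elided; my_free_func is a hypothetical ftrace_mapper_func):
 *
 *	struct ftrace_func_mapper *mapper;
 *	void **val;
 *
 *	mapper = allocate_ftrace_func_mapper();
 *	ftrace_func_mapper_add_ip(mapper, ip, data);
 *	val = ftrace_func_mapper_find_ip(mapper, ip);
 *	...
 *	free_ftrace_func_mapper(mapper, my_free_func);
 */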
3960 
3961 static void release_probe(struct ftrace_func_probe *probe)
3962 {
3963 	struct ftrace_probe_ops *probe_ops;
3964 
3965 	mutex_lock(&ftrace_lock);
3966 
3967 	WARN_ON(probe->ref <= 0);
3968 
3969 	/* Subtract the ref that was used to protect this instance */
3970 	probe->ref--;
3971 
3972 	if (!probe->ref) {
3973 		probe_ops = probe->probe_ops;
3974 		/*
3975 		 * Sending zero as ip tells probe_ops to free
3976 		 * the probe->data itself
3977 		 */
3978 		if (probe_ops->free)
3979 			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
3980 		list_del(&probe->list);
3981 		kfree(probe);
3982 	}
3983 	mutex_unlock(&ftrace_lock);
3984 }
3985 
3986 static void acquire_probe_locked(struct ftrace_func_probe *probe)
3987 {
3988 	/*
3989 	 * Add one ref to keep it from being freed when releasing the
3990 	 * ftrace_lock mutex.
3991 	 */
3992 	probe->ref++;
3993 }
3994 
3995 int
3996 register_ftrace_function_probe(char *glob, struct trace_array *tr,
3997 			       struct ftrace_probe_ops *probe_ops,
3998 			       void *data)
3999 {
4000 	struct ftrace_func_entry *entry;
4001 	struct ftrace_func_probe *probe;
4002 	struct ftrace_hash **orig_hash;
4003 	struct ftrace_hash *old_hash;
4004 	struct ftrace_hash *hash;
4005 	int count = 0;
4006 	int size;
4007 	int ret;
4008 	int i;
4009 
4010 	if (WARN_ON(!tr))
4011 		return -EINVAL;
4012 
4013 	/* We do not support '!' for function probes */
4014 	if (WARN_ON(glob[0] == '!'))
4015 		return -EINVAL;
4016 
4018 	mutex_lock(&ftrace_lock);
4019 	/* Check if the probe_ops is already registered */
4020 	list_for_each_entry(probe, &tr->func_probes, list) {
4021 		if (probe->probe_ops == probe_ops)
4022 			break;
4023 	}
4024 	if (&probe->list == &tr->func_probes) {
4025 		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4026 		if (!probe) {
4027 			mutex_unlock(&ftrace_lock);
4028 			return -ENOMEM;
4029 		}
4030 		probe->probe_ops = probe_ops;
4031 		probe->ops.func = function_trace_probe_call;
4032 		probe->tr = tr;
4033 		ftrace_ops_init(&probe->ops);
4034 		list_add(&probe->list, &tr->func_probes);
4035 	}
4036 
4037 	acquire_probe_locked(probe);
4038 
4039 	mutex_unlock(&ftrace_lock);
4040 
4041 	mutex_lock(&probe->ops.func_hash->regex_lock);
4042 
4043 	orig_hash = &probe->ops.func_hash->filter_hash;
4044 	old_hash = *orig_hash;
4045 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
	if (!hash) {
		ret = -ENOMEM;
		goto out;
	}

4047 	ret = ftrace_match_records(hash, glob, strlen(glob));
4048 
4049 	/* Nothing found? */
4050 	if (!ret)
4051 		ret = -EINVAL;
4052 
4053 	if (ret < 0)
4054 		goto out;
4055 
4056 	size = 1 << hash->size_bits;
4057 	for (i = 0; i < size; i++) {
4058 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4059 			if (ftrace_lookup_ip(old_hash, entry->ip))
4060 				continue;
4061 			/*
4062 			 * The caller might want to do something special
4063 			 * for each function we find. We call the callback
4064 			 * to give the caller an opportunity to do so.
4065 			 */
4066 			if (probe_ops->init) {
4067 				ret = probe_ops->init(probe_ops, tr,
4068 						      entry->ip, data,
4069 						      &probe->data);
4070 				if (ret < 0) {
4071 					if (probe_ops->free && count)
4072 						probe_ops->free(probe_ops, tr,
4073 								0, probe->data);
4074 					probe->data = NULL;
4075 					goto out;
4076 				}
4077 			}
4078 			count++;
4079 		}
4080 	}
4081 
4082 	mutex_lock(&ftrace_lock);
4083 
4084 	if (!count) {
4085 		/* Nothing was added? */
4086 		ret = -EINVAL;
4087 		goto out_unlock;
4088 	}
4089 
4090 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4091 					      hash, 1);
4092 	if (ret < 0)
4093 		goto err_unlock;
4094 
4095 	/* One ref for each new function traced */
4096 	probe->ref += count;
4097 
4098 	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4099 		ret = ftrace_startup(&probe->ops, 0);
4100 
4101  out_unlock:
4102 	mutex_unlock(&ftrace_lock);
4103 
4104 	if (!ret)
4105 		ret = count;
4106  out:
4107 	mutex_unlock(&probe->ops.func_hash->regex_lock);
4108 	free_ftrace_hash(hash);
4109 
4110 	release_probe(probe);
4111 
4112 	return ret;
4113 
4114  err_unlock:
4115 	if (!probe_ops->free || !count)
4116 		goto out_unlock;
4117 
4118 	/* Failed to do the move, need to call the free functions */
4119 	for (i = 0; i < size; i++) {
4120 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4121 			if (ftrace_lookup_ip(old_hash, entry->ip))
4122 				continue;
4123 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4124 		}
4125 	}
4126 	goto out_unlock;
4127 }
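
/*
 * A registration sketch (names here are hypothetical; the init/free
 * signatures match the calls made above). On success the return value is
 * the number of functions that matched the glob:
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *		.init	= my_probe_init,
 *		.free	= my_probe_free,
 *	};
 *
 *	ret = register_ftrace_function_probe("sched_*", tr,
 *					     &my_probe_ops, my_data);
 */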
4128 
4129 int
4130 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4131 				      struct ftrace_probe_ops *probe_ops)
4132 {
4133 	struct ftrace_ops_hash old_hash_ops;
4134 	struct ftrace_func_entry *entry;
4135 	struct ftrace_func_probe *probe;
4136 	struct ftrace_glob func_g;
4137 	struct ftrace_hash **orig_hash;
4138 	struct ftrace_hash *old_hash;
4139 	struct ftrace_hash *hash = NULL;
4140 	struct hlist_node *tmp;
4141 	struct hlist_head hhd;
4142 	char str[KSYM_SYMBOL_LEN];
4143 	int count = 0;
4144 	int i, ret = -ENODEV;
4145 	int size;
4146 
4147 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
4148 		func_g.search = NULL;
4149 	else if (glob) {
4150 		int not;
4151 
4152 		func_g.type = filter_parse_regex(glob, strlen(glob),
4153 						 &func_g.search, &not);
4154 		func_g.len = strlen(func_g.search);
4156 
4157 		/* we do not support '!' for function probes */
4158 		if (WARN_ON(not))
4159 			return -EINVAL;
4160 	}
4161 
4162 	mutex_lock(&ftrace_lock);
4163 	/* Check if the probe_ops is already registered */
4164 	list_for_each_entry(probe, &tr->func_probes, list) {
4165 		if (probe->probe_ops == probe_ops)
4166 			break;
4167 	}
4168 	if (&probe->list == &tr->func_probes)
4169 		goto err_unlock_ftrace;
4170 
4171 	ret = -EINVAL;
4172 	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4173 		goto err_unlock_ftrace;
4174 
4175 	acquire_probe_locked(probe);
4176 
4177 	mutex_unlock(&ftrace_lock);
4178 
4179 	mutex_lock(&probe->ops.func_hash->regex_lock);
4180 
4181 	orig_hash = &probe->ops.func_hash->filter_hash;
4182 	old_hash = *orig_hash;
4183 
4184 	if (ftrace_hash_empty(old_hash))
4185 		goto out_unlock;
4186 
4187 	old_hash_ops.filter_hash = old_hash;
4188 	/* Probes only have filters */
4189 	old_hash_ops.notrace_hash = NULL;
4190 
4191 	ret = -ENOMEM;
4192 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4193 	if (!hash)
4194 		goto out_unlock;
4195 
4196 	INIT_HLIST_HEAD(&hhd);
4197 
4198 	size = 1 << hash->size_bits;
4199 	for (i = 0; i < size; i++) {
4200 		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4201 
4202 			if (func_g.search) {
4203 				kallsyms_lookup(entry->ip, NULL, NULL,
4204 						NULL, str);
4205 				if (!ftrace_match(str, &func_g))
4206 					continue;
4207 			}
4208 			count++;
4209 			remove_hash_entry(hash, entry);
4210 			hlist_add_head(&entry->hlist, &hhd);
4211 		}
4212 	}
4213 
4214 	/* Nothing found? */
4215 	if (!count) {
4216 		ret = -EINVAL;
4217 		goto out_unlock;
4218 	}
4219 
4220 	mutex_lock(&ftrace_lock);
4221 
4222 	WARN_ON(probe->ref < count);
4223 
4224 	probe->ref -= count;
4225 
4226 	if (ftrace_hash_empty(hash))
4227 		ftrace_shutdown(&probe->ops, 0);
4228 
4229 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4230 					      hash, 1);
4231 
4232 	/* still need to update the function call sites */
4233 	if (ftrace_enabled && !ftrace_hash_empty(hash))
4234 		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4235 				       &old_hash_ops);
4236 	synchronize_sched();
4237 
4238 	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4239 		hlist_del(&entry->hlist);
4240 		if (probe_ops->free)
4241 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4242 		kfree(entry);
4243 	}
4244 	mutex_unlock(&ftrace_lock);
4245 
4246  out_unlock:
4247 	mutex_unlock(&probe->ops.func_hash->regex_lock);
4248 	free_ftrace_hash(hash);
4249 
4250 	release_probe(probe);
4251 
4252 	return ret;
4253 
4254  err_unlock_ftrace:
4255 	mutex_unlock(&ftrace_lock);
4256 	return ret;
4257 }
4258 
4259 static LIST_HEAD(ftrace_commands);
4260 static DEFINE_MUTEX(ftrace_cmd_mutex);
4261 
4262 /*
4263  * Currently we only register ftrace commands from __init, so mark this
4264  * __init too.
4265  */
4266 __init int register_ftrace_command(struct ftrace_func_command *cmd)
4267 {
4268 	struct ftrace_func_command *p;
4269 	int ret = 0;
4270 
4271 	mutex_lock(&ftrace_cmd_mutex);
4272 	list_for_each_entry(p, &ftrace_commands, list) {
4273 		if (strcmp(cmd->name, p->name) == 0) {
4274 			ret = -EBUSY;
4275 			goto out_unlock;
4276 		}
4277 	}
4278 	list_add(&cmd->list, &ftrace_commands);
4279  out_unlock:
4280 	mutex_unlock(&ftrace_cmd_mutex);
4281 
4282 	return ret;
4283 }
4284 
4285 /*
4286  * Currently we only unregister ftrace commands from __init, so mark
4287  * this __init too.
4288  */
4289 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4290 {
4291 	struct ftrace_func_command *p, *n;
4292 	int ret = -ENODEV;
4293 
4294 	mutex_lock(&ftrace_cmd_mutex);
4295 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4296 		if (strcmp(cmd->name, p->name) == 0) {
4297 			ret = 0;
4298 			list_del_init(&p->list);
4299 			goto out_unlock;
4300 		}
4301 	}
4302  out_unlock:
4303 	mutex_unlock(&ftrace_cmd_mutex);
4304 
4305 	return ret;
4306 }
4307 
4308 static int ftrace_process_regex(struct ftrace_iterator *iter,
4309 				char *buff, int len, int enable)
4310 {
4311 	struct ftrace_hash *hash = iter->hash;
4312 	struct trace_array *tr = iter->ops->private;
4313 	char *func, *command, *next = buff;
4314 	struct ftrace_func_command *p;
4315 	int ret = -EINVAL;
4316 
4317 	func = strsep(&next, ":");
4318 
4319 	if (!next) {
4320 		ret = ftrace_match_records(hash, func, len);
4321 		if (!ret)
4322 			ret = -EINVAL;
4323 		if (ret < 0)
4324 			return ret;
4325 		return 0;
4326 	}
4327 
4328 	/* command found */
4329 
4330 	command = strsep(&next, ":");
4331 
4332 	if (WARN_ON_ONCE(!tr))
4333 		return -EINVAL;
4334 
4335 	mutex_lock(&ftrace_cmd_mutex);
4336 	list_for_each_entry(p, &ftrace_commands, list) {
4337 		if (strcmp(p->name, command) == 0) {
4338 			ret = p->func(tr, hash, func, command, next, enable);
4339 			goto out_unlock;
4340 		}
4341 	}
4342  out_unlock:
4343 	mutex_unlock(&ftrace_cmd_mutex);
4344 
4345 	return ret;
4346 }
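
/*
 * The accepted syntax is thus "function[:command[:parameter]]". For
 * example, with the function tracer's "traceoff" command registered,
 * writing the following to set_ftrace_filter stops tracing when
 * schedule() is hit (shell sketch):
 *
 *	echo 'schedule:traceoff' > set_ftrace_filter
 */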
4347 
4348 static ssize_t
4349 ftrace_regex_write(struct file *file, const char __user *ubuf,
4350 		   size_t cnt, loff_t *ppos, int enable)
4351 {
4352 	struct ftrace_iterator *iter;
4353 	struct trace_parser *parser;
4354 	ssize_t ret, read;
4355 
4356 	if (!cnt)
4357 		return 0;
4358 
4359 	if (file->f_mode & FMODE_READ) {
4360 		struct seq_file *m = file->private_data;
4361 		iter = m->private;
4362 	} else
4363 		iter = file->private_data;
4364 
4365 	if (unlikely(ftrace_disabled))
4366 		return -ENODEV;
4367 
4368 	/* iter->hash is a local copy, so we don't need regex_lock */
4369 
4370 	parser = &iter->parser;
4371 	read = trace_get_user(parser, ubuf, cnt, ppos);
4372 
4373 	if (read >= 0 && trace_parser_loaded(parser) &&
4374 	    !trace_parser_cont(parser)) {
4375 		ret = ftrace_process_regex(iter, parser->buffer,
4376 					   parser->idx, enable);
4377 		trace_parser_clear(parser);
4378 		if (ret < 0)
4379 			goto out;
4380 	}
4381 
4382 	ret = read;
4383  out:
4384 	return ret;
4385 }
4386 
4387 ssize_t
4388 ftrace_filter_write(struct file *file, const char __user *ubuf,
4389 		    size_t cnt, loff_t *ppos)
4390 {
4391 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4392 }
4393 
4394 ssize_t
4395 ftrace_notrace_write(struct file *file, const char __user *ubuf,
4396 		     size_t cnt, loff_t *ppos)
4397 {
4398 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4399 }
4400 
4401 static int
4402 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4403 {
4404 	struct ftrace_func_entry *entry;
4405 
4406 	if (!ftrace_location(ip))
4407 		return -EINVAL;
4408 
4409 	if (remove) {
4410 		entry = ftrace_lookup_ip(hash, ip);
4411 		if (!entry)
4412 			return -ENOENT;
4413 		free_hash_entry(hash, entry);
4414 		return 0;
4415 	}
4416 
4417 	return add_hash_entry(hash, ip);
4418 }
4419 
4420 static int
4421 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4422 		unsigned long ip, int remove, int reset, int enable)
4423 {
4424 	struct ftrace_hash **orig_hash;
4425 	struct ftrace_hash *hash;
4426 	int ret;
4427 
4428 	if (unlikely(ftrace_disabled))
4429 		return -ENODEV;
4430 
4431 	mutex_lock(&ops->func_hash->regex_lock);
4432 
4433 	if (enable)
4434 		orig_hash = &ops->func_hash->filter_hash;
4435 	else
4436 		orig_hash = &ops->func_hash->notrace_hash;
4437 
4438 	if (reset)
4439 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4440 	else
4441 		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4442 
4443 	if (!hash) {
4444 		ret = -ENOMEM;
4445 		goto out_regex_unlock;
4446 	}
4447 
4448 	if (buf && !ftrace_match_records(hash, buf, len)) {
4449 		ret = -EINVAL;
4450 		goto out_regex_unlock;
4451 	}
4452 	if (ip) {
4453 		ret = ftrace_match_addr(hash, ip, remove);
4454 		if (ret < 0)
4455 			goto out_regex_unlock;
4456 	}
4457 
4458 	mutex_lock(&ftrace_lock);
4459 	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
4460 	mutex_unlock(&ftrace_lock);
4461 
4462  out_regex_unlock:
4463 	mutex_unlock(&ops->func_hash->regex_lock);
4464 
4465 	free_ftrace_hash(hash);
4466 	return ret;
4467 }
4468 
4469 static int
4470 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4471 		int reset, int enable)
4472 {
4473 	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4474 }
4475 
4476 /**
4477  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4478  * @ops - the ops to set the filter with
4479  * @ip - the address to add to or remove from the filter.
4480  * @remove - non zero to remove the ip from the filter
4481  * @reset - non zero to reset all filters before applying this filter.
4482  *
4483  * Filters denote which functions should be enabled when tracing is enabled
4484  * If @ip is NULL, it failes to update filter.
4485  */
4486 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4487 			 int remove, int reset)
4488 {
4489 	ftrace_ops_init(ops);
4490 	return ftrace_set_addr(ops, ip, remove, reset, 1);
4491 }
4492 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
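
/*
 * A usage sketch (assuming "my_ops" is the caller's ftrace_ops and the
 * symbol can be resolved through kallsyms):
 *
 *	unsigned long ip = kallsyms_lookup_name("schedule");
 *	int ret;
 *
 *	if (ip)
 *		ret = ftrace_set_filter_ip(&my_ops, ip, 0, 1);
 */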
4493 
4494 /**
4495  * ftrace_ops_set_global_filter - setup ops to use global filters
4496  * @ops - the ops which will use the global filters
4497  *
4498  * ftrace users who need global function trace filtering should call this.
4499  * It can set the global filter only if ops were not initialized before.
4500  */
4501 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
4502 {
4503 	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
4504 		return;
4505 
4506 	ftrace_ops_init(ops);
4507 	ops->func_hash = &global_ops.local_hash;
4508 }
4509 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
4510 
4511 static int
4512 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4513 		 int reset, int enable)
4514 {
4515 	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4516 }
4517 
4518 /**
4519  * ftrace_set_filter - set a function to filter on in ftrace
4520  * @ops - the ops to set the filter with
4521  * @buf - the string that holds the function filter text.
4522  * @len - the length of the string.
4523  * @reset - non zero to reset all filters before applying this filter.
4524  *
4525  * Filters denote which functions should be enabled when tracing is enabled.
4526  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4527  */
4528 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
4529 		       int len, int reset)
4530 {
4531 	ftrace_ops_init(ops);
4532 	return ftrace_set_regex(ops, buf, len, reset, 1);
4533 }
4534 EXPORT_SYMBOL_GPL(ftrace_set_filter);
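
/*
 * For example (sketch; "my_ops" is hypothetical), to reset the filter and
 * trace only the wake-up paths before registering the ops:
 *
 *	ftrace_set_filter(&my_ops, "wake_up*", strlen("wake_up*"), 1);
 */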
4535 
4536 /**
4537  * ftrace_set_notrace - set a function to not trace in ftrace
4538  * @ops - the ops to set the notrace filter with
4539  * @buf - the string that holds the function notrace text.
4540  * @len - the length of the string.
4541  * @reset - non zero to reset all filters before applying this filter.
4542  *
4543  * Notrace Filters denote which functions should not be enabled when tracing
4544  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4545  * for tracing.
4546  */
4547 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
4548 			int len, int reset)
4549 {
4550 	ftrace_ops_init(ops);
4551 	return ftrace_set_regex(ops, buf, len, reset, 0);
4552 }
4553 EXPORT_SYMBOL_GPL(ftrace_set_notrace);

4554 /**
4555  * ftrace_set_global_filter - set a function to filter on with global tracers
4556  * @buf - the string that holds the function filter text.
4557  * @len - the length of the string.
4558  * @reset - non zero to reset all filters before applying this filter.
4559  *
4560  * Filters denote which functions should be enabled when tracing is enabled.
4561  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4562  */
4563 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
4564 {
4565 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
4566 }
4567 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4568 
4569 /**
4570  * ftrace_set_global_notrace - set a function to not trace with global tracers
4571  * @buf - the string that holds the function notrace text.
4572  * @len - the length of the string.
4573  * @reset - non zero to reset all filters before applying this filter.
4574  *
4575  * Notrace Filters denote which functions should not be enabled when tracing
4576  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4577  * for tracing.
4578  */
4579 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
4580 {
4581 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
4582 }
4583 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
4584 
4585 /*
4586  * command line interface to allow users to set filters on boot up.
4587  */
4588 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
4589 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4590 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4591 
4592 /* Used by function selftest to not test if filter is set */
4593 bool ftrace_filter_param __initdata;
4594 
4595 static int __init set_ftrace_notrace(char *str)
4596 {
4597 	ftrace_filter_param = true;
4598 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
4599 	return 1;
4600 }
4601 __setup("ftrace_notrace=", set_ftrace_notrace);
4602 
4603 static int __init set_ftrace_filter(char *str)
4604 {
4605 	ftrace_filter_param = true;
4606 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
4607 	return 1;
4608 }
4609 __setup("ftrace_filter=", set_ftrace_filter);
4610 
4611 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4612 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
4613 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4614 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
4615 
4616 static unsigned long save_global_trampoline;
4617 static unsigned long save_global_flags;
4618 
4619 static int __init set_graph_function(char *str)
4620 {
4621 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
4622 	return 1;
4623 }
4624 __setup("ftrace_graph_filter=", set_graph_function);
4625 
4626 static int __init set_graph_notrace_function(char *str)
4627 {
4628 	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4629 	return 1;
4630 }
4631 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
4632 
4633 static int __init set_graph_max_depth_function(char *str)
4634 {
4635 	if (!str)
4636 		return 0;
4637 	fgraph_max_depth = simple_strtoul(str, NULL, 0);
4638 	return 1;
4639 }
4640 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
4641 
4642 static void __init set_ftrace_early_graph(char *buf, int enable)
4643 {
4644 	int ret;
4645 	char *func;
4646 	struct ftrace_hash *hash;
4647 
4648 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4649 	if (WARN_ON(!hash))
4650 		return;
4651 
4652 	while (buf) {
4653 		func = strsep(&buf, ",");
4654 		/* we allow only one expression at a time */
4655 		ret = ftrace_graph_set_hash(hash, func);
4656 		if (ret)
4657 			printk(KERN_DEBUG "ftrace: function %s not traceable\n",
4658 			       func);
4659 	}
4660 
4661 	if (enable)
4662 		ftrace_graph_hash = hash;
4663 	else
4664 		ftrace_graph_notrace_hash = hash;
4665 }
4666 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4667 
4668 void __init
4669 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
4670 {
4671 	char *func;
4672 
4673 	ftrace_ops_init(ops);
4674 
4675 	while (buf) {
4676 		func = strsep(&buf, ",");
4677 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
4678 	}
4679 }
4680 
4681 static void __init set_ftrace_early_filters(void)
4682 {
4683 	if (ftrace_filter_buf[0])
4684 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
4685 	if (ftrace_notrace_buf[0])
4686 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
4687 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4688 	if (ftrace_graph_buf[0])
4689 		set_ftrace_early_graph(ftrace_graph_buf, 1);
4690 	if (ftrace_graph_notrace_buf[0])
4691 		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
4692 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4693 }
4694 
4695 int ftrace_regex_release(struct inode *inode, struct file *file)
4696 {
4697 	struct seq_file *m = (struct seq_file *)file->private_data;
4698 	struct ftrace_iterator *iter;
4699 	struct ftrace_hash **orig_hash;
4700 	struct trace_parser *parser;
4701 	int filter_hash;
4702 	int ret;
4703 
4704 	if (file->f_mode & FMODE_READ) {
4705 		iter = m->private;
4706 		seq_release(inode, file);
4707 	} else
4708 		iter = file->private_data;
4709 
4710 	parser = &iter->parser;
4711 	if (trace_parser_loaded(parser)) {
4712 		parser->buffer[parser->idx] = 0;
4713 		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
4714 	}
4715 
4716 	trace_parser_put(parser);
4717 
4718 	mutex_lock(&iter->ops->func_hash->regex_lock);
4719 
4720 	if (file->f_mode & FMODE_WRITE) {
4721 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4722 
4723 		if (filter_hash)
4724 			orig_hash = &iter->ops->func_hash->filter_hash;
4725 		else
4726 			orig_hash = &iter->ops->func_hash->notrace_hash;
4727 
4728 		mutex_lock(&ftrace_lock);
4729 		ret = ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
4730 						      iter->hash, filter_hash);
4731 		mutex_unlock(&ftrace_lock);
4732 	} else {
4733 		/* For read only, the hash is the ops hash */
4734 		iter->hash = NULL;
4735 	}
4736 
4737 	mutex_unlock(&iter->ops->func_hash->regex_lock);
4738 	free_ftrace_hash(iter->hash);
4739 	kfree(iter);
4740 
4741 	return 0;
4742 }
4743 
4744 static const struct file_operations ftrace_avail_fops = {
4745 	.open = ftrace_avail_open,
4746 	.read = seq_read,
4747 	.llseek = seq_lseek,
4748 	.release = seq_release_private,
4749 };
4750 
4751 static const struct file_operations ftrace_enabled_fops = {
4752 	.open = ftrace_enabled_open,
4753 	.read = seq_read,
4754 	.llseek = seq_lseek,
4755 	.release = seq_release_private,
4756 };
4757 
4758 static const struct file_operations ftrace_filter_fops = {
4759 	.open = ftrace_filter_open,
4760 	.read = seq_read,
4761 	.write = ftrace_filter_write,
4762 	.llseek = tracing_lseek,
4763 	.release = ftrace_regex_release,
4764 };
4765 
4766 static const struct file_operations ftrace_notrace_fops = {
4767 	.open = ftrace_notrace_open,
4768 	.read = seq_read,
4769 	.write = ftrace_notrace_write,
4770 	.llseek = tracing_lseek,
4771 	.release = ftrace_regex_release,
4772 };
4773 
4774 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4775 
4776 static DEFINE_MUTEX(graph_lock);
4777 
4778 struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
4779 struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
4780 
4781 enum graph_filter_type {
4782 	GRAPH_FILTER_NOTRACE	= 0,
4783 	GRAPH_FILTER_FUNCTION,
4784 };
4785 
4786 #define FTRACE_GRAPH_EMPTY	((void *)1)
4787 
4788 struct ftrace_graph_data {
4789 	struct ftrace_hash		*hash;
4790 	struct ftrace_func_entry	*entry;
4791 	int				idx;   /* for hash table iteration */
4792 	enum graph_filter_type		type;
4793 	struct ftrace_hash		*new_hash;
4794 	const struct seq_operations	*seq_ops;
4795 	struct trace_parser		parser;
4796 };
4797 
4798 static void *
4799 __g_next(struct seq_file *m, loff_t *pos)
4800 {
4801 	struct ftrace_graph_data *fgd = m->private;
4802 	struct ftrace_func_entry *entry = fgd->entry;
4803 	struct hlist_head *head;
4804 	int i, idx = fgd->idx;
4805 
4806 	if (*pos >= fgd->hash->count)
4807 		return NULL;
4808 
4809 	if (entry) {
4810 		hlist_for_each_entry_continue(entry, hlist) {
4811 			fgd->entry = entry;
4812 			return entry;
4813 		}
4814 
4815 		idx++;
4816 	}
4817 
4818 	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
4819 		head = &fgd->hash->buckets[i];
4820 		hlist_for_each_entry(entry, head, hlist) {
4821 			fgd->entry = entry;
4822 			fgd->idx = i;
4823 			return entry;
4824 		}
4825 	}
4826 	return NULL;
4827 }
4828 
4829 static void *
4830 g_next(struct seq_file *m, void *v, loff_t *pos)
4831 {
4832 	(*pos)++;
4833 	return __g_next(m, pos);
4834 }
4835 
4836 static void *g_start(struct seq_file *m, loff_t *pos)
4837 {
4838 	struct ftrace_graph_data *fgd = m->private;
4839 
4840 	mutex_lock(&graph_lock);
4841 
4842 	if (fgd->type == GRAPH_FILTER_FUNCTION)
4843 		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
4844 					lockdep_is_held(&graph_lock));
4845 	else
4846 		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
4847 					lockdep_is_held(&graph_lock));
4848 
4849 	/* Nothing, tell g_show to print all functions are enabled */
4850 	if (ftrace_hash_empty(fgd->hash) && !*pos)
4851 		return FTRACE_GRAPH_EMPTY;
4852 
4853 	fgd->idx = 0;
4854 	fgd->entry = NULL;
4855 	return __g_next(m, pos);
4856 }
4857 
4858 static void g_stop(struct seq_file *m, void *p)
4859 {
4860 	mutex_unlock(&graph_lock);
4861 }
4862 
4863 static int g_show(struct seq_file *m, void *v)
4864 {
4865 	struct ftrace_func_entry *entry = v;
4866 
4867 	if (!entry)
4868 		return 0;
4869 
4870 	if (entry == FTRACE_GRAPH_EMPTY) {
4871 		struct ftrace_graph_data *fgd = m->private;
4872 
4873 		if (fgd->type == GRAPH_FILTER_FUNCTION)
4874 			seq_puts(m, "#### all functions enabled ####\n");
4875 		else
4876 			seq_puts(m, "#### no functions disabled ####\n");
4877 		return 0;
4878 	}
4879 
4880 	seq_printf(m, "%ps\n", (void *)entry->ip);
4881 
4882 	return 0;
4883 }
4884 
4885 static const struct seq_operations ftrace_graph_seq_ops = {
4886 	.start = g_start,
4887 	.next = g_next,
4888 	.stop = g_stop,
4889 	.show = g_show,
4890 };
4891 
4892 static int
4893 __ftrace_graph_open(struct inode *inode, struct file *file,
4894 		    struct ftrace_graph_data *fgd)
4895 {
4896 	int ret = 0;
4897 	struct ftrace_hash *new_hash = NULL;
4898 
4899 	if (file->f_mode & FMODE_WRITE) {
4900 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
4901 
4902 		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
4903 			return -ENOMEM;
4904 
4905 		if (file->f_flags & O_TRUNC)
4906 			new_hash = alloc_ftrace_hash(size_bits);
4907 		else
4908 			new_hash = alloc_and_copy_ftrace_hash(size_bits,
4909 							      fgd->hash);
4910 		if (!new_hash) {
4911 			ret = -ENOMEM;
4912 			goto out;
4913 		}
4914 	}
4915 
4916 	if (file->f_mode & FMODE_READ) {
4917 		ret = seq_open(file, &ftrace_graph_seq_ops);
4918 		if (!ret) {
4919 			struct seq_file *m = file->private_data;
4920 			m->private = fgd;
4921 		} else {
4922 			/* Failed */
4923 			free_ftrace_hash(new_hash);
4924 			new_hash = NULL;
4925 		}
4926 	} else
4927 		file->private_data = fgd;
4928 
4929 out:
4930 	if (ret < 0 && file->f_mode & FMODE_WRITE)
4931 		trace_parser_put(&fgd->parser);
4932 
4933 	fgd->new_hash = new_hash;
4934 
4935 	/*
4936 	 * All uses of fgd->hash must be taken with the graph_lock
4937 	 * held. The graph_lock is going to be released, so force
4938 	 * fgd->hash to be reinitialized when it is taken again.
4939 	 */
4940 	fgd->hash = NULL;
4941 
4942 	return ret;
4943 }
4944 
4945 static int
4946 ftrace_graph_open(struct inode *inode, struct file *file)
4947 {
4948 	struct ftrace_graph_data *fgd;
4949 	int ret;
4950 
4951 	if (unlikely(ftrace_disabled))
4952 		return -ENODEV;
4953 
4954 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4955 	if (fgd == NULL)
4956 		return -ENOMEM;
4957 
4958 	mutex_lock(&graph_lock);
4959 
4960 	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
4961 					lockdep_is_held(&graph_lock));
4962 	fgd->type = GRAPH_FILTER_FUNCTION;
4963 	fgd->seq_ops = &ftrace_graph_seq_ops;
4964 
4965 	ret = __ftrace_graph_open(inode, file, fgd);
4966 	if (ret < 0)
4967 		kfree(fgd);
4968 
4969 	mutex_unlock(&graph_lock);
4970 	return ret;
4971 }
4972 
4973 static int
4974 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4975 {
4976 	struct ftrace_graph_data *fgd;
4977 	int ret;
4978 
4979 	if (unlikely(ftrace_disabled))
4980 		return -ENODEV;
4981 
4982 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4983 	if (fgd == NULL)
4984 		return -ENOMEM;
4985 
4986 	mutex_lock(&graph_lock);
4987 
4988 	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
4989 					lockdep_is_held(&graph_lock));
4990 	fgd->type = GRAPH_FILTER_NOTRACE;
4991 	fgd->seq_ops = &ftrace_graph_seq_ops;
4992 
4993 	ret = __ftrace_graph_open(inode, file, fgd);
4994 	if (ret < 0)
4995 		kfree(fgd);
4996 
4997 	mutex_unlock(&graph_lock);
4998 	return ret;
4999 }
5000 
5001 static int
5002 ftrace_graph_release(struct inode *inode, struct file *file)
5003 {
5004 	struct ftrace_graph_data *fgd;
5005 	struct ftrace_hash *old_hash, *new_hash;
5006 	struct trace_parser *parser;
5007 	int ret = 0;
5008 
5009 	if (file->f_mode & FMODE_READ) {
5010 		struct seq_file *m = file->private_data;
5011 
5012 		fgd = m->private;
5013 		seq_release(inode, file);
5014 	} else {
5015 		fgd = file->private_data;
5016 	}
5017 
5019 	if (file->f_mode & FMODE_WRITE) {
5020 
5021 		parser = &fgd->parser;
5022 
5023 		if (trace_parser_loaded((parser))) {
5024 			parser->buffer[parser->idx] = 0;
5025 			ret = ftrace_graph_set_hash(fgd->new_hash,
5026 						    parser->buffer);
5027 		}
5028 
5029 		trace_parser_put(parser);
5030 
5031 		new_hash = __ftrace_hash_move(fgd->new_hash);
5032 		if (!new_hash) {
5033 			ret = -ENOMEM;
5034 			goto out;
5035 		}
5036 
5037 		mutex_lock(&graph_lock);
5038 
5039 		if (fgd->type == GRAPH_FILTER_FUNCTION) {
5040 			old_hash = rcu_dereference_protected(ftrace_graph_hash,
5041 					lockdep_is_held(&graph_lock));
5042 			rcu_assign_pointer(ftrace_graph_hash, new_hash);
5043 		} else {
5044 			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5045 					lockdep_is_held(&graph_lock));
5046 			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
5047 		}
5048 
5049 		mutex_unlock(&graph_lock);
5050 
5051 		/* Wait till all users are no longer using the old hash */
5052 		synchronize_sched();
5053 
5054 		free_ftrace_hash(old_hash);
5055 	}
5056 
5057  out:
5058 	free_ftrace_hash(fgd->new_hash);
5059 	kfree(fgd);
5060 
5061 	return ret;
5062 }
5063 
5064 static int
5065 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
5066 {
5067 	struct ftrace_glob func_g;
5068 	struct dyn_ftrace *rec;
5069 	struct ftrace_page *pg;
5070 	struct ftrace_func_entry *entry;
5071 	int fail = 1;
5072 	int not;
5073 
5074 	/* decode regex */
5075 	func_g.type = filter_parse_regex(buffer, strlen(buffer),
5076 					 &func_g.search, &not);
5077 
5078 	func_g.len = strlen(func_g.search);
5079 
5080 	mutex_lock(&ftrace_lock);
5081 
5082 	if (unlikely(ftrace_disabled)) {
5083 		mutex_unlock(&ftrace_lock);
5084 		return -ENODEV;
5085 	}
5086 
5087 	do_for_each_ftrace_rec(pg, rec) {
5088 
5089 		if (rec->flags & FTRACE_FL_DISABLED)
5090 			continue;
5091 
5092 		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
5093 			entry = ftrace_lookup_ip(hash, rec->ip);
5094 
5095 			if (!not) {
5096 				fail = 0;
5097 
5098 				if (entry)
5099 					continue;
5100 				if (add_hash_entry(hash, rec->ip) < 0)
5101 					goto out;
5102 			} else {
5103 				if (entry) {
5104 					free_hash_entry(hash, entry);
5105 					fail = 0;
5106 				}
5107 			}
5108 		}
5109 	} while_for_each_ftrace_rec();
5110 out:
5111 	mutex_unlock(&ftrace_lock);
5112 
5113 	if (fail)
5114 		return -EINVAL;
5115 
5116 	return 0;
5117 }
5118 
5119 static ssize_t
5120 ftrace_graph_write(struct file *file, const char __user *ubuf,
5121 		   size_t cnt, loff_t *ppos)
5122 {
5123 	ssize_t read, ret = 0;
5124 	struct ftrace_graph_data *fgd = file->private_data;
5125 	struct trace_parser *parser;
5126 
5127 	if (!cnt)
5128 		return 0;
5129 
5130 	/* Read mode uses seq functions */
5131 	if (file->f_mode & FMODE_READ) {
5132 		struct seq_file *m = file->private_data;
5133 		fgd = m->private;
5134 	}
5135 
5136 	parser = &fgd->parser;
5137 
5138 	read = trace_get_user(parser, ubuf, cnt, ppos);
5139 
5140 	if (read >= 0 && trace_parser_loaded(parser) &&
5141 	    !trace_parser_cont(parser)) {
5142 
5143 		ret = ftrace_graph_set_hash(fgd->new_hash,
5144 					    parser->buffer);
5145 		trace_parser_clear(parser);
5146 	}
5147 
5148 	if (!ret)
5149 		ret = read;
5150 
5151 	return ret;
5152 }
5153 
5154 static const struct file_operations ftrace_graph_fops = {
5155 	.open		= ftrace_graph_open,
5156 	.read		= seq_read,
5157 	.write		= ftrace_graph_write,
5158 	.llseek		= tracing_lseek,
5159 	.release	= ftrace_graph_release,
5160 };
5161 
5162 static const struct file_operations ftrace_graph_notrace_fops = {
5163 	.open		= ftrace_graph_notrace_open,
5164 	.read		= seq_read,
5165 	.write		= ftrace_graph_write,
5166 	.llseek		= tracing_lseek,
5167 	.release	= ftrace_graph_release,
5168 };
5169 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5170 
5171 void ftrace_create_filter_files(struct ftrace_ops *ops,
5172 				struct dentry *parent)
5173 {
5174 
5175 	trace_create_file("set_ftrace_filter", 0644, parent,
5176 			  ops, &ftrace_filter_fops);
5177 
5178 	trace_create_file("set_ftrace_notrace", 0644, parent,
5179 			  ops, &ftrace_notrace_fops);
5180 }
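
/*
 * The files created above accept the same glob syntax as
 * ftrace_set_filter()/ftrace_set_notrace(), e.g. (sketch, assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 'vfs_*' > /sys/kernel/tracing/set_ftrace_filter
 *	echo 'rcu_*' > /sys/kernel/tracing/set_ftrace_notrace
 */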
5181 
5182 /*
5183  * The name "destroy_filter_files" is really a misnomer. Although
5184  * in the future it may actually delete the files, for now this is
5185  * really intended to make sure the ops passed in are disabled
5186  * and that when this function returns, the caller is free to
5187  * free the ops.
5188  *
5189  * The "destroy" name is only to match the "create" name that this
5190  * should be paired with.
5191  */
5192 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
5193 {
5194 	mutex_lock(&ftrace_lock);
5195 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
5196 		ftrace_shutdown(ops, 0);
5197 	ops->flags |= FTRACE_OPS_FL_DELETED;
5198 	mutex_unlock(&ftrace_lock);
5199 }
5200 
5201 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
5202 {
5203 
5204 	trace_create_file("available_filter_functions", 0444,
5205 			d_tracer, NULL, &ftrace_avail_fops);
5206 
5207 	trace_create_file("enabled_functions", 0444,
5208 			d_tracer, NULL, &ftrace_enabled_fops);
5209 
5210 	ftrace_create_filter_files(&global_ops, d_tracer);
5211 
5212 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5213 	trace_create_file("set_graph_function", 0444, d_tracer,
5214 				    NULL,
5215 				    &ftrace_graph_fops);
5216 	trace_create_file("set_graph_notrace", 0444, d_tracer,
5217 				    NULL,
5218 				    &ftrace_graph_notrace_fops);
5219 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5220 
5221 	return 0;
5222 }
5223 
5224 static int ftrace_cmp_ips(const void *a, const void *b)
5225 {
5226 	const unsigned long *ipa = a;
5227 	const unsigned long *ipb = b;
5228 
5229 	if (*ipa > *ipb)
5230 		return 1;
5231 	if (*ipa < *ipb)
5232 		return -1;
5233 	return 0;
5234 }
5235 
5236 static int ftrace_process_locs(struct module *mod,
5237 			       unsigned long *start,
5238 			       unsigned long *end)
5239 {
5240 	struct ftrace_page *start_pg;
5241 	struct ftrace_page *pg;
5242 	struct dyn_ftrace *rec;
5243 	unsigned long count;
5244 	unsigned long *p;
5245 	unsigned long addr;
5246 	unsigned long flags = 0; /* Shut up gcc */
5247 	int ret = -ENOMEM;
5248 
5249 	count = end - start;
5250 
5251 	if (!count)
5252 		return 0;
5253 
5254 	sort(start, count, sizeof(*start),
5255 	     ftrace_cmp_ips, NULL);
5256 
5257 	start_pg = ftrace_allocate_pages(count);
5258 	if (!start_pg)
5259 		return -ENOMEM;
5260 
5261 	mutex_lock(&ftrace_lock);
5262 
5263 	/*
5264 	 * Core and each module needs their own pages, as
5265 	 * modules will free them when they are removed.
5266 	 * Force a new page to be allocated for modules.
5267 	 */
5268 	if (!mod) {
5269 		WARN_ON(ftrace_pages || ftrace_pages_start);
5270 		/* First initialization */
5271 		ftrace_pages = ftrace_pages_start = start_pg;
5272 	} else {
5273 		if (!ftrace_pages)
5274 			goto out;
5275 
5276 		if (WARN_ON(ftrace_pages->next)) {
5277 			/* Hmm, we have free pages? */
5278 			while (ftrace_pages->next)
5279 				ftrace_pages = ftrace_pages->next;
5280 		}
5281 
5282 		ftrace_pages->next = start_pg;
5283 	}
5284 
5285 	p = start;
5286 	pg = start_pg;
5287 	while (p < end) {
5288 		addr = ftrace_call_adjust(*p++);
5289 		/*
5290 		 * Some architecture linkers will pad between
5291 		 * the different mcount_loc sections of different
5292 		 * object files to satisfy alignments.
5293 		 * Skip any NULL pointers.
5294 		 */
5295 		if (!addr)
5296 			continue;
5297 
5298 		if (pg->index == pg->size) {
5299 			/* We should have allocated enough */
5300 			if (WARN_ON(!pg->next))
5301 				break;
5302 			pg = pg->next;
5303 		}
5304 
5305 		rec = &pg->records[pg->index++];
5306 		rec->ip = addr;
5307 	}
5308 
5309 	/* We should have used all pages */
5310 	WARN_ON(pg->next);
5311 
5312 	/* Assign the last page to ftrace_pages */
5313 	ftrace_pages = pg;
5314 
5315 	/*
5316 	 * We only need to disable interrupts on start up
5317 	 * because we are modifying code that an interrupt
5318 	 * may execute, and the modification is not atomic.
5319 	 * But for modules, nothing runs the code we modify
5320 	 * until we are finished with it, and there's no
5321 	 * reason to cause large interrupt latencies while we do it.
5322 	 */
5323 	if (!mod)
5324 		local_irq_save(flags);
5325 	ftrace_update_code(mod, start_pg);
5326 	if (!mod)
5327 		local_irq_restore(flags);
5328 	ret = 0;
5329  out:
5330 	mutex_unlock(&ftrace_lock);
5331 
5332 	return ret;
5333 }
5334 
5335 #ifdef CONFIG_MODULES
5336 
5337 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
5338 
5339 static int referenced_filters(struct dyn_ftrace *rec)
5340 {
5341 	struct ftrace_ops *ops;
5342 	int cnt = 0;
5343 
5344 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
5345 		if (ops_references_rec(ops, rec))
5346 			cnt++;
5347 	}
5348 
5349 	return cnt;
5350 }
5351 
5352 void ftrace_release_mod(struct module *mod)
5353 {
5354 	struct dyn_ftrace *rec;
5355 	struct ftrace_page **last_pg;
5356 	struct ftrace_page *pg;
5357 	int order;
5358 
5359 	mutex_lock(&ftrace_lock);
5360 
5361 	if (ftrace_disabled)
5362 		goto out_unlock;
5363 
5364 	/*
5365 	 * Each module has its own ftrace_pages, remove
5366 	 * them from the list.
5367 	 */
5368 	last_pg = &ftrace_pages_start;
5369 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
5370 		rec = &pg->records[0];
5371 		if (within_module_core(rec->ip, mod)) {
5372 			/*
5373 			 * As core pages are first, the first
5374 			 * page should never be a module page.
5375 			 */
5376 			if (WARN_ON(pg == ftrace_pages_start))
5377 				goto out_unlock;
5378 
5379 			/* Check if we are deleting the last page */
5380 			if (pg == ftrace_pages)
5381 				ftrace_pages = next_to_ftrace_page(last_pg);
5382 
5383 			*last_pg = pg->next;
5384 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
5385 			free_pages((unsigned long)pg->records, order);
5386 			kfree(pg);
5387 		} else
5388 			last_pg = &pg->next;
5389 	}
5390  out_unlock:
5391 	mutex_unlock(&ftrace_lock);
5392 }
5393 
5394 void ftrace_module_enable(struct module *mod)
5395 {
5396 	struct dyn_ftrace *rec;
5397 	struct ftrace_page *pg;
5398 
5399 	mutex_lock(&ftrace_lock);
5400 
5401 	if (ftrace_disabled)
5402 		goto out_unlock;
5403 
5404 	/*
5405 	 * If the tracing is enabled, go ahead and enable the record.
5406 	 *
5407 	 * The reason not to enable the record immediately is the
5408 	 * inherent check of ftrace_make_nop/ftrace_make_call for
5409 	 * correct previous instructions.  Doing the NOP conversion
5410 	 * first puts the module into the correct state, thus
5411 	 * passing the ftrace_make_call check.
5412 	 *
5413 	 * We also delay this to after the module code already set the
5414 	 * text to read-only, as we now need to set it back to read-write
5415 	 * so that we can modify the text.
5416 	 */
5417 	if (ftrace_start_up)
5418 		ftrace_arch_code_modify_prepare();
5419 
5420 	do_for_each_ftrace_rec(pg, rec) {
5421 		int cnt;
5422 		/*
5423 		 * do_for_each_ftrace_rec() is a double loop.
5424 		 * All records in a pg belong to the same module
5425 		 * text. If a record is not part of this module,
5426 		 * then skip this pg, which the "break" will do.
5427 		 */
5428 		if (!within_module_core(rec->ip, mod))
5429 			break;
5430 
5431 		cnt = 0;
5432 
5433 		/*
5434 		 * When adding a module, we need to check if tracers are
5435 		 * currently enabled and if they are, and can trace this record,
5436 		 * we need to enable the module functions as well as update the
5437 		 * reference counts for those function records.
5438 		 */
5439 		if (ftrace_start_up)
5440 			cnt += referenced_filters(rec);
5441 
5442 		/* This clears FTRACE_FL_DISABLED */
5443 		rec->flags = cnt;
5444 
5445 		if (ftrace_start_up && cnt) {
5446 			int failed = __ftrace_replace_code(rec, 1);
5447 			if (failed) {
5448 				ftrace_bug(failed, rec);
5449 				goto out_loop;
5450 			}
5451 		}
5452 
5453 	} while_for_each_ftrace_rec();
5454 
5455  out_loop:
5456 	if (ftrace_start_up)
5457 		ftrace_arch_code_modify_post_process();
5458 
5459  out_unlock:
5460 	mutex_unlock(&ftrace_lock);
5461 }
5462 
5463 void ftrace_module_init(struct module *mod)
5464 {
5465 	if (ftrace_disabled || !mod->num_ftrace_callsites)
5466 		return;
5467 
5468 	ftrace_process_locs(mod, mod->ftrace_callsites,
5469 			    mod->ftrace_callsites + mod->num_ftrace_callsites);
5470 }
5471 #endif /* CONFIG_MODULES */
5472 
5473 void __init ftrace_free_init_mem(void)
5474 {
5475 	unsigned long start = (unsigned long)(&__init_begin);
5476 	unsigned long end = (unsigned long)(&__init_end);
5477 	struct ftrace_page **last_pg = &ftrace_pages_start;
5478 	struct ftrace_page *pg;
5479 	struct dyn_ftrace *rec;
5480 	struct dyn_ftrace key;
5481 	int order;
5482 
5483 	key.ip = start;
5484 	key.flags = end;	/* overload flags, as it is unsigned long */
5485 
5486 	mutex_lock(&ftrace_lock);
5487 
5488 	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
5489 		if (end < pg->records[0].ip ||
5490 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
5491 			continue;
5492  again:
5493 		rec = bsearch(&key, pg->records, pg->index,
5494 			      sizeof(struct dyn_ftrace),
5495 			      ftrace_cmp_recs);
5496 		if (!rec)
5497 			continue;
5498 		pg->index--;
5499 		if (!pg->index) {
5500 			*last_pg = pg->next;
5501 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
5502 			free_pages((unsigned long)pg->records, order);
5503 			kfree(pg);
5504 			pg = container_of(last_pg, struct ftrace_page, next);
5505 			if (!(*last_pg))
5506 				ftrace_pages = pg;
5507 			continue;
5508 		}
5509 		memmove(rec, rec + 1,
5510 			(pg->index - (rec - pg->records)) * sizeof(*rec));
5511 		/* More than one function may be in this block */
5512 		goto again;
5513 	}
5514 	mutex_unlock(&ftrace_lock);
5515 }
5516 
5517 void __init ftrace_init(void)
5518 {
5519 	extern unsigned long __start_mcount_loc[];
5520 	extern unsigned long __stop_mcount_loc[];
5521 	unsigned long count, flags;
5522 	int ret;
5523 
5524 	local_irq_save(flags);
5525 	ret = ftrace_dyn_arch_init();
5526 	local_irq_restore(flags);
5527 	if (ret)
5528 		goto failed;
5529 
5530 	count = __stop_mcount_loc - __start_mcount_loc;
5531 	if (!count) {
5532 		pr_info("ftrace: No functions to be traced?\n");
5533 		goto failed;
5534 	}
5535 
5536 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
5537 		count, count / ENTRIES_PER_PAGE + 1);
5538 
5539 	last_ftrace_enabled = ftrace_enabled = 1;
5540 
5541 	ret = ftrace_process_locs(NULL,
5542 				  __start_mcount_loc,
5543 				  __stop_mcount_loc);
5544 
5545 	set_ftrace_early_filters();
5546 
5547 	return;
5548  failed:
5549 	ftrace_disabled = 1;
5550 }
5551 
5552 /* Do nothing if arch does not support this */
5553 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
5554 {
5555 }
5556 
5557 static void ftrace_update_trampoline(struct ftrace_ops *ops)
5558 {
5559 	arch_ftrace_update_trampoline(ops);
5560 }
5561 
5562 void ftrace_init_trace_array(struct trace_array *tr)
5563 {
5564 	INIT_LIST_HEAD(&tr->func_probes);
5565 }
5566 #else
5567 
5568 static struct ftrace_ops global_ops = {
5569 	.func			= ftrace_stub,
5570 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
5571 				  FTRACE_OPS_FL_INITIALIZED |
5572 				  FTRACE_OPS_FL_PID,
5573 };
5574 
5575 static int __init ftrace_nodyn_init(void)
5576 {
5577 	ftrace_enabled = 1;
5578 	return 0;
5579 }
5580 core_initcall(ftrace_nodyn_init);
5581 
5582 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
5583 static inline void ftrace_startup_enable(int command) { }
5584 static inline void ftrace_startup_all(int command) { }
5585 /* Keep as macros so we do not need to define the commands */
5586 # define ftrace_startup(ops, command)					\
5587 	({								\
5588 		int ___ret = __register_ftrace_function(ops);		\
5589 		if (!___ret)						\
5590 			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
5591 		___ret;							\
5592 	})
5593 # define ftrace_shutdown(ops, command)					\
5594 	({								\
5595 		int ___ret = __unregister_ftrace_function(ops);		\
5596 		if (!___ret)						\
5597 			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
5598 		___ret;							\
5599 	})
5600 
5601 # define ftrace_startup_sysctl()	do { } while (0)
5602 # define ftrace_shutdown_sysctl()	do { } while (0)
5603 
5604 static inline int
5605 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
5606 {
5607 	return 1;
5608 }
5609 
5610 static void ftrace_update_trampoline(struct ftrace_ops *ops)
5611 {
5612 }
5613 
5614 #endif /* CONFIG_DYNAMIC_FTRACE */
5615 
5616 __init void ftrace_init_global_array_ops(struct trace_array *tr)
5617 {
5618 	tr->ops = &global_ops;
5619 	tr->ops->private = tr;
5620 	ftrace_init_trace_array(tr);
5621 }
5622 
5623 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5624 {
5625 	/* If we filter on pids, update to use the pid function */
5626 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
5627 		if (WARN_ON(tr->ops->func != ftrace_stub))
5628 			printk("ftrace ops had %pS for function\n",
5629 			       tr->ops->func);
5630 	}
5631 	tr->ops->func = func;
5632 	tr->ops->private = tr;
5633 }
5634 
5635 void ftrace_reset_array_ops(struct trace_array *tr)
5636 {
5637 	tr->ops->func = ftrace_stub;
5638 }
5639 
5640 static inline void
5641 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5642 		       struct ftrace_ops *ignored, struct pt_regs *regs)
5643 {
5644 	struct ftrace_ops *op;
5645 	int bit;
5646 
5647 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5648 	if (bit < 0)
5649 		return;
5650 
5651 	/*
5652 	 * Some of the ops may be dynamically allocated,
5653 	 * they must be freed after a synchronize_sched().
5654 	 */
5655 	preempt_disable_notrace();
5656 
5657 	do_for_each_ftrace_op(op, ftrace_ops_list) {
5658 		/*
5659 		 * Check the following for each ops before calling their func:
5660 		 *  if RCU flag is set, then rcu_is_watching() must be true
5661 		 *  if PER_CPU is set, then ftrace_function_local_disable()
5662 		 *                          must be false
5663 		 *  Otherwise test if the ip matches the ops filter
5664 		 *
5665 		 * If any of the above fails then the op->func() is not executed.
5666 		 */
5667 		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
5668 		    (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
5669 		     !ftrace_function_local_disabled(op)) &&
5670 		    ftrace_ops_test(op, ip, regs)) {
5671 
5672 			if (FTRACE_WARN_ON(!op->func)) {
5673 				pr_warn("op=%p %pS\n", op, op);
5674 				goto out;
5675 			}
5676 			op->func(ip, parent_ip, op, regs);
5677 		}
5678 	} while_for_each_ftrace_op(op);
5679 out:
5680 	preempt_enable_notrace();
5681 	trace_clear_recursion(bit);
5682 }
5683 
5684 /*
5685  * Some archs only support passing ip and parent_ip. Even though
5686  * the list function ignores the op parameter, we do not want any
5687  * C side effects, where a function is called without the caller
5688  * sending a third parameter.
5689  * Archs are expected to support both regs and ftrace_ops at the same time:
5690  * if they support ftrace_ops, it is assumed they support regs.
5691  * If callbacks want to use regs, they must either check for regs
5692  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
5693  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
5694  * An architecture can pass partial regs with ftrace_ops and still
5695  * set the ARCH_SUPPORTS_FTRACE_OPS.
5696  */
5697 #if ARCH_SUPPORTS_FTRACE_OPS
5698 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
5699 				 struct ftrace_ops *op, struct pt_regs *regs)
5700 {
5701 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
5702 }
5703 #else
5704 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
5705 {
5706 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
5707 }
5708 #endif
5709 
5710 /*
5711  * If there's only one function registered but it does not support
5712  * recursion, needs RCU protection and/or requires per cpu handling, then
5713  * this function will be called by the mcount trampoline.
5714  */
5715 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
5716 				   struct ftrace_ops *op, struct pt_regs *regs)
5717 {
5718 	int bit;
5719 
5720 	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
5721 		return;
5722 
5723 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5724 	if (bit < 0)
5725 		return;
5726 
5727 	preempt_disable_notrace();
5728 
5729 	if (!(op->flags & FTRACE_OPS_FL_PER_CPU) ||
5730 	    !ftrace_function_local_disabled(op)) {
5731 		op->func(ip, parent_ip, op, regs);
5732 	}
5733 
5734 	preempt_enable_notrace();
5735 	trace_clear_recursion(bit);
5736 }
5737 
5738 /**
5739  * ftrace_ops_get_func - get the function a trampoline should call
5740  * @ops: the ops to get the function for
5741  *
5742  * Normally the mcount trampoline will call the ops->func, but there
5743  * are times that it should not. For example, if the ops does not
5744  * have its own recursion protection, then it should call the
5745  * ftrace_ops_assist_func() instead.
5746  *
5747  * Returns the function that the trampoline should call for @ops.
5748  */
5749 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
5750 {
5751 	/*
5752 	 * If the function does not handle recursion, needs to be RCU safe,
5753 	 * or does per cpu logic, then we need to call the assist handler.
5754 	 */
5755 	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE) ||
5756 	    ops->flags & (FTRACE_OPS_FL_RCU | FTRACE_OPS_FL_PER_CPU))
5757 		return ftrace_ops_assist_func;
5758 
5759 	return ops->func;
5760 }
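
/*
 * For instance (sketch), an ops that sets FTRACE_OPS_FL_RCU but not
 * FTRACE_OPS_FL_RECURSION_SAFE:
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RCU,
 *	};
 *
 * makes ftrace_ops_get_func(&my_ops) return ftrace_ops_assist_func,
 * which performs the recursion and RCU checks before calling ->func.
 */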
5761 
5762 static void
5763 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
5764 		    struct task_struct *prev, struct task_struct *next)
5765 {
5766 	struct trace_array *tr = data;
5767 	struct trace_pid_list *pid_list;
5768 
5769 	pid_list = rcu_dereference_sched(tr->function_pids);
5770 
5771 	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
5772 		       trace_ignore_this_task(pid_list, next));
5773 }
5774 
5775 static void
5776 ftrace_pid_follow_sched_process_fork(void *data,
5777 				     struct task_struct *self,
5778 				     struct task_struct *task)
5779 {
5780 	struct trace_pid_list *pid_list;
5781 	struct trace_array *tr = data;
5782 
5783 	pid_list = rcu_dereference_sched(tr->function_pids);
5784 	trace_filter_add_remove_task(pid_list, self, task);
5785 }
5786 
5787 static void
5788 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
5789 {
5790 	struct trace_pid_list *pid_list;
5791 	struct trace_array *tr = data;
5792 
5793 	pid_list = rcu_dereference_sched(tr->function_pids);
5794 	trace_filter_add_remove_task(pid_list, NULL, task);
5795 }
5796 
5797 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
5798 {
5799 	if (enable) {
5800 		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
5801 						  tr);
5802 		register_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
5803 						  tr);
5804 	} else {
5805 		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
5806 						    tr);
5807 		unregister_trace_sched_process_exit(ftrace_pid_follow_sched_process_exit,
5808 						    tr);
5809 	}
5810 }
5811 
5812 static void clear_ftrace_pids(struct trace_array *tr)
5813 {
5814 	struct trace_pid_list *pid_list;
5815 	int cpu;
5816 
5817 	pid_list = rcu_dereference_protected(tr->function_pids,
5818 					     lockdep_is_held(&ftrace_lock));
5819 	if (!pid_list)
5820 		return;
5821 
5822 	unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
5823 
5824 	for_each_possible_cpu(cpu)
5825 		per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
5826 
5827 	rcu_assign_pointer(tr->function_pids, NULL);
5828 
5829 	/* Wait till all users are no longer using pid filtering */
5830 	synchronize_sched();
5831 
5832 	trace_free_pid_list(pid_list);
5833 }
5834 
5835 void ftrace_clear_pids(struct trace_array *tr)
5836 {
5837 	mutex_lock(&ftrace_lock);
5838 
5839 	clear_ftrace_pids(tr);
5840 
5841 	mutex_unlock(&ftrace_lock);
5842 }
5843 
5844 static void ftrace_pid_reset(struct trace_array *tr)
5845 {
5846 	mutex_lock(&ftrace_lock);
5847 	clear_ftrace_pids(tr);
5848 
5849 	ftrace_update_pid_func();
5850 	ftrace_startup_all(0);
5851 
5852 	mutex_unlock(&ftrace_lock);
5853 }
5854 
5855 /* Greater than any max PID */
5856 #define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
5857 
5858 static void *fpid_start(struct seq_file *m, loff_t *pos)
5859 	__acquires(RCU)
5860 {
5861 	struct trace_pid_list *pid_list;
5862 	struct trace_array *tr = m->private;
5863 
5864 	mutex_lock(&ftrace_lock);
5865 	rcu_read_lock_sched();
5866 
5867 	pid_list = rcu_dereference_sched(tr->function_pids);
5868 
5869 	if (!pid_list)
5870 		return !(*pos) ? FTRACE_NO_PIDS : NULL;
5871 
5872 	return trace_pid_start(pid_list, pos);
5873 }
5874 
5875 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5876 {
5877 	struct trace_array *tr = m->private;
5878 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
5879 
5880 	if (v == FTRACE_NO_PIDS)
5881 		return NULL;
5882 
5883 	return trace_pid_next(pid_list, v, pos);
5884 }
5885 
5886 static void fpid_stop(struct seq_file *m, void *p)
5887 	__releases(RCU)
5888 {
5889 	rcu_read_unlock_sched();
5890 	mutex_unlock(&ftrace_lock);
5891 }
5892 
5893 static int fpid_show(struct seq_file *m, void *v)
5894 {
5895 	if (v == FTRACE_NO_PIDS) {
5896 		seq_puts(m, "no pid\n");
5897 		return 0;
5898 	}
5899 
5900 	return trace_pid_show(m, v);
5901 }
5902 
5903 static const struct seq_operations ftrace_pid_sops = {
5904 	.start = fpid_start,
5905 	.next = fpid_next,
5906 	.stop = fpid_stop,
5907 	.show = fpid_show,
5908 };
5909 
5910 static int
5911 ftrace_pid_open(struct inode *inode, struct file *file)
5912 {
5913 	struct trace_array *tr = inode->i_private;
5914 	struct seq_file *m;
5915 	int ret = 0;
5916 
5917 	if (trace_array_get(tr) < 0)
5918 		return -ENODEV;
5919 
5920 	if ((file->f_mode & FMODE_WRITE) &&
5921 	    (file->f_flags & O_TRUNC))
5922 		ftrace_pid_reset(tr);
5923 
5924 	ret = seq_open(file, &ftrace_pid_sops);
5925 	if (ret < 0) {
5926 		trace_array_put(tr);
5927 	} else {
5928 		m = file->private_data;
5929 		/* copy tr over to seq ops */
5930 		m->private = tr;
5931 	}
5932 
5933 	return ret;
5934 }
5935 
5936 static void ignore_task_cpu(void *data)
5937 {
5938 	struct trace_array *tr = data;
5939 	struct trace_pid_list *pid_list;
5940 
5941 	/*
5942 	 * This function is called by on_each_cpu() while the
5943 	 * ftrace_lock mutex is held.
5944 	 */
5945 	pid_list = rcu_dereference_protected(tr->function_pids,
5946 					     mutex_is_locked(&ftrace_lock));
5947 
5948 	this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
5949 		       trace_ignore_this_task(pid_list, current));
5950 }
5951 
5952 static ssize_t
5953 ftrace_pid_write(struct file *filp, const char __user *ubuf,
5954 		   size_t cnt, loff_t *ppos)
5955 {
5956 	struct seq_file *m = filp->private_data;
5957 	struct trace_array *tr = m->private;
5958 	struct trace_pid_list *filtered_pids = NULL;
5959 	struct trace_pid_list *pid_list;
5960 	ssize_t ret;
5961 
5962 	if (!cnt)
5963 		return 0;
5964 
5965 	mutex_lock(&ftrace_lock);
5966 
5967 	filtered_pids = rcu_dereference_protected(tr->function_pids,
5968 					     lockdep_is_held(&ftrace_lock));
5969 
5970 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
5971 	if (ret < 0)
5972 		goto out;
5973 
5974 	rcu_assign_pointer(tr->function_pids, pid_list);
5975 
5976 	if (filtered_pids) {
5977 		synchronize_sched();
5978 		trace_free_pid_list(filtered_pids);
5979 	} else if (pid_list) {
5980 		/* Register a probe to set whether to ignore the tracing of a task */
5981 		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
5982 	}
5983 
5984 	/*
5985 	 * Ignoring of pids is done at task switch, but we have to
5986 	 * check the tasks that are currently running as well.
5987 	 * Always do this in case a pid was appended or removed.
5988 	 */
5989 	on_each_cpu(ignore_task_cpu, tr, 1);
5990 
5991 	ftrace_update_pid_func();
5992 	ftrace_startup_all(0);
5993  out:
5994 	mutex_unlock(&ftrace_lock);
5995 
5996 	if (ret > 0)
5997 		*ppos += ret;
5998 
5999 	return ret;
6000 }
6001 
6002 static int
6003 ftrace_pid_release(struct inode *inode, struct file *file)
6004 {
6005 	struct trace_array *tr = inode->i_private;
6006 
6007 	trace_array_put(tr);
6008 
6009 	return seq_release(inode, file);
6010 }
6011 
6012 static const struct file_operations ftrace_pid_fops = {
6013 	.open		= ftrace_pid_open,
6014 	.write		= ftrace_pid_write,
6015 	.read		= seq_read,
6016 	.llseek		= tracing_lseek,
6017 	.release	= ftrace_pid_release,
6018 };
6019 
6020 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
6021 {
6022 	trace_create_file("set_ftrace_pid", 0644, d_tracer,
6023 			    tr, &ftrace_pid_fops);
6024 }
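
/*
 * Usage sketch for the file created above (the path assumes the usual
 * tracefs mount point; see Documentation/trace/ftrace.txt):
 *
 *	echo 123 > /sys/kernel/debug/tracing/set_ftrace_pid	(trace PID 123)
 *	echo > /sys/kernel/debug/tracing/set_ftrace_pid		(O_TRUNC resets)
 */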
6025 
6026 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
6027 					 struct dentry *d_tracer)
6028 {
6029 	/* Only the top level directory has the dyn_tracefs and profile */
6030 	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
6031 
6032 	ftrace_init_dyn_tracefs(d_tracer);
6033 	ftrace_profile_tracefs(d_tracer);
6034 }
6035 
6036 /**
6037  * ftrace_kill - kill ftrace
6038  *
6039  * This function should be used by panic code. It stops ftrace
6040  * but in a not so nice way: no synchronization is performed and
6041  * no cleanup is done; ftrace is simply marked as dead.
6042  */
6043 void ftrace_kill(void)
6044 {
6045 	ftrace_disabled = 1;
6046 	ftrace_enabled = 0;
6047 	clear_ftrace_function();
6048 }
6049 
6050 /**
6051  * ftrace_is_dead - Test if ftrace is dead or not.
6052  */
6053 int ftrace_is_dead(void)
6054 {
6055 	return ftrace_disabled;
6056 }
6057 
6058 /**
6059  * register_ftrace_function - register a function for profiling
6060  * @ops: ops structure that holds the function for profiling.
6061  *
6062  * Register a function to be called on entry of every traced
6063  * function in the kernel (subject to any filtering set on @ops).
6064  *
6065  * Note: @ops->func and all the functions it calls must be labeled
6066  *       with "notrace", otherwise it will go into a
6067  *       recursive loop.
6068  */
6069 int register_ftrace_function(struct ftrace_ops *ops)
6070 {
6071 	int ret;
6072 
6073 	ftrace_ops_init(ops);
6074 
6075 	mutex_lock(&ftrace_lock);
6076 
6077 	ret = ftrace_startup(ops, 0);
6078 
6079 	mutex_unlock(&ftrace_lock);
6080 
6081 	return ret;
6082 }
6083 EXPORT_SYMBOL_GPL(register_ftrace_function);
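
/*
 * Minimal usage sketch. my_callback and my_ops are hypothetical names,
 * not part of this file; note the mandatory "notrace" annotation:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		// called on entry of every traced function
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */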
6084 
6085 /**
6086  * unregister_ftrace_function - unregister a function for profiling.
6087  * @ops: ops structure that holds the function to unregister
6088  *
6089  * Unregister a function that was added to be called by ftrace profiling.
6090  */
6091 int unregister_ftrace_function(struct ftrace_ops *ops)
6092 {
6093 	int ret;
6094 
6095 	mutex_lock(&ftrace_lock);
6096 	ret = ftrace_shutdown(ops, 0);
6097 	mutex_unlock(&ftrace_lock);
6098 
6099 	return ret;
6100 }
6101 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
6102 
6103 int
6104 ftrace_enable_sysctl(struct ctl_table *table, int write,
6105 		     void __user *buffer, size_t *lenp,
6106 		     loff_t *ppos)
6107 {
6108 	int ret = -ENODEV;
6109 
6110 	mutex_lock(&ftrace_lock);
6111 
6112 	if (unlikely(ftrace_disabled))
6113 		goto out;
6114 
6115 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
6116 
6117 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
6118 		goto out;
6119 
6120 	last_ftrace_enabled = !!ftrace_enabled;
6121 
6122 	if (ftrace_enabled) {
6123 
6124 		/* we are starting ftrace again */
6125 		if (ftrace_ops_list != &ftrace_list_end)
6126 			update_ftrace_function();
6127 
6128 		ftrace_startup_sysctl();
6129 
6130 	} else {
6131 		/* stopping ftrace calls (just send to ftrace_stub) */
6132 		ftrace_trace_function = ftrace_stub;
6133 
6134 		ftrace_shutdown_sysctl();
6135 	}
6136 
6137  out:
6138 	mutex_unlock(&ftrace_lock);
6139 	return ret;
6140 }
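
/*
 * This handler backs the /proc/sys/kernel/ftrace_enabled sysctl, e.g.:
 *
 *	echo 0 > /proc/sys/kernel/ftrace_enabled	(send calls to ftrace_stub)
 *	echo 1 > /proc/sys/kernel/ftrace_enabled	(restart registered ops)
 */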
6141 
6142 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6143 
6144 static struct ftrace_ops graph_ops = {
6145 	.func			= ftrace_stub,
6146 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
6147 				   FTRACE_OPS_FL_INITIALIZED |
6148 				   FTRACE_OPS_FL_PID |
6149 				   FTRACE_OPS_FL_STUB,
6150 #ifdef FTRACE_GRAPH_TRAMP_ADDR
6151 	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
6152 	/* trampoline_size is only needed for dynamically allocated tramps */
6153 #endif
6154 	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
6155 };
6156 
6157 void ftrace_graph_sleep_time_control(bool enable)
6158 {
6159 	fgraph_sleep_time = enable;
6160 }
6161 
6162 void ftrace_graph_graph_time_control(bool enable)
6163 {
6164 	fgraph_graph_time = enable;
6165 }
6166 
6167 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
6168 {
6169 	return 0;
6170 }
6171 
6172 /* The callbacks that hook a function */
6173 trace_func_graph_ret_t ftrace_graph_return =
6174 			(trace_func_graph_ret_t)ftrace_stub;
6175 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
6176 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
6177 
6178 /* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
6179 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
6180 {
6181 	int i;
6182 	int ret = 0;
6183 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
6184 	struct task_struct *g, *t;
6185 
6186 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
6187 		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
6188 					* sizeof(struct ftrace_ret_stack),
6189 					GFP_KERNEL);
6190 		if (!ret_stack_list[i]) {
6191 			start = 0;
6192 			end = i;
6193 			ret = -ENOMEM;
6194 			goto free;
6195 		}
6196 	}
6197 
6198 	read_lock(&tasklist_lock);
6199 	do_each_thread(g, t) {
6200 		if (start == end) {
6201 			ret = -EAGAIN;
6202 			goto unlock;
6203 		}
6204 
6205 		if (t->ret_stack == NULL) {
6206 			atomic_set(&t->tracing_graph_pause, 0);
6207 			atomic_set(&t->trace_overrun, 0);
6208 			t->curr_ret_stack = -1;
6209 			/* Make sure the tasks see the -1 first: */
6210 			smp_wmb();
6211 			t->ret_stack = ret_stack_list[start++];
6212 		}
6213 	} while_each_thread(g, t);
6214 
6215 unlock:
6216 	read_unlock(&tasklist_lock);
6217 free:
6218 	for (i = start; i < end; i++)
6219 		kfree(ret_stack_list[i]);
6220 	return ret;
6221 }
6222 
6223 static void
6224 ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
6225 			struct task_struct *prev, struct task_struct *next)
6226 {
6227 	unsigned long long timestamp;
6228 	int index;
6229 
6230 	/*
6231 	 * Does the user want to count the time a function was asleep?
6232 	 * If so, do not update the time stamps.
6233 	 */
6234 	if (fgraph_sleep_time)
6235 		return;
6236 
6237 	timestamp = trace_clock_local();
6238 
6239 	prev->ftrace_timestamp = timestamp;
6240 
6241 	/* only process tasks that we timestamped */
6242 	if (!next->ftrace_timestamp)
6243 		return;
6244 
6245 	/*
6246 	 * Update all the counters in next to make up for the
6247 	 * time next was sleeping.
6248 	 */
6249 	timestamp -= next->ftrace_timestamp;
6250 
6251 	for (index = next->curr_ret_stack; index >= 0; index--)
6252 		next->ret_stack[index].calltime += timestamp;
6253 }
6254 
6255 /* Allocate a return stack for each task */
6256 static int start_graph_tracing(void)
6257 {
6258 	struct ftrace_ret_stack **ret_stack_list;
6259 	int ret, cpu;
6260 
6261 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
6262 				sizeof(struct ftrace_ret_stack *),
6263 				GFP_KERNEL);
6264 
6265 	if (!ret_stack_list)
6266 		return -ENOMEM;
6267 
6268 	/* The cpu_boot init_task->ret_stack will never be freed */
6269 	for_each_online_cpu(cpu) {
6270 		if (!idle_task(cpu)->ret_stack)
6271 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
6272 	}
6273 
6274 	do {
6275 		ret = alloc_retstack_tasklist(ret_stack_list);
6276 	} while (ret == -EAGAIN);
6277 
6278 	if (!ret) {
6279 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
6280 		if (ret)
6281 			pr_info("ftrace_graph: Couldn't activate tracepoint probe to kernel_sched_switch\n");
6283 	}
6284 
6285 	kfree(ret_stack_list);
6286 	return ret;
6287 }
6288 
6289 /*
6290  * Hibernation protection.
6291  * The state of the current task is too unstable during
6292  * suspend/restore to disk. We want to protect against that.
6293  */
6294 static int
6295 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
6296 							void *unused)
6297 {
6298 	switch (state) {
6299 	case PM_HIBERNATION_PREPARE:
6300 		pause_graph_tracing();
6301 		break;
6302 
6303 	case PM_POST_HIBERNATION:
6304 		unpause_graph_tracing();
6305 		break;
6306 	}
6307 	return NOTIFY_DONE;
6308 }
6309 
6310 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
6311 {
6312 	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
6313 		return 0;
6314 	return __ftrace_graph_entry(trace);
6315 }
6316 
6317 /*
6318  * The function graph tracer should only trace the functions defined
6319  * by set_ftrace_filter and set_ftrace_notrace. If another function
6320  * tracer ops is registered, the graph tracer must test each function
6321  * against the global ops, and not just trace any function that any
6322  * ftrace_ops has registered.
6323  */
6324 static void update_function_graph_func(void)
6325 {
6326 	struct ftrace_ops *op;
6327 	bool do_test = false;
6328 
6329 	/*
6330 	 * The graph and global ops share the same set of functions
6331 	 * to test. If any other ops is on the list, then
6332 	 * graph tracing needs to test whether a given function is
6333 	 * one it should call.
6334 	 */
6335 	do_for_each_ftrace_op(op, ftrace_ops_list) {
6336 		if (op != &global_ops && op != &graph_ops &&
6337 		    op != &ftrace_list_end) {
6338 			do_test = true;
6339 			/* in double loop, break out with goto */
6340 			goto out;
6341 		}
6342 	} while_for_each_ftrace_op(op);
6343  out:
6344 	if (do_test)
6345 		ftrace_graph_entry = ftrace_graph_entry_test;
6346 	else
6347 		ftrace_graph_entry = __ftrace_graph_entry;
6348 }
6349 
6350 static struct notifier_block ftrace_suspend_notifier = {
6351 	.notifier_call = ftrace_suspend_notifier_call,
6352 };
6353 
6354 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
6355 			trace_func_graph_ent_t entryfunc)
6356 {
6357 	int ret = 0;
6358 
6359 	mutex_lock(&ftrace_lock);
6360 
6361 	/* we currently allow only one tracer registered at a time */
6362 	if (ftrace_graph_active) {
6363 		ret = -EBUSY;
6364 		goto out;
6365 	}
6366 
6367 	register_pm_notifier(&ftrace_suspend_notifier);
6368 
6369 	ftrace_graph_active++;
6370 	ret = start_graph_tracing();
6371 	if (ret) {
6372 		ftrace_graph_active--;
6373 		goto out;
6374 	}
6375 
6376 	ftrace_graph_return = retfunc;
6377 
6378 	/*
6379 	 * Set the indirect function (__ftrace_graph_entry) to entryfunc,
6380 	 * and make the function that actually gets called point to the
6381 	 * entry test first. Then call update_function_graph_func() to
6382 	 * determine if entryfunc can be called directly or not.
6383 	 */
6384 	__ftrace_graph_entry = entryfunc;
6385 	ftrace_graph_entry = ftrace_graph_entry_test;
6386 	update_function_graph_func();
6387 
6388 	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
6389 out:
6390 	mutex_unlock(&ftrace_lock);
6391 	return ret;
6392 }
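
/*
 * Usage sketch; my_entry and my_ret are hypothetical names, not part
 * of this file:
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero: record this function's return
 *	}
 *
 *	static void my_ret(struct ftrace_graph_ret *trace)
 *	{
 *		// called when the traced function returns
 *	}
 *
 *	register_ftrace_graph(my_ret, my_entry);
 *	...
 *	unregister_ftrace_graph();
 */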
6393 
6394 void unregister_ftrace_graph(void)
6395 {
6396 	mutex_lock(&ftrace_lock);
6397 
6398 	if (unlikely(!ftrace_graph_active))
6399 		goto out;
6400 
6401 	ftrace_graph_active--;
6402 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
6403 	ftrace_graph_entry = ftrace_graph_entry_stub;
6404 	__ftrace_graph_entry = ftrace_graph_entry_stub;
6405 	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
6406 	unregister_pm_notifier(&ftrace_suspend_notifier);
6407 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
6408 
6409 #ifdef CONFIG_DYNAMIC_FTRACE
6410 	/*
6411 	 * Function graph does not allocate the trampoline, but
6412 	 * the global_ops may. We need to restore the ALLOC_TRAMP
6413 	 * flag if one was used.
6414 	 */
6415 	global_ops.trampoline = save_global_trampoline;
6416 	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
6417 		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
6418 #endif
6419 
6420  out:
6421 	mutex_unlock(&ftrace_lock);
6422 }
6423 
6424 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
6425 
6426 static void
6427 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
6428 {
6429 	atomic_set(&t->tracing_graph_pause, 0);
6430 	atomic_set(&t->trace_overrun, 0);
6431 	t->ftrace_timestamp = 0;
6432 	/* make curr_ret_stack visible before we add the ret_stack */
6433 	smp_wmb();
6434 	t->ret_stack = ret_stack;
6435 }
6436 
6437 /*
6438  * Allocate a return stack for the idle task. This may be the first
6439  * time through, or it may be called again when a CPU comes online.
6440  */
6441 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
6442 {
6443 	t->curr_ret_stack = -1;
6444 	/*
6445 	 * The idle task has no parent; it either has its own
6446 	 * stack or no stack at all.
6447 	 */
6448 	if (t->ret_stack)
6449 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
6450 
6451 	if (ftrace_graph_active) {
6452 		struct ftrace_ret_stack *ret_stack;
6453 
6454 		ret_stack = per_cpu(idle_ret_stack, cpu);
6455 		if (!ret_stack) {
6456 			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
6457 					    * sizeof(struct ftrace_ret_stack),
6458 					    GFP_KERNEL);
6459 			if (!ret_stack)
6460 				return;
6461 			per_cpu(idle_ret_stack, cpu) = ret_stack;
6462 		}
6463 		graph_init_task(t, ret_stack);
6464 	}
6465 }
6466 
6467 /* Allocate a return stack for newly created task */
6468 void ftrace_graph_init_task(struct task_struct *t)
6469 {
6470 	/* Make sure we do not use the parent ret_stack */
6471 	t->ret_stack = NULL;
6472 	t->curr_ret_stack = -1;
6473 
6474 	if (ftrace_graph_active) {
6475 		struct ftrace_ret_stack *ret_stack;
6476 
6477 		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
6478 				* sizeof(struct ftrace_ret_stack),
6479 				GFP_KERNEL);
6480 		if (!ret_stack)
6481 			return;
6482 		graph_init_task(t, ret_stack);
6483 	}
6484 }
6485 
6486 void ftrace_graph_exit_task(struct task_struct *t)
6487 {
6488 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
6489 
6490 	t->ret_stack = NULL;
6491 	/* NULL must become visible to IRQs before we free it: */
6492 	barrier();
6493 
6494 	kfree(ret_stack);
6495 }
6496 #endif
6497