xref: /openbmc/linux/kernel/trace/ftrace.c (revision 3a36cb11ca65cd6804972eaf1000378ba4384ea7)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35 
36 #include <trace/events/sched.h>
37 
38 #include <asm/setup.h>
39 
40 #include "trace_output.h"
41 #include "trace_stat.h"
42 
43 #define FTRACE_WARN_ON(cond)			\
44 	({					\
45 		int ___r = cond;		\
46 		if (WARN_ON(___r))		\
47 			ftrace_kill();		\
48 		___r;				\
49 	})
50 
51 #define FTRACE_WARN_ON_ONCE(cond)		\
52 	({					\
53 		int ___r = cond;		\
54 		if (WARN_ON_ONCE(___r))		\
55 			ftrace_kill();		\
56 		___r;				\
57 	})
58 
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64 
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66 
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_REGEX_LOCK(opsname)	\
69 	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
70 #else
71 #define INIT_REGEX_LOCK(opsname)
72 #endif
73 
74 static struct ftrace_ops ftrace_list_end __read_mostly = {
75 	.func		= ftrace_stub,
76 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
77 };
78 
79 /* ftrace_enabled is a method to turn ftrace on or off */
80 int ftrace_enabled __read_mostly;
81 static int last_ftrace_enabled;
82 
83 /* Quick disabling of function tracer. */
84 int function_trace_stop __read_mostly;
85 
86 /* Current function tracing op */
87 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
88 /* What to set function_trace_op to */
89 static struct ftrace_ops *set_function_trace_op;
90 
91 /* List for set_ftrace_pid's pids. */
92 LIST_HEAD(ftrace_pids);
93 struct ftrace_pid {
94 	struct list_head list;
95 	struct pid *pid;
96 };
97 
98 /*
99  * ftrace_disabled is set when an anomaly is discovered.
100  * ftrace_disabled is much stronger than ftrace_enabled.
101  */
102 static int ftrace_disabled __read_mostly;
103 
104 static DEFINE_MUTEX(ftrace_lock);
105 
106 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
107 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
108 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
109 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
110 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
111 static struct ftrace_ops global_ops;
112 static struct ftrace_ops control_ops;
113 
114 #if ARCH_SUPPORTS_FTRACE_OPS
115 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
116 				 struct ftrace_ops *op, struct pt_regs *regs);
117 #else
118 /* See comment below, where ftrace_ops_list_func is defined */
119 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
120 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
121 #endif
122 
123 /*
124  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
125  * can use rcu_dereference_raw_notrace() is that elements removed from this list
126  * are simply leaked, so there is no need to interact with a grace-period
127  * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
128  * concurrent insertions into the ftrace_global_list.
129  *
130  * Silly Alpha and silly pointer-speculation compiler optimizations!
131  */
132 #define do_for_each_ftrace_op(op, list)			\
133 	op = rcu_dereference_raw_notrace(list);			\
134 	do
135 
136 /*
137  * Optimized for just a single item in the list (as that is the normal case).
138  */
139 #define while_for_each_ftrace_op(op)				\
140 	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
141 	       unlikely((op) != &ftrace_list_end))
142 
143 static inline void ftrace_ops_init(struct ftrace_ops *ops)
144 {
145 #ifdef CONFIG_DYNAMIC_FTRACE
146 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
147 		mutex_init(&ops->regex_lock);
148 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
149 	}
150 #endif
151 }
152 
153 /**
154  * ftrace_nr_registered_ops - return number of ops registered
155  *
156  * Returns the number of ftrace_ops registered and tracing functions
157  */
158 int ftrace_nr_registered_ops(void)
159 {
160 	struct ftrace_ops *ops;
161 	int cnt = 0;
162 
163 	mutex_lock(&ftrace_lock);
164 
165 	for (ops = ftrace_ops_list;
166 	     ops != &ftrace_list_end; ops = ops->next)
167 		cnt++;
168 
169 	mutex_unlock(&ftrace_lock);
170 
171 	return cnt;
172 }
173 
174 static void
175 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
176 			struct ftrace_ops *op, struct pt_regs *regs)
177 {
178 	int bit;
179 
180 	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
181 	if (bit < 0)
182 		return;
183 
184 	do_for_each_ftrace_op(op, ftrace_global_list) {
185 		op->func(ip, parent_ip, op, regs);
186 	} while_for_each_ftrace_op(op);
187 
188 	trace_clear_recursion(bit);
189 }
190 
191 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
192 			    struct ftrace_ops *op, struct pt_regs *regs)
193 {
194 	if (!test_tsk_trace_trace(current))
195 		return;
196 
197 	ftrace_pid_function(ip, parent_ip, op, regs);
198 }
199 
200 static void set_ftrace_pid_function(ftrace_func_t func)
201 {
202 	/* do not set ftrace_pid_function to itself! */
203 	if (func != ftrace_pid_func)
204 		ftrace_pid_function = func;
205 }
206 
207 /**
208  * clear_ftrace_function - reset the ftrace function
209  *
210  * This NULLs the ftrace function and in essence stops
211  * tracing.  There may be lag
212  */
213 void clear_ftrace_function(void)
214 {
215 	ftrace_trace_function = ftrace_stub;
216 	ftrace_pid_function = ftrace_stub;
217 }
218 
219 static void control_ops_disable_all(struct ftrace_ops *ops)
220 {
221 	int cpu;
222 
223 	for_each_possible_cpu(cpu)
224 		*per_cpu_ptr(ops->disabled, cpu) = 1;
225 }
226 
227 static int control_ops_alloc(struct ftrace_ops *ops)
228 {
229 	int __percpu *disabled;
230 
231 	disabled = alloc_percpu(int);
232 	if (!disabled)
233 		return -ENOMEM;
234 
235 	ops->disabled = disabled;
236 	control_ops_disable_all(ops);
237 	return 0;
238 }
239 
240 static void control_ops_free(struct ftrace_ops *ops)
241 {
242 	free_percpu(ops->disabled);
243 }
244 
245 static void update_global_ops(void)
246 {
247 	ftrace_func_t func = ftrace_global_list_func;
248 	void *private = NULL;
249 
250 	/* The list has its own recursion protection. */
251 	global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
252 
253 	/*
254 	 * If there's only one function registered, then call that
255 	 * function directly. Otherwise, we need to iterate over the
256 	 * registered callers.
257 	 */
258 	if (ftrace_global_list == &ftrace_list_end ||
259 	    ftrace_global_list->next == &ftrace_list_end) {
260 		func = ftrace_global_list->func;
261 		private = ftrace_global_list->private;
262 		/*
263 		 * As we are calling the function directly.
264 		 * If it does not have recursion protection,
265 		 * the function_trace_op needs to be updated
266 		 * accordingly.
267 		 */
268 		if (!(ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE))
269 			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
270 	}
271 
272 	/* If we filter on pids, update to use the pid function */
273 	if (!list_empty(&ftrace_pids)) {
274 		set_ftrace_pid_function(func);
275 		func = ftrace_pid_func;
276 	}
277 
278 	global_ops.func = func;
279 	global_ops.private = private;
280 }
281 
282 static void ftrace_sync(struct work_struct *work)
283 {
284 	/*
285 	 * This function is just a stub to implement a hard force
286 	 * of synchronize_sched(). This requires synchronizing
287 	 * tasks even in userspace and idle.
288 	 *
289 	 * Yes, function tracing is rude.
290 	 */
291 }
292 
293 static void ftrace_sync_ipi(void *data)
294 {
295 	/* Probably not needed, but do it anyway */
296 	smp_rmb();
297 }
298 
299 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
300 static void update_function_graph_func(void);
301 #else
302 static inline void update_function_graph_func(void) { }
303 #endif
304 
305 static void update_ftrace_function(void)
306 {
307 	ftrace_func_t func;
308 
309 	update_global_ops();
310 
311 	/*
312 	 * If we are at the end of the list and this ops is
313 	 * recursion safe and not dynamic and the arch supports passing ops,
314 	 * then have the mcount trampoline call the function directly.
315 	 */
316 	if (ftrace_ops_list == &ftrace_list_end ||
317 	    (ftrace_ops_list->next == &ftrace_list_end &&
318 	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
319 	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
320 	     !FTRACE_FORCE_LIST_FUNC)) {
321 		/* Set the ftrace_ops that the arch callback uses */
322 		if (ftrace_ops_list == &global_ops)
323 			set_function_trace_op = ftrace_global_list;
324 		else
325 			set_function_trace_op = ftrace_ops_list;
326 		func = ftrace_ops_list->func;
327 	} else {
328 		/* Just use the default ftrace_ops */
329 		set_function_trace_op = &ftrace_list_end;
330 		func = ftrace_ops_list_func;
331 	}
332 
333 	/* If there's no change, then do nothing more here */
334 	if (ftrace_trace_function == func)
335 		return;
336 
337 	update_function_graph_func();
338 
339 	/*
340 	 * If we are using the list function, it doesn't care
341 	 * about the function_trace_ops.
342 	 */
343 	if (func == ftrace_ops_list_func) {
344 		ftrace_trace_function = func;
345 		/*
346 		 * Don't even bother setting function_trace_ops,
347 		 * it would be racy to do so anyway.
348 		 */
349 		return;
350 	}
351 
352 #ifndef CONFIG_DYNAMIC_FTRACE
353 	/*
354 	 * For static tracing, we need to be a bit more careful.
355 	 * The function change takes affect immediately. Thus,
356 	 * we need to coorditate the setting of the function_trace_ops
357 	 * with the setting of the ftrace_trace_function.
358 	 *
359 	 * Set the function to the list ops, which will call the
360 	 * function we want, albeit indirectly, but it handles the
361 	 * ftrace_ops and doesn't depend on function_trace_op.
362 	 */
363 	ftrace_trace_function = ftrace_ops_list_func;
364 	/*
365 	 * Make sure all CPUs see this. Yes this is slow, but static
366 	 * tracing is slow and nasty to have enabled.
367 	 */
368 	schedule_on_each_cpu(ftrace_sync);
369 	/* Now all cpus are using the list ops. */
370 	function_trace_op = set_function_trace_op;
371 	/* Make sure the function_trace_op is visible on all CPUs */
372 	smp_wmb();
373 	/* Nasty way to force a rmb on all cpus */
374 	smp_call_function(ftrace_sync_ipi, NULL, 1);
375 	/* OK, we are all set to update the ftrace_trace_function now! */
376 #endif /* !CONFIG_DYNAMIC_FTRACE */
377 
378 	ftrace_trace_function = func;
379 }
380 
381 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
382 {
383 	ops->next = *list;
384 	/*
385 	 * We are entering ops into the list but another
386 	 * CPU might be walking that list. We need to make sure
387 	 * the ops->next pointer is valid before another CPU sees
388 	 * the ops pointer included into the list.
389 	 */
390 	rcu_assign_pointer(*list, ops);
391 }
392 
393 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
394 {
395 	struct ftrace_ops **p;
396 
397 	/*
398 	 * If we are removing the last function, then simply point
399 	 * to the ftrace_stub.
400 	 */
401 	if (*list == ops && ops->next == &ftrace_list_end) {
402 		*list = &ftrace_list_end;
403 		return 0;
404 	}
405 
406 	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
407 		if (*p == ops)
408 			break;
409 
410 	if (*p != ops)
411 		return -1;
412 
413 	*p = (*p)->next;
414 	return 0;
415 }
416 
417 static void add_ftrace_list_ops(struct ftrace_ops **list,
418 				struct ftrace_ops *main_ops,
419 				struct ftrace_ops *ops)
420 {
421 	int first = *list == &ftrace_list_end;
422 	add_ftrace_ops(list, ops);
423 	if (first)
424 		add_ftrace_ops(&ftrace_ops_list, main_ops);
425 }
426 
427 static int remove_ftrace_list_ops(struct ftrace_ops **list,
428 				  struct ftrace_ops *main_ops,
429 				  struct ftrace_ops *ops)
430 {
431 	int ret = remove_ftrace_ops(list, ops);
432 	if (!ret && *list == &ftrace_list_end)
433 		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
434 	return ret;
435 }
436 
437 static int __register_ftrace_function(struct ftrace_ops *ops)
438 {
439 	if (ops->flags & FTRACE_OPS_FL_DELETED)
440 		return -EINVAL;
441 
442 	if (FTRACE_WARN_ON(ops == &global_ops))
443 		return -EINVAL;
444 
445 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
446 		return -EBUSY;
447 
448 	/* We don't support both control and global flags set. */
449 	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
450 		return -EINVAL;
451 
452 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
453 	/*
454 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
455 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
456 	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
457 	 */
458 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
459 	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
460 		return -EINVAL;
461 
462 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
463 		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
464 #endif
465 
466 	if (!core_kernel_data((unsigned long)ops))
467 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
468 
469 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
470 		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
471 		ops->flags |= FTRACE_OPS_FL_ENABLED;
472 	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
473 		if (control_ops_alloc(ops))
474 			return -ENOMEM;
475 		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
476 	} else
477 		add_ftrace_ops(&ftrace_ops_list, ops);
478 
479 	if (ftrace_enabled)
480 		update_ftrace_function();
481 
482 	return 0;
483 }
484 
485 static int __unregister_ftrace_function(struct ftrace_ops *ops)
486 {
487 	int ret;
488 
489 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
490 		return -EBUSY;
491 
492 	if (FTRACE_WARN_ON(ops == &global_ops))
493 		return -EINVAL;
494 
495 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
496 		ret = remove_ftrace_list_ops(&ftrace_global_list,
497 					     &global_ops, ops);
498 		if (!ret)
499 			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
500 	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
501 		ret = remove_ftrace_list_ops(&ftrace_control_list,
502 					     &control_ops, ops);
503 	} else
504 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
505 
506 	if (ret < 0)
507 		return ret;
508 
509 	if (ftrace_enabled)
510 		update_ftrace_function();
511 
512 	return 0;
513 }
514 
515 static void ftrace_update_pid_func(void)
516 {
517 	/* Only do something if we are tracing something */
518 	if (ftrace_trace_function == ftrace_stub)
519 		return;
520 
521 	update_ftrace_function();
522 }
523 
524 #ifdef CONFIG_FUNCTION_PROFILER
525 struct ftrace_profile {
526 	struct hlist_node		node;
527 	unsigned long			ip;
528 	unsigned long			counter;
529 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
530 	unsigned long long		time;
531 	unsigned long long		time_squared;
532 #endif
533 };
534 
535 struct ftrace_profile_page {
536 	struct ftrace_profile_page	*next;
537 	unsigned long			index;
538 	struct ftrace_profile		records[];
539 };
540 
541 struct ftrace_profile_stat {
542 	atomic_t			disabled;
543 	struct hlist_head		*hash;
544 	struct ftrace_profile_page	*pages;
545 	struct ftrace_profile_page	*start;
546 	struct tracer_stat		stat;
547 };
548 
549 #define PROFILE_RECORDS_SIZE						\
550 	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
551 
552 #define PROFILES_PER_PAGE					\
553 	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
554 
555 static int ftrace_profile_enabled __read_mostly;
556 
557 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
558 static DEFINE_MUTEX(ftrace_profile_lock);
559 
560 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
561 
562 #define FTRACE_PROFILE_HASH_BITS 10
563 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
564 
565 static void *
566 function_stat_next(void *v, int idx)
567 {
568 	struct ftrace_profile *rec = v;
569 	struct ftrace_profile_page *pg;
570 
571 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
572 
573  again:
574 	if (idx != 0)
575 		rec++;
576 
577 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
578 		pg = pg->next;
579 		if (!pg)
580 			return NULL;
581 		rec = &pg->records[0];
582 		if (!rec->counter)
583 			goto again;
584 	}
585 
586 	return rec;
587 }
588 
589 static void *function_stat_start(struct tracer_stat *trace)
590 {
591 	struct ftrace_profile_stat *stat =
592 		container_of(trace, struct ftrace_profile_stat, stat);
593 
594 	if (!stat || !stat->start)
595 		return NULL;
596 
597 	return function_stat_next(&stat->start->records[0], 0);
598 }
599 
600 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
601 /* function graph compares on total time */
602 static int function_stat_cmp(void *p1, void *p2)
603 {
604 	struct ftrace_profile *a = p1;
605 	struct ftrace_profile *b = p2;
606 
607 	if (a->time < b->time)
608 		return -1;
609 	if (a->time > b->time)
610 		return 1;
611 	else
612 		return 0;
613 }
614 #else
615 /* not function graph compares against hits */
616 static int function_stat_cmp(void *p1, void *p2)
617 {
618 	struct ftrace_profile *a = p1;
619 	struct ftrace_profile *b = p2;
620 
621 	if (a->counter < b->counter)
622 		return -1;
623 	if (a->counter > b->counter)
624 		return 1;
625 	else
626 		return 0;
627 }
628 #endif
629 
630 static int function_stat_headers(struct seq_file *m)
631 {
632 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
633 	seq_printf(m, "  Function                               "
634 		   "Hit    Time            Avg             s^2\n"
635 		      "  --------                               "
636 		   "---    ----            ---             ---\n");
637 #else
638 	seq_printf(m, "  Function                               Hit\n"
639 		      "  --------                               ---\n");
640 #endif
641 	return 0;
642 }
643 
644 static int function_stat_show(struct seq_file *m, void *v)
645 {
646 	struct ftrace_profile *rec = v;
647 	char str[KSYM_SYMBOL_LEN];
648 	int ret = 0;
649 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
650 	static struct trace_seq s;
651 	unsigned long long avg;
652 	unsigned long long stddev;
653 #endif
654 	mutex_lock(&ftrace_profile_lock);
655 
656 	/* we raced with function_profile_reset() */
657 	if (unlikely(rec->counter == 0)) {
658 		ret = -EBUSY;
659 		goto out;
660 	}
661 
662 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
663 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
664 
665 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
666 	seq_printf(m, "    ");
667 	avg = rec->time;
668 	do_div(avg, rec->counter);
669 
670 	/* Sample standard deviation (s^2) */
671 	if (rec->counter <= 1)
672 		stddev = 0;
673 	else {
674 		/*
675 		 * Apply Welford's method:
676 		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
677 		 */
678 		stddev = rec->counter * rec->time_squared -
679 			 rec->time * rec->time;
680 
681 		/*
682 		 * Divide only 1000 for ns^2 -> us^2 conversion.
683 		 * trace_print_graph_duration will divide 1000 again.
684 		 */
685 		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
686 	}
687 
688 	trace_seq_init(&s);
689 	trace_print_graph_duration(rec->time, &s);
690 	trace_seq_puts(&s, "    ");
691 	trace_print_graph_duration(avg, &s);
692 	trace_seq_puts(&s, "    ");
693 	trace_print_graph_duration(stddev, &s);
694 	trace_print_seq(m, &s);
695 #endif
696 	seq_putc(m, '\n');
697 out:
698 	mutex_unlock(&ftrace_profile_lock);
699 
700 	return ret;
701 }
702 
703 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
704 {
705 	struct ftrace_profile_page *pg;
706 
707 	pg = stat->pages = stat->start;
708 
709 	while (pg) {
710 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
711 		pg->index = 0;
712 		pg = pg->next;
713 	}
714 
715 	memset(stat->hash, 0,
716 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
717 }
718 
719 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
720 {
721 	struct ftrace_profile_page *pg;
722 	int functions;
723 	int pages;
724 	int i;
725 
726 	/* If we already allocated, do nothing */
727 	if (stat->pages)
728 		return 0;
729 
730 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
731 	if (!stat->pages)
732 		return -ENOMEM;
733 
734 #ifdef CONFIG_DYNAMIC_FTRACE
735 	functions = ftrace_update_tot_cnt;
736 #else
737 	/*
738 	 * We do not know the number of functions that exist because
739 	 * dynamic tracing is what counts them. With past experience
740 	 * we have around 20K functions. That should be more than enough.
741 	 * It is highly unlikely we will execute every function in
742 	 * the kernel.
743 	 */
744 	functions = 20000;
745 #endif
746 
747 	pg = stat->start = stat->pages;
748 
749 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
750 
751 	for (i = 1; i < pages; i++) {
752 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
753 		if (!pg->next)
754 			goto out_free;
755 		pg = pg->next;
756 	}
757 
758 	return 0;
759 
760  out_free:
761 	pg = stat->start;
762 	while (pg) {
763 		unsigned long tmp = (unsigned long)pg;
764 
765 		pg = pg->next;
766 		free_page(tmp);
767 	}
768 
769 	stat->pages = NULL;
770 	stat->start = NULL;
771 
772 	return -ENOMEM;
773 }
774 
775 static int ftrace_profile_init_cpu(int cpu)
776 {
777 	struct ftrace_profile_stat *stat;
778 	int size;
779 
780 	stat = &per_cpu(ftrace_profile_stats, cpu);
781 
782 	if (stat->hash) {
783 		/* If the profile is already created, simply reset it */
784 		ftrace_profile_reset(stat);
785 		return 0;
786 	}
787 
788 	/*
789 	 * We are profiling all functions, but usually only a few thousand
790 	 * functions are hit. We'll make a hash of 1024 items.
791 	 */
792 	size = FTRACE_PROFILE_HASH_SIZE;
793 
794 	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
795 
796 	if (!stat->hash)
797 		return -ENOMEM;
798 
799 	/* Preallocate the function profiling pages */
800 	if (ftrace_profile_pages_init(stat) < 0) {
801 		kfree(stat->hash);
802 		stat->hash = NULL;
803 		return -ENOMEM;
804 	}
805 
806 	return 0;
807 }
808 
809 static int ftrace_profile_init(void)
810 {
811 	int cpu;
812 	int ret = 0;
813 
814 	for_each_possible_cpu(cpu) {
815 		ret = ftrace_profile_init_cpu(cpu);
816 		if (ret)
817 			break;
818 	}
819 
820 	return ret;
821 }
822 
823 /* interrupts must be disabled */
824 static struct ftrace_profile *
825 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
826 {
827 	struct ftrace_profile *rec;
828 	struct hlist_head *hhd;
829 	unsigned long key;
830 
831 	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
832 	hhd = &stat->hash[key];
833 
834 	if (hlist_empty(hhd))
835 		return NULL;
836 
837 	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
838 		if (rec->ip == ip)
839 			return rec;
840 	}
841 
842 	return NULL;
843 }
844 
845 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
846 			       struct ftrace_profile *rec)
847 {
848 	unsigned long key;
849 
850 	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
851 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
852 }
853 
854 /*
855  * The memory is already allocated, this simply finds a new record to use.
856  */
857 static struct ftrace_profile *
858 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
859 {
860 	struct ftrace_profile *rec = NULL;
861 
862 	/* prevent recursion (from NMIs) */
863 	if (atomic_inc_return(&stat->disabled) != 1)
864 		goto out;
865 
866 	/*
867 	 * Try to find the function again since an NMI
868 	 * could have added it
869 	 */
870 	rec = ftrace_find_profiled_func(stat, ip);
871 	if (rec)
872 		goto out;
873 
874 	if (stat->pages->index == PROFILES_PER_PAGE) {
875 		if (!stat->pages->next)
876 			goto out;
877 		stat->pages = stat->pages->next;
878 	}
879 
880 	rec = &stat->pages->records[stat->pages->index++];
881 	rec->ip = ip;
882 	ftrace_add_profile(stat, rec);
883 
884  out:
885 	atomic_dec(&stat->disabled);
886 
887 	return rec;
888 }
889 
890 static void
891 function_profile_call(unsigned long ip, unsigned long parent_ip,
892 		      struct ftrace_ops *ops, struct pt_regs *regs)
893 {
894 	struct ftrace_profile_stat *stat;
895 	struct ftrace_profile *rec;
896 	unsigned long flags;
897 
898 	if (!ftrace_profile_enabled)
899 		return;
900 
901 	local_irq_save(flags);
902 
903 	stat = &__get_cpu_var(ftrace_profile_stats);
904 	if (!stat->hash || !ftrace_profile_enabled)
905 		goto out;
906 
907 	rec = ftrace_find_profiled_func(stat, ip);
908 	if (!rec) {
909 		rec = ftrace_profile_alloc(stat, ip);
910 		if (!rec)
911 			goto out;
912 	}
913 
914 	rec->counter++;
915  out:
916 	local_irq_restore(flags);
917 }
918 
919 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
920 static int profile_graph_entry(struct ftrace_graph_ent *trace)
921 {
922 	function_profile_call(trace->func, 0, NULL, NULL);
923 	return 1;
924 }
925 
926 static void profile_graph_return(struct ftrace_graph_ret *trace)
927 {
928 	struct ftrace_profile_stat *stat;
929 	unsigned long long calltime;
930 	struct ftrace_profile *rec;
931 	unsigned long flags;
932 
933 	local_irq_save(flags);
934 	stat = &__get_cpu_var(ftrace_profile_stats);
935 	if (!stat->hash || !ftrace_profile_enabled)
936 		goto out;
937 
938 	/* If the calltime was zero'd ignore it */
939 	if (!trace->calltime)
940 		goto out;
941 
942 	calltime = trace->rettime - trace->calltime;
943 
944 	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
945 		int index;
946 
947 		index = trace->depth;
948 
949 		/* Append this call time to the parent time to subtract */
950 		if (index)
951 			current->ret_stack[index - 1].subtime += calltime;
952 
953 		if (current->ret_stack[index].subtime < calltime)
954 			calltime -= current->ret_stack[index].subtime;
955 		else
956 			calltime = 0;
957 	}
958 
959 	rec = ftrace_find_profiled_func(stat, trace->func);
960 	if (rec) {
961 		rec->time += calltime;
962 		rec->time_squared += calltime * calltime;
963 	}
964 
965  out:
966 	local_irq_restore(flags);
967 }
968 
969 static int register_ftrace_profiler(void)
970 {
971 	return register_ftrace_graph(&profile_graph_return,
972 				     &profile_graph_entry);
973 }
974 
975 static void unregister_ftrace_profiler(void)
976 {
977 	unregister_ftrace_graph();
978 }
979 #else
980 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
981 	.func		= function_profile_call,
982 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
983 	INIT_REGEX_LOCK(ftrace_profile_ops)
984 };
985 
986 static int register_ftrace_profiler(void)
987 {
988 	return register_ftrace_function(&ftrace_profile_ops);
989 }
990 
991 static void unregister_ftrace_profiler(void)
992 {
993 	unregister_ftrace_function(&ftrace_profile_ops);
994 }
995 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
996 
997 static ssize_t
998 ftrace_profile_write(struct file *filp, const char __user *ubuf,
999 		     size_t cnt, loff_t *ppos)
1000 {
1001 	unsigned long val;
1002 	int ret;
1003 
1004 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1005 	if (ret)
1006 		return ret;
1007 
1008 	val = !!val;
1009 
1010 	mutex_lock(&ftrace_profile_lock);
1011 	if (ftrace_profile_enabled ^ val) {
1012 		if (val) {
1013 			ret = ftrace_profile_init();
1014 			if (ret < 0) {
1015 				cnt = ret;
1016 				goto out;
1017 			}
1018 
1019 			ret = register_ftrace_profiler();
1020 			if (ret < 0) {
1021 				cnt = ret;
1022 				goto out;
1023 			}
1024 			ftrace_profile_enabled = 1;
1025 		} else {
1026 			ftrace_profile_enabled = 0;
1027 			/*
1028 			 * unregister_ftrace_profiler calls stop_machine
1029 			 * so this acts like an synchronize_sched.
1030 			 */
1031 			unregister_ftrace_profiler();
1032 		}
1033 	}
1034  out:
1035 	mutex_unlock(&ftrace_profile_lock);
1036 
1037 	*ppos += cnt;
1038 
1039 	return cnt;
1040 }
1041 
1042 static ssize_t
1043 ftrace_profile_read(struct file *filp, char __user *ubuf,
1044 		     size_t cnt, loff_t *ppos)
1045 {
1046 	char buf[64];		/* big enough to hold a number */
1047 	int r;
1048 
1049 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
1050 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1051 }
1052 
1053 static const struct file_operations ftrace_profile_fops = {
1054 	.open		= tracing_open_generic,
1055 	.read		= ftrace_profile_read,
1056 	.write		= ftrace_profile_write,
1057 	.llseek		= default_llseek,
1058 };
1059 
1060 /* used to initialize the real stat files */
1061 static struct tracer_stat function_stats __initdata = {
1062 	.name		= "functions",
1063 	.stat_start	= function_stat_start,
1064 	.stat_next	= function_stat_next,
1065 	.stat_cmp	= function_stat_cmp,
1066 	.stat_headers	= function_stat_headers,
1067 	.stat_show	= function_stat_show
1068 };
1069 
1070 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1071 {
1072 	struct ftrace_profile_stat *stat;
1073 	struct dentry *entry;
1074 	char *name;
1075 	int ret;
1076 	int cpu;
1077 
1078 	for_each_possible_cpu(cpu) {
1079 		stat = &per_cpu(ftrace_profile_stats, cpu);
1080 
1081 		/* allocate enough for function name + cpu number */
1082 		name = kmalloc(32, GFP_KERNEL);
1083 		if (!name) {
1084 			/*
1085 			 * The files created are permanent, if something happens
1086 			 * we still do not free memory.
1087 			 */
1088 			WARN(1,
1089 			     "Could not allocate stat file for cpu %d\n",
1090 			     cpu);
1091 			return;
1092 		}
1093 		stat->stat = function_stats;
1094 		snprintf(name, 32, "function%d", cpu);
1095 		stat->stat.name = name;
1096 		ret = register_stat_tracer(&stat->stat);
1097 		if (ret) {
1098 			WARN(1,
1099 			     "Could not register function stat for cpu %d\n",
1100 			     cpu);
1101 			kfree(name);
1102 			return;
1103 		}
1104 	}
1105 
1106 	entry = debugfs_create_file("function_profile_enabled", 0644,
1107 				    d_tracer, NULL, &ftrace_profile_fops);
1108 	if (!entry)
1109 		pr_warning("Could not create debugfs "
1110 			   "'function_profile_enabled' entry\n");
1111 }
1112 
1113 #else /* CONFIG_FUNCTION_PROFILER */
1114 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1115 {
1116 }
1117 #endif /* CONFIG_FUNCTION_PROFILER */
1118 
1119 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1120 
1121 #ifdef CONFIG_DYNAMIC_FTRACE
1122 
1123 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1124 # error Dynamic ftrace depends on MCOUNT_RECORD
1125 #endif
1126 
1127 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1128 
1129 struct ftrace_func_probe {
1130 	struct hlist_node	node;
1131 	struct ftrace_probe_ops	*ops;
1132 	unsigned long		flags;
1133 	unsigned long		ip;
1134 	void			*data;
1135 	struct list_head	free_list;
1136 };
1137 
1138 struct ftrace_func_entry {
1139 	struct hlist_node hlist;
1140 	unsigned long ip;
1141 };
1142 
1143 struct ftrace_hash {
1144 	unsigned long		size_bits;
1145 	struct hlist_head	*buckets;
1146 	unsigned long		count;
1147 	struct rcu_head		rcu;
1148 };
1149 
1150 /*
1151  * We make these constant because no one should touch them,
1152  * but they are used as the default "empty hash", to avoid allocating
1153  * it all the time. These are in a read only section such that if
1154  * anyone does try to modify it, it will cause an exception.
1155  */
1156 static const struct hlist_head empty_buckets[1];
1157 static const struct ftrace_hash empty_hash = {
1158 	.buckets = (struct hlist_head *)empty_buckets,
1159 };
1160 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1161 
1162 static struct ftrace_ops global_ops = {
1163 	.func			= ftrace_stub,
1164 	.notrace_hash		= EMPTY_HASH,
1165 	.filter_hash		= EMPTY_HASH,
1166 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
1167 	INIT_REGEX_LOCK(global_ops)
1168 };
1169 
1170 struct ftrace_page {
1171 	struct ftrace_page	*next;
1172 	struct dyn_ftrace	*records;
1173 	int			index;
1174 	int			size;
1175 };
1176 
1177 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1178 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1179 
1180 /* estimate from running different kernels */
1181 #define NR_TO_INIT		10000
1182 
1183 static struct ftrace_page	*ftrace_pages_start;
1184 static struct ftrace_page	*ftrace_pages;
1185 
1186 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1187 {
1188 	return !hash || !hash->count;
1189 }
1190 
1191 static struct ftrace_func_entry *
1192 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1193 {
1194 	unsigned long key;
1195 	struct ftrace_func_entry *entry;
1196 	struct hlist_head *hhd;
1197 
1198 	if (ftrace_hash_empty(hash))
1199 		return NULL;
1200 
1201 	if (hash->size_bits > 0)
1202 		key = hash_long(ip, hash->size_bits);
1203 	else
1204 		key = 0;
1205 
1206 	hhd = &hash->buckets[key];
1207 
1208 	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1209 		if (entry->ip == ip)
1210 			return entry;
1211 	}
1212 	return NULL;
1213 }
1214 
1215 static void __add_hash_entry(struct ftrace_hash *hash,
1216 			     struct ftrace_func_entry *entry)
1217 {
1218 	struct hlist_head *hhd;
1219 	unsigned long key;
1220 
1221 	if (hash->size_bits)
1222 		key = hash_long(entry->ip, hash->size_bits);
1223 	else
1224 		key = 0;
1225 
1226 	hhd = &hash->buckets[key];
1227 	hlist_add_head(&entry->hlist, hhd);
1228 	hash->count++;
1229 }
1230 
1231 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1232 {
1233 	struct ftrace_func_entry *entry;
1234 
1235 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1236 	if (!entry)
1237 		return -ENOMEM;
1238 
1239 	entry->ip = ip;
1240 	__add_hash_entry(hash, entry);
1241 
1242 	return 0;
1243 }
1244 
1245 static void
1246 free_hash_entry(struct ftrace_hash *hash,
1247 		  struct ftrace_func_entry *entry)
1248 {
1249 	hlist_del(&entry->hlist);
1250 	kfree(entry);
1251 	hash->count--;
1252 }
1253 
1254 static void
1255 remove_hash_entry(struct ftrace_hash *hash,
1256 		  struct ftrace_func_entry *entry)
1257 {
1258 	hlist_del(&entry->hlist);
1259 	hash->count--;
1260 }
1261 
1262 static void ftrace_hash_clear(struct ftrace_hash *hash)
1263 {
1264 	struct hlist_head *hhd;
1265 	struct hlist_node *tn;
1266 	struct ftrace_func_entry *entry;
1267 	int size = 1 << hash->size_bits;
1268 	int i;
1269 
1270 	if (!hash->count)
1271 		return;
1272 
1273 	for (i = 0; i < size; i++) {
1274 		hhd = &hash->buckets[i];
1275 		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1276 			free_hash_entry(hash, entry);
1277 	}
1278 	FTRACE_WARN_ON(hash->count);
1279 }
1280 
1281 static void free_ftrace_hash(struct ftrace_hash *hash)
1282 {
1283 	if (!hash || hash == EMPTY_HASH)
1284 		return;
1285 	ftrace_hash_clear(hash);
1286 	kfree(hash->buckets);
1287 	kfree(hash);
1288 }
1289 
1290 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1291 {
1292 	struct ftrace_hash *hash;
1293 
1294 	hash = container_of(rcu, struct ftrace_hash, rcu);
1295 	free_ftrace_hash(hash);
1296 }
1297 
1298 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1299 {
1300 	if (!hash || hash == EMPTY_HASH)
1301 		return;
1302 	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1303 }
1304 
1305 void ftrace_free_filter(struct ftrace_ops *ops)
1306 {
1307 	ftrace_ops_init(ops);
1308 	free_ftrace_hash(ops->filter_hash);
1309 	free_ftrace_hash(ops->notrace_hash);
1310 }
1311 
1312 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1313 {
1314 	struct ftrace_hash *hash;
1315 	int size;
1316 
1317 	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1318 	if (!hash)
1319 		return NULL;
1320 
1321 	size = 1 << size_bits;
1322 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1323 
1324 	if (!hash->buckets) {
1325 		kfree(hash);
1326 		return NULL;
1327 	}
1328 
1329 	hash->size_bits = size_bits;
1330 
1331 	return hash;
1332 }
1333 
1334 static struct ftrace_hash *
1335 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1336 {
1337 	struct ftrace_func_entry *entry;
1338 	struct ftrace_hash *new_hash;
1339 	int size;
1340 	int ret;
1341 	int i;
1342 
1343 	new_hash = alloc_ftrace_hash(size_bits);
1344 	if (!new_hash)
1345 		return NULL;
1346 
1347 	/* Empty hash? */
1348 	if (ftrace_hash_empty(hash))
1349 		return new_hash;
1350 
1351 	size = 1 << hash->size_bits;
1352 	for (i = 0; i < size; i++) {
1353 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1354 			ret = add_hash_entry(new_hash, entry->ip);
1355 			if (ret < 0)
1356 				goto free_hash;
1357 		}
1358 	}
1359 
1360 	FTRACE_WARN_ON(new_hash->count != hash->count);
1361 
1362 	return new_hash;
1363 
1364  free_hash:
1365 	free_ftrace_hash(new_hash);
1366 	return NULL;
1367 }
1368 
1369 static void
1370 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1371 static void
1372 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1373 
1374 static int
1375 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1376 		 struct ftrace_hash **dst, struct ftrace_hash *src)
1377 {
1378 	struct ftrace_func_entry *entry;
1379 	struct hlist_node *tn;
1380 	struct hlist_head *hhd;
1381 	struct ftrace_hash *old_hash;
1382 	struct ftrace_hash *new_hash;
1383 	int size = src->count;
1384 	int bits = 0;
1385 	int ret;
1386 	int i;
1387 
1388 	/*
1389 	 * Remove the current set, update the hash and add
1390 	 * them back.
1391 	 */
1392 	ftrace_hash_rec_disable(ops, enable);
1393 
1394 	/*
1395 	 * If the new source is empty, just free dst and assign it
1396 	 * the empty_hash.
1397 	 */
1398 	if (!src->count) {
1399 		free_ftrace_hash_rcu(*dst);
1400 		rcu_assign_pointer(*dst, EMPTY_HASH);
1401 		/* still need to update the function records */
1402 		ret = 0;
1403 		goto out;
1404 	}
1405 
1406 	/*
1407 	 * Make the hash size about 1/2 the # found
1408 	 */
1409 	for (size /= 2; size; size >>= 1)
1410 		bits++;
1411 
1412 	/* Don't allocate too much */
1413 	if (bits > FTRACE_HASH_MAX_BITS)
1414 		bits = FTRACE_HASH_MAX_BITS;
1415 
1416 	ret = -ENOMEM;
1417 	new_hash = alloc_ftrace_hash(bits);
1418 	if (!new_hash)
1419 		goto out;
1420 
1421 	size = 1 << src->size_bits;
1422 	for (i = 0; i < size; i++) {
1423 		hhd = &src->buckets[i];
1424 		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1425 			remove_hash_entry(src, entry);
1426 			__add_hash_entry(new_hash, entry);
1427 		}
1428 	}
1429 
1430 	old_hash = *dst;
1431 	rcu_assign_pointer(*dst, new_hash);
1432 	free_ftrace_hash_rcu(old_hash);
1433 
1434 	ret = 0;
1435  out:
1436 	/*
1437 	 * Enable regardless of ret:
1438 	 *  On success, we enable the new hash.
1439 	 *  On failure, we re-enable the original hash.
1440 	 */
1441 	ftrace_hash_rec_enable(ops, enable);
1442 
1443 	return ret;
1444 }
1445 
1446 /*
1447  * Test the hashes for this ops to see if we want to call
1448  * the ops->func or not.
1449  *
1450  * It's a match if the ip is in the ops->filter_hash or
1451  * the filter_hash does not exist or is empty,
1452  *  AND
1453  * the ip is not in the ops->notrace_hash.
1454  *
1455  * This needs to be called with preemption disabled as
1456  * the hashes are freed with call_rcu_sched().
1457  */
1458 static int
1459 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1460 {
1461 	struct ftrace_hash *filter_hash;
1462 	struct ftrace_hash *notrace_hash;
1463 	int ret;
1464 
1465 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1466 	/*
1467 	 * There's a small race when adding ops that the ftrace handler
1468 	 * that wants regs, may be called without them. We can not
1469 	 * allow that handler to be called if regs is NULL.
1470 	 */
1471 	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1472 		return 0;
1473 #endif
1474 
1475 	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1476 	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1477 
1478 	if ((ftrace_hash_empty(filter_hash) ||
1479 	     ftrace_lookup_ip(filter_hash, ip)) &&
1480 	    (ftrace_hash_empty(notrace_hash) ||
1481 	     !ftrace_lookup_ip(notrace_hash, ip)))
1482 		ret = 1;
1483 	else
1484 		ret = 0;
1485 
1486 	return ret;
1487 }
1488 
1489 /*
1490  * This is a double for. Do not use 'break' to break out of the loop,
1491  * you must use a goto.
1492  */
1493 #define do_for_each_ftrace_rec(pg, rec)					\
1494 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1495 		int _____i;						\
1496 		for (_____i = 0; _____i < pg->index; _____i++) {	\
1497 			rec = &pg->records[_____i];
1498 
1499 #define while_for_each_ftrace_rec()		\
1500 		}				\
1501 	}
1502 
1503 
1504 static int ftrace_cmp_recs(const void *a, const void *b)
1505 {
1506 	const struct dyn_ftrace *key = a;
1507 	const struct dyn_ftrace *rec = b;
1508 
1509 	if (key->flags < rec->ip)
1510 		return -1;
1511 	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1512 		return 1;
1513 	return 0;
1514 }
1515 
1516 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1517 {
1518 	struct ftrace_page *pg;
1519 	struct dyn_ftrace *rec;
1520 	struct dyn_ftrace key;
1521 
1522 	key.ip = start;
1523 	key.flags = end;	/* overload flags, as it is unsigned long */
1524 
1525 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1526 		if (end < pg->records[0].ip ||
1527 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1528 			continue;
1529 		rec = bsearch(&key, pg->records, pg->index,
1530 			      sizeof(struct dyn_ftrace),
1531 			      ftrace_cmp_recs);
1532 		if (rec)
1533 			return rec->ip;
1534 	}
1535 
1536 	return 0;
1537 }
1538 
1539 /**
1540  * ftrace_location - return true if the ip giving is a traced location
1541  * @ip: the instruction pointer to check
1542  *
1543  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1544  * That is, the instruction that is either a NOP or call to
1545  * the function tracer. It checks the ftrace internal tables to
1546  * determine if the address belongs or not.
1547  */
1548 unsigned long ftrace_location(unsigned long ip)
1549 {
1550 	return ftrace_location_range(ip, ip);
1551 }
1552 
1553 /**
1554  * ftrace_text_reserved - return true if range contains an ftrace location
1555  * @start: start of range to search
1556  * @end: end of range to search (inclusive). @end points to the last byte to check.
1557  *
1558  * Returns 1 if @start and @end contains a ftrace location.
1559  * That is, the instruction that is either a NOP or call to
1560  * the function tracer. It checks the ftrace internal tables to
1561  * determine if the address belongs or not.
1562  */
1563 int ftrace_text_reserved(void *start, void *end)
1564 {
1565 	unsigned long ret;
1566 
1567 	ret = ftrace_location_range((unsigned long)start,
1568 				    (unsigned long)end);
1569 
1570 	return (int)!!ret;
1571 }
1572 
1573 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1574 				     int filter_hash,
1575 				     bool inc)
1576 {
1577 	struct ftrace_hash *hash;
1578 	struct ftrace_hash *other_hash;
1579 	struct ftrace_page *pg;
1580 	struct dyn_ftrace *rec;
1581 	int count = 0;
1582 	int all = 0;
1583 
1584 	/* Only update if the ops has been registered */
1585 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1586 		return;
1587 
1588 	/*
1589 	 * In the filter_hash case:
1590 	 *   If the count is zero, we update all records.
1591 	 *   Otherwise we just update the items in the hash.
1592 	 *
1593 	 * In the notrace_hash case:
1594 	 *   We enable the update in the hash.
1595 	 *   As disabling notrace means enabling the tracing,
1596 	 *   and enabling notrace means disabling, the inc variable
1597 	 *   gets inversed.
1598 	 */
1599 	if (filter_hash) {
1600 		hash = ops->filter_hash;
1601 		other_hash = ops->notrace_hash;
1602 		if (ftrace_hash_empty(hash))
1603 			all = 1;
1604 	} else {
1605 		inc = !inc;
1606 		hash = ops->notrace_hash;
1607 		other_hash = ops->filter_hash;
1608 		/*
1609 		 * If the notrace hash has no items,
1610 		 * then there's nothing to do.
1611 		 */
1612 		if (ftrace_hash_empty(hash))
1613 			return;
1614 	}
1615 
1616 	do_for_each_ftrace_rec(pg, rec) {
1617 		int in_other_hash = 0;
1618 		int in_hash = 0;
1619 		int match = 0;
1620 
1621 		if (all) {
1622 			/*
1623 			 * Only the filter_hash affects all records.
1624 			 * Update if the record is not in the notrace hash.
1625 			 */
1626 			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1627 				match = 1;
1628 		} else {
1629 			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1630 			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1631 
1632 			/*
1633 			 *
1634 			 */
1635 			if (filter_hash && in_hash && !in_other_hash)
1636 				match = 1;
1637 			else if (!filter_hash && in_hash &&
1638 				 (in_other_hash || ftrace_hash_empty(other_hash)))
1639 				match = 1;
1640 		}
1641 		if (!match)
1642 			continue;
1643 
1644 		if (inc) {
1645 			rec->flags++;
1646 			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1647 				return;
1648 			/*
1649 			 * If any ops wants regs saved for this function
1650 			 * then all ops will get saved regs.
1651 			 */
1652 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1653 				rec->flags |= FTRACE_FL_REGS;
1654 		} else {
1655 			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1656 				return;
1657 			rec->flags--;
1658 		}
1659 		count++;
1660 		/* Shortcut, if we handled all records, we are done. */
1661 		if (!all && count == hash->count)
1662 			return;
1663 	} while_for_each_ftrace_rec();
1664 }
1665 
1666 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1667 				    int filter_hash)
1668 {
1669 	__ftrace_hash_rec_update(ops, filter_hash, 0);
1670 }
1671 
1672 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1673 				   int filter_hash)
1674 {
1675 	__ftrace_hash_rec_update(ops, filter_hash, 1);
1676 }
1677 
1678 static void print_ip_ins(const char *fmt, unsigned char *p)
1679 {
1680 	int i;
1681 
1682 	printk(KERN_CONT "%s", fmt);
1683 
1684 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1685 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1686 }
1687 
1688 /**
1689  * ftrace_bug - report and shutdown function tracer
1690  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1691  * @ip: The address that failed
1692  *
1693  * The arch code that enables or disables the function tracing
1694  * can call ftrace_bug() when it has detected a problem in
1695  * modifying the code. @failed should be one of either:
1696  * EFAULT - if the problem happens on reading the @ip address
1697  * EINVAL - if what is read at @ip is not what was expected
1698  * EPERM - if the problem happens on writting to the @ip address
1699  */
1700 void ftrace_bug(int failed, unsigned long ip)
1701 {
1702 	switch (failed) {
1703 	case -EFAULT:
1704 		FTRACE_WARN_ON_ONCE(1);
1705 		pr_info("ftrace faulted on modifying ");
1706 		print_ip_sym(ip);
1707 		break;
1708 	case -EINVAL:
1709 		FTRACE_WARN_ON_ONCE(1);
1710 		pr_info("ftrace failed to modify ");
1711 		print_ip_sym(ip);
1712 		print_ip_ins(" actual: ", (unsigned char *)ip);
1713 		printk(KERN_CONT "\n");
1714 		break;
1715 	case -EPERM:
1716 		FTRACE_WARN_ON_ONCE(1);
1717 		pr_info("ftrace faulted on writing ");
1718 		print_ip_sym(ip);
1719 		break;
1720 	default:
1721 		FTRACE_WARN_ON_ONCE(1);
1722 		pr_info("ftrace faulted on unknown error ");
1723 		print_ip_sym(ip);
1724 	}
1725 }
1726 
1727 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1728 {
1729 	unsigned long flag = 0UL;
1730 
1731 	/*
1732 	 * If we are updating calls:
1733 	 *
1734 	 *   If the record has a ref count, then we need to enable it
1735 	 *   because someone is using it.
1736 	 *
1737 	 *   Otherwise we make sure its disabled.
1738 	 *
1739 	 * If we are disabling calls, then disable all records that
1740 	 * are enabled.
1741 	 */
1742 	if (enable && (rec->flags & ~FTRACE_FL_MASK))
1743 		flag = FTRACE_FL_ENABLED;
1744 
1745 	/*
1746 	 * If enabling and the REGS flag does not match the REGS_EN, then
1747 	 * do not ignore this record. Set flags to fail the compare against
1748 	 * ENABLED.
1749 	 */
1750 	if (flag &&
1751 	    (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1752 		flag |= FTRACE_FL_REGS;
1753 
1754 	/* If the state of this record hasn't changed, then do nothing */
1755 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1756 		return FTRACE_UPDATE_IGNORE;
1757 
1758 	if (flag) {
1759 		/* Save off if rec is being enabled (for return value) */
1760 		flag ^= rec->flags & FTRACE_FL_ENABLED;
1761 
1762 		if (update) {
1763 			rec->flags |= FTRACE_FL_ENABLED;
1764 			if (flag & FTRACE_FL_REGS) {
1765 				if (rec->flags & FTRACE_FL_REGS)
1766 					rec->flags |= FTRACE_FL_REGS_EN;
1767 				else
1768 					rec->flags &= ~FTRACE_FL_REGS_EN;
1769 			}
1770 		}
1771 
1772 		/*
1773 		 * If this record is being updated from a nop, then
1774 		 *   return UPDATE_MAKE_CALL.
1775 		 * Otherwise, if the EN flag is set, then return
1776 		 *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1777 		 *   from the non-save regs, to a save regs function.
1778 		 * Otherwise,
1779 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
1780 		 *   from the save regs, to a non-save regs function.
1781 		 */
1782 		if (flag & FTRACE_FL_ENABLED)
1783 			return FTRACE_UPDATE_MAKE_CALL;
1784 		else if (rec->flags & FTRACE_FL_REGS_EN)
1785 			return FTRACE_UPDATE_MODIFY_CALL_REGS;
1786 		else
1787 			return FTRACE_UPDATE_MODIFY_CALL;
1788 	}
1789 
1790 	if (update) {
1791 		/* If there's no more users, clear all flags */
1792 		if (!(rec->flags & ~FTRACE_FL_MASK))
1793 			rec->flags = 0;
1794 		else
1795 			/* Just disable the record (keep REGS state) */
1796 			rec->flags &= ~FTRACE_FL_ENABLED;
1797 	}
1798 
1799 	return FTRACE_UPDATE_MAKE_NOP;
1800 }
1801 
1802 /**
1803  * ftrace_update_record, set a record that now is tracing or not
1804  * @rec: the record to update
1805  * @enable: set to 1 if the record is tracing, zero to force disable
1806  *
1807  * The records that represent all functions that can be traced need
1808  * to be updated when tracing has been enabled.
1809  */
1810 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1811 {
1812 	return ftrace_check_record(rec, enable, 1);
1813 }
1814 
1815 /**
1816  * ftrace_test_record, check if the record has been enabled or not
1817  * @rec: the record to test
1818  * @enable: set to 1 to check if enabled, 0 if it is disabled
1819  *
1820  * The arch code may need to test if a record is already set to
1821  * tracing to determine how to modify the function code that it
1822  * represents.
1823  */
1824 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1825 {
1826 	return ftrace_check_record(rec, enable, 0);
1827 }
1828 
1829 static int
1830 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1831 {
1832 	unsigned long ftrace_old_addr;
1833 	unsigned long ftrace_addr;
1834 	int ret;
1835 
1836 	ret = ftrace_update_record(rec, enable);
1837 
1838 	if (rec->flags & FTRACE_FL_REGS)
1839 		ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1840 	else
1841 		ftrace_addr = (unsigned long)FTRACE_ADDR;
1842 
1843 	switch (ret) {
1844 	case FTRACE_UPDATE_IGNORE:
1845 		return 0;
1846 
1847 	case FTRACE_UPDATE_MAKE_CALL:
1848 		return ftrace_make_call(rec, ftrace_addr);
1849 
1850 	case FTRACE_UPDATE_MAKE_NOP:
1851 		return ftrace_make_nop(NULL, rec, ftrace_addr);
1852 
1853 	case FTRACE_UPDATE_MODIFY_CALL_REGS:
1854 	case FTRACE_UPDATE_MODIFY_CALL:
1855 		if (rec->flags & FTRACE_FL_REGS)
1856 			ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1857 		else
1858 			ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1859 
1860 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1861 	}
1862 
1863 	return -1; /* unknow ftrace bug */
1864 }
1865 
1866 void __weak ftrace_replace_code(int enable)
1867 {
1868 	struct dyn_ftrace *rec;
1869 	struct ftrace_page *pg;
1870 	int failed;
1871 
1872 	if (unlikely(ftrace_disabled))
1873 		return;
1874 
1875 	do_for_each_ftrace_rec(pg, rec) {
1876 		failed = __ftrace_replace_code(rec, enable);
1877 		if (failed) {
1878 			ftrace_bug(failed, rec->ip);
1879 			/* Stop processing */
1880 			return;
1881 		}
1882 	} while_for_each_ftrace_rec();
1883 }
1884 
1885 struct ftrace_rec_iter {
1886 	struct ftrace_page	*pg;
1887 	int			index;
1888 };
1889 
1890 /**
1891  * ftrace_rec_iter_start, start up iterating over traced functions
1892  *
1893  * Returns an iterator handle that is used to iterate over all
1894  * the records that represent address locations where functions
1895  * are traced.
1896  *
1897  * May return NULL if no records are available.
1898  */
1899 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1900 {
1901 	/*
1902 	 * We only use a single iterator.
1903 	 * Protected by the ftrace_lock mutex.
1904 	 */
1905 	static struct ftrace_rec_iter ftrace_rec_iter;
1906 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1907 
1908 	iter->pg = ftrace_pages_start;
1909 	iter->index = 0;
1910 
1911 	/* Could have empty pages */
1912 	while (iter->pg && !iter->pg->index)
1913 		iter->pg = iter->pg->next;
1914 
1915 	if (!iter->pg)
1916 		return NULL;
1917 
1918 	return iter;
1919 }
1920 
1921 /**
1922  * ftrace_rec_iter_next, get the next record to process.
1923  * @iter: The handle to the iterator.
1924  *
1925  * Returns the next iterator after the given iterator @iter.
1926  */
1927 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1928 {
1929 	iter->index++;
1930 
1931 	if (iter->index >= iter->pg->index) {
1932 		iter->pg = iter->pg->next;
1933 		iter->index = 0;
1934 
1935 		/* Could have empty pages */
1936 		while (iter->pg && !iter->pg->index)
1937 			iter->pg = iter->pg->next;
1938 	}
1939 
1940 	if (!iter->pg)
1941 		return NULL;
1942 
1943 	return iter;
1944 }
1945 
1946 /**
1947  * ftrace_rec_iter_record, get the record at the iterator location
1948  * @iter: The current iterator location
1949  *
1950  * Returns the record that the current @iter is at.
1951  */
1952 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1953 {
1954 	return &iter->pg->records[iter->index];
1955 }
1956 
1957 static int
1958 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1959 {
1960 	unsigned long ip;
1961 	int ret;
1962 
1963 	ip = rec->ip;
1964 
1965 	if (unlikely(ftrace_disabled))
1966 		return 0;
1967 
1968 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1969 	if (ret) {
1970 		ftrace_bug(ret, ip);
1971 		return 0;
1972 	}
1973 	return 1;
1974 }
1975 
1976 /*
1977  * archs can override this function if they must do something
1978  * before the modifying code is performed.
1979  */
1980 int __weak ftrace_arch_code_modify_prepare(void)
1981 {
1982 	return 0;
1983 }
1984 
1985 /*
1986  * archs can override this function if they must do something
1987  * after the modifying code is performed.
1988  */
1989 int __weak ftrace_arch_code_modify_post_process(void)
1990 {
1991 	return 0;
1992 }
1993 
1994 void ftrace_modify_all_code(int command)
1995 {
1996 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
1997 
1998 	/*
1999 	 * If the ftrace_caller calls a ftrace_ops func directly,
2000 	 * we need to make sure that it only traces functions it
2001 	 * expects to trace. When doing the switch of functions,
2002 	 * we need to update to the ftrace_ops_list_func first
2003 	 * before the transition between old and new calls are set,
2004 	 * as the ftrace_ops_list_func will check the ops hashes
2005 	 * to make sure the ops are having the right functions
2006 	 * traced.
2007 	 */
2008 	if (update)
2009 		ftrace_update_ftrace_func(ftrace_ops_list_func);
2010 
2011 	if (command & FTRACE_UPDATE_CALLS)
2012 		ftrace_replace_code(1);
2013 	else if (command & FTRACE_DISABLE_CALLS)
2014 		ftrace_replace_code(0);
2015 
2016 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2017 		function_trace_op = set_function_trace_op;
2018 		smp_wmb();
2019 		/* If irqs are disabled, we are in stop machine */
2020 		if (!irqs_disabled())
2021 			smp_call_function(ftrace_sync_ipi, NULL, 1);
2022 		ftrace_update_ftrace_func(ftrace_trace_function);
2023 	}
2024 
2025 	if (command & FTRACE_START_FUNC_RET)
2026 		ftrace_enable_ftrace_graph_caller();
2027 	else if (command & FTRACE_STOP_FUNC_RET)
2028 		ftrace_disable_ftrace_graph_caller();
2029 }
2030 
2031 static int __ftrace_modify_code(void *data)
2032 {
2033 	int *command = data;
2034 
2035 	ftrace_modify_all_code(*command);
2036 
2037 	return 0;
2038 }
2039 
2040 /**
2041  * ftrace_run_stop_machine, go back to the stop machine method
2042  * @command: The command to tell ftrace what to do
2043  *
2044  * If an arch needs to fall back to the stop machine method, the
2045  * it can call this function.
2046  */
2047 void ftrace_run_stop_machine(int command)
2048 {
2049 	stop_machine(__ftrace_modify_code, &command, NULL);
2050 }
2051 
2052 /**
2053  * arch_ftrace_update_code, modify the code to trace or not trace
2054  * @command: The command that needs to be done
2055  *
2056  * Archs can override this function if it does not need to
2057  * run stop_machine() to modify code.
2058  */
2059 void __weak arch_ftrace_update_code(int command)
2060 {
2061 	ftrace_run_stop_machine(command);
2062 }
2063 
2064 static void ftrace_run_update_code(int command)
2065 {
2066 	int ret;
2067 
2068 	ret = ftrace_arch_code_modify_prepare();
2069 	FTRACE_WARN_ON(ret);
2070 	if (ret)
2071 		return;
2072 	/*
2073 	 * Do not call function tracer while we update the code.
2074 	 * We are in stop machine.
2075 	 */
2076 	function_trace_stop++;
2077 
2078 	/*
2079 	 * By default we use stop_machine() to modify the code.
2080 	 * But archs can do what ever they want as long as it
2081 	 * is safe. The stop_machine() is the safest, but also
2082 	 * produces the most overhead.
2083 	 */
2084 	arch_ftrace_update_code(command);
2085 
2086 	function_trace_stop--;
2087 
2088 	ret = ftrace_arch_code_modify_post_process();
2089 	FTRACE_WARN_ON(ret);
2090 }
2091 
2092 static ftrace_func_t saved_ftrace_func;
2093 static int ftrace_start_up;
2094 static int global_start_up;
2095 
2096 static void ftrace_startup_enable(int command)
2097 {
2098 	if (saved_ftrace_func != ftrace_trace_function) {
2099 		saved_ftrace_func = ftrace_trace_function;
2100 		command |= FTRACE_UPDATE_TRACE_FUNC;
2101 	}
2102 
2103 	if (!command || !ftrace_enabled)
2104 		return;
2105 
2106 	ftrace_run_update_code(command);
2107 }
2108 
2109 static int ftrace_startup(struct ftrace_ops *ops, int command)
2110 {
2111 	bool hash_enable = true;
2112 	int ret;
2113 
2114 	if (unlikely(ftrace_disabled))
2115 		return -ENODEV;
2116 
2117 	ret = __register_ftrace_function(ops);
2118 	if (ret)
2119 		return ret;
2120 
2121 	ftrace_start_up++;
2122 	command |= FTRACE_UPDATE_CALLS;
2123 
2124 	/* ops marked global share the filter hashes */
2125 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2126 		ops = &global_ops;
2127 		/* Don't update hash if global is already set */
2128 		if (global_start_up)
2129 			hash_enable = false;
2130 		global_start_up++;
2131 	}
2132 
2133 	ops->flags |= FTRACE_OPS_FL_ENABLED;
2134 	if (hash_enable)
2135 		ftrace_hash_rec_enable(ops, 1);
2136 
2137 	ftrace_startup_enable(command);
2138 
2139 	return 0;
2140 }
2141 
2142 static int ftrace_shutdown(struct ftrace_ops *ops, int command)
2143 {
2144 	bool hash_disable = true;
2145 	int ret;
2146 
2147 	if (unlikely(ftrace_disabled))
2148 		return -ENODEV;
2149 
2150 	ret = __unregister_ftrace_function(ops);
2151 	if (ret)
2152 		return ret;
2153 
2154 	ftrace_start_up--;
2155 	/*
2156 	 * Just warn in case of unbalance, no need to kill ftrace, it's not
2157 	 * critical but the ftrace_call callers may be never nopped again after
2158 	 * further ftrace uses.
2159 	 */
2160 	WARN_ON_ONCE(ftrace_start_up < 0);
2161 
2162 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2163 		ops = &global_ops;
2164 		global_start_up--;
2165 		WARN_ON_ONCE(global_start_up < 0);
2166 		/* Don't update hash if global still has users */
2167 		if (global_start_up) {
2168 			WARN_ON_ONCE(!ftrace_start_up);
2169 			hash_disable = false;
2170 		}
2171 	}
2172 
2173 	if (hash_disable)
2174 		ftrace_hash_rec_disable(ops, 1);
2175 
2176 	if (ops != &global_ops || !global_start_up)
2177 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2178 
2179 	command |= FTRACE_UPDATE_CALLS;
2180 
2181 	if (saved_ftrace_func != ftrace_trace_function) {
2182 		saved_ftrace_func = ftrace_trace_function;
2183 		command |= FTRACE_UPDATE_TRACE_FUNC;
2184 	}
2185 
2186 	if (!command || !ftrace_enabled) {
2187 		/*
2188 		 * If these are control ops, they still need their
2189 		 * per_cpu field freed. Since, function tracing is
2190 		 * not currently active, we can just free them
2191 		 * without synchronizing all CPUs.
2192 		 */
2193 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
2194 			control_ops_free(ops);
2195 		return 0;
2196 	}
2197 
2198 	ftrace_run_update_code(command);
2199 
2200 	/*
2201 	 * Dynamic ops may be freed, we must make sure that all
2202 	 * callers are done before leaving this function.
2203 	 * The same goes for freeing the per_cpu data of the control
2204 	 * ops.
2205 	 *
2206 	 * Again, normal synchronize_sched() is not good enough.
2207 	 * We need to do a hard force of sched synchronization.
2208 	 * This is because we use preempt_disable() to do RCU, but
2209 	 * the function tracers can be called where RCU is not watching
2210 	 * (like before user_exit()). We can not rely on the RCU
2211 	 * infrastructure to do the synchronization, thus we must do it
2212 	 * ourselves.
2213 	 */
2214 	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2215 		schedule_on_each_cpu(ftrace_sync);
2216 
2217 		if (ops->flags & FTRACE_OPS_FL_CONTROL)
2218 			control_ops_free(ops);
2219 	}
2220 
2221 	return 0;
2222 }
2223 
2224 static void ftrace_startup_sysctl(void)
2225 {
2226 	if (unlikely(ftrace_disabled))
2227 		return;
2228 
2229 	/* Force update next time */
2230 	saved_ftrace_func = NULL;
2231 	/* ftrace_start_up is true if we want ftrace running */
2232 	if (ftrace_start_up)
2233 		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2234 }
2235 
2236 static void ftrace_shutdown_sysctl(void)
2237 {
2238 	if (unlikely(ftrace_disabled))
2239 		return;
2240 
2241 	/* ftrace_start_up is true if ftrace is running */
2242 	if (ftrace_start_up)
2243 		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2244 }
2245 
2246 static cycle_t		ftrace_update_time;
2247 unsigned long		ftrace_update_tot_cnt;
2248 
2249 static inline int ops_traces_mod(struct ftrace_ops *ops)
2250 {
2251 	/*
2252 	 * Filter_hash being empty will default to trace module.
2253 	 * But notrace hash requires a test of individual module functions.
2254 	 */
2255 	return ftrace_hash_empty(ops->filter_hash) &&
2256 		ftrace_hash_empty(ops->notrace_hash);
2257 }
2258 
2259 /*
2260  * Check if the current ops references the record.
2261  *
2262  * If the ops traces all functions, then it was already accounted for.
2263  * If the ops does not trace the current record function, skip it.
2264  * If the ops ignores the function via notrace filter, skip it.
2265  */
2266 static inline bool
2267 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2268 {
2269 	/* If ops isn't enabled, ignore it */
2270 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2271 		return 0;
2272 
2273 	/* If ops traces all mods, we already accounted for it */
2274 	if (ops_traces_mod(ops))
2275 		return 0;
2276 
2277 	/* The function must be in the filter */
2278 	if (!ftrace_hash_empty(ops->filter_hash) &&
2279 	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
2280 		return 0;
2281 
2282 	/* If in notrace hash, we ignore it too */
2283 	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
2284 		return 0;
2285 
2286 	return 1;
2287 }
2288 
2289 static int referenced_filters(struct dyn_ftrace *rec)
2290 {
2291 	struct ftrace_ops *ops;
2292 	int cnt = 0;
2293 
2294 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2295 		if (ops_references_rec(ops, rec))
2296 		    cnt++;
2297 	}
2298 
2299 	return cnt;
2300 }
2301 
2302 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
2303 {
2304 	struct ftrace_page *pg;
2305 	struct dyn_ftrace *p;
2306 	cycle_t start, stop;
2307 	unsigned long update_cnt = 0;
2308 	unsigned long ref = 0;
2309 	bool test = false;
2310 	int i;
2311 
2312 	/*
2313 	 * When adding a module, we need to check if tracers are
2314 	 * currently enabled and if they are set to trace all functions.
2315 	 * If they are, we need to enable the module functions as well
2316 	 * as update the reference counts for those function records.
2317 	 */
2318 	if (mod) {
2319 		struct ftrace_ops *ops;
2320 
2321 		for (ops = ftrace_ops_list;
2322 		     ops != &ftrace_list_end; ops = ops->next) {
2323 			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2324 				if (ops_traces_mod(ops))
2325 					ref++;
2326 				else
2327 					test = true;
2328 			}
2329 		}
2330 	}
2331 
2332 	start = ftrace_now(raw_smp_processor_id());
2333 
2334 	for (pg = new_pgs; pg; pg = pg->next) {
2335 
2336 		for (i = 0; i < pg->index; i++) {
2337 			int cnt = ref;
2338 
2339 			/* If something went wrong, bail without enabling anything */
2340 			if (unlikely(ftrace_disabled))
2341 				return -1;
2342 
2343 			p = &pg->records[i];
2344 			if (test)
2345 				cnt += referenced_filters(p);
2346 			p->flags = cnt;
2347 
2348 			/*
2349 			 * Do the initial record conversion from mcount jump
2350 			 * to the NOP instructions.
2351 			 */
2352 			if (!ftrace_code_disable(mod, p))
2353 				break;
2354 
2355 			update_cnt++;
2356 
2357 			/*
2358 			 * If the tracing is enabled, go ahead and enable the record.
2359 			 *
2360 			 * The reason not to enable the record immediatelly is the
2361 			 * inherent check of ftrace_make_nop/ftrace_make_call for
2362 			 * correct previous instructions.  Making first the NOP
2363 			 * conversion puts the module to the correct state, thus
2364 			 * passing the ftrace_make_call check.
2365 			 */
2366 			if (ftrace_start_up && cnt) {
2367 				int failed = __ftrace_replace_code(p, 1);
2368 				if (failed)
2369 					ftrace_bug(failed, p->ip);
2370 			}
2371 		}
2372 	}
2373 
2374 	stop = ftrace_now(raw_smp_processor_id());
2375 	ftrace_update_time = stop - start;
2376 	ftrace_update_tot_cnt += update_cnt;
2377 
2378 	return 0;
2379 }
2380 
2381 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2382 {
2383 	int order;
2384 	int cnt;
2385 
2386 	if (WARN_ON(!count))
2387 		return -EINVAL;
2388 
2389 	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2390 
2391 	/*
2392 	 * We want to fill as much as possible. No more than a page
2393 	 * may be empty.
2394 	 */
2395 	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2396 		order--;
2397 
2398  again:
2399 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2400 
2401 	if (!pg->records) {
2402 		/* if we can't allocate this size, try something smaller */
2403 		if (!order)
2404 			return -ENOMEM;
2405 		order >>= 1;
2406 		goto again;
2407 	}
2408 
2409 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2410 	pg->size = cnt;
2411 
2412 	if (cnt > count)
2413 		cnt = count;
2414 
2415 	return cnt;
2416 }
2417 
2418 static struct ftrace_page *
2419 ftrace_allocate_pages(unsigned long num_to_init)
2420 {
2421 	struct ftrace_page *start_pg;
2422 	struct ftrace_page *pg;
2423 	int order;
2424 	int cnt;
2425 
2426 	if (!num_to_init)
2427 		return 0;
2428 
2429 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2430 	if (!pg)
2431 		return NULL;
2432 
2433 	/*
2434 	 * Try to allocate as much as possible in one continues
2435 	 * location that fills in all of the space. We want to
2436 	 * waste as little space as possible.
2437 	 */
2438 	for (;;) {
2439 		cnt = ftrace_allocate_records(pg, num_to_init);
2440 		if (cnt < 0)
2441 			goto free_pages;
2442 
2443 		num_to_init -= cnt;
2444 		if (!num_to_init)
2445 			break;
2446 
2447 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2448 		if (!pg->next)
2449 			goto free_pages;
2450 
2451 		pg = pg->next;
2452 	}
2453 
2454 	return start_pg;
2455 
2456  free_pages:
2457 	while (start_pg) {
2458 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2459 		free_pages((unsigned long)pg->records, order);
2460 		start_pg = pg->next;
2461 		kfree(pg);
2462 		pg = start_pg;
2463 	}
2464 	pr_info("ftrace: FAILED to allocate memory for functions\n");
2465 	return NULL;
2466 }
2467 
2468 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2469 
2470 struct ftrace_iterator {
2471 	loff_t				pos;
2472 	loff_t				func_pos;
2473 	struct ftrace_page		*pg;
2474 	struct dyn_ftrace		*func;
2475 	struct ftrace_func_probe	*probe;
2476 	struct trace_parser		parser;
2477 	struct ftrace_hash		*hash;
2478 	struct ftrace_ops		*ops;
2479 	int				hidx;
2480 	int				idx;
2481 	unsigned			flags;
2482 };
2483 
2484 static void *
2485 t_hash_next(struct seq_file *m, loff_t *pos)
2486 {
2487 	struct ftrace_iterator *iter = m->private;
2488 	struct hlist_node *hnd = NULL;
2489 	struct hlist_head *hhd;
2490 
2491 	(*pos)++;
2492 	iter->pos = *pos;
2493 
2494 	if (iter->probe)
2495 		hnd = &iter->probe->node;
2496  retry:
2497 	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2498 		return NULL;
2499 
2500 	hhd = &ftrace_func_hash[iter->hidx];
2501 
2502 	if (hlist_empty(hhd)) {
2503 		iter->hidx++;
2504 		hnd = NULL;
2505 		goto retry;
2506 	}
2507 
2508 	if (!hnd)
2509 		hnd = hhd->first;
2510 	else {
2511 		hnd = hnd->next;
2512 		if (!hnd) {
2513 			iter->hidx++;
2514 			goto retry;
2515 		}
2516 	}
2517 
2518 	if (WARN_ON_ONCE(!hnd))
2519 		return NULL;
2520 
2521 	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2522 
2523 	return iter;
2524 }
2525 
2526 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2527 {
2528 	struct ftrace_iterator *iter = m->private;
2529 	void *p = NULL;
2530 	loff_t l;
2531 
2532 	if (!(iter->flags & FTRACE_ITER_DO_HASH))
2533 		return NULL;
2534 
2535 	if (iter->func_pos > *pos)
2536 		return NULL;
2537 
2538 	iter->hidx = 0;
2539 	for (l = 0; l <= (*pos - iter->func_pos); ) {
2540 		p = t_hash_next(m, &l);
2541 		if (!p)
2542 			break;
2543 	}
2544 	if (!p)
2545 		return NULL;
2546 
2547 	/* Only set this if we have an item */
2548 	iter->flags |= FTRACE_ITER_HASH;
2549 
2550 	return iter;
2551 }
2552 
2553 static int
2554 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2555 {
2556 	struct ftrace_func_probe *rec;
2557 
2558 	rec = iter->probe;
2559 	if (WARN_ON_ONCE(!rec))
2560 		return -EIO;
2561 
2562 	if (rec->ops->print)
2563 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2564 
2565 	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2566 
2567 	if (rec->data)
2568 		seq_printf(m, ":%p", rec->data);
2569 	seq_putc(m, '\n');
2570 
2571 	return 0;
2572 }
2573 
2574 static void *
2575 t_next(struct seq_file *m, void *v, loff_t *pos)
2576 {
2577 	struct ftrace_iterator *iter = m->private;
2578 	struct ftrace_ops *ops = iter->ops;
2579 	struct dyn_ftrace *rec = NULL;
2580 
2581 	if (unlikely(ftrace_disabled))
2582 		return NULL;
2583 
2584 	if (iter->flags & FTRACE_ITER_HASH)
2585 		return t_hash_next(m, pos);
2586 
2587 	(*pos)++;
2588 	iter->pos = iter->func_pos = *pos;
2589 
2590 	if (iter->flags & FTRACE_ITER_PRINTALL)
2591 		return t_hash_start(m, pos);
2592 
2593  retry:
2594 	if (iter->idx >= iter->pg->index) {
2595 		if (iter->pg->next) {
2596 			iter->pg = iter->pg->next;
2597 			iter->idx = 0;
2598 			goto retry;
2599 		}
2600 	} else {
2601 		rec = &iter->pg->records[iter->idx++];
2602 		if (((iter->flags & FTRACE_ITER_FILTER) &&
2603 		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2604 
2605 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
2606 		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2607 
2608 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
2609 		     !(rec->flags & FTRACE_FL_ENABLED))) {
2610 
2611 			rec = NULL;
2612 			goto retry;
2613 		}
2614 	}
2615 
2616 	if (!rec)
2617 		return t_hash_start(m, pos);
2618 
2619 	iter->func = rec;
2620 
2621 	return iter;
2622 }
2623 
2624 static void reset_iter_read(struct ftrace_iterator *iter)
2625 {
2626 	iter->pos = 0;
2627 	iter->func_pos = 0;
2628 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2629 }
2630 
2631 static void *t_start(struct seq_file *m, loff_t *pos)
2632 {
2633 	struct ftrace_iterator *iter = m->private;
2634 	struct ftrace_ops *ops = iter->ops;
2635 	void *p = NULL;
2636 	loff_t l;
2637 
2638 	mutex_lock(&ftrace_lock);
2639 
2640 	if (unlikely(ftrace_disabled))
2641 		return NULL;
2642 
2643 	/*
2644 	 * If an lseek was done, then reset and start from beginning.
2645 	 */
2646 	if (*pos < iter->pos)
2647 		reset_iter_read(iter);
2648 
2649 	/*
2650 	 * For set_ftrace_filter reading, if we have the filter
2651 	 * off, we can short cut and just print out that all
2652 	 * functions are enabled.
2653 	 */
2654 	if (iter->flags & FTRACE_ITER_FILTER &&
2655 	    ftrace_hash_empty(ops->filter_hash)) {
2656 		if (*pos > 0)
2657 			return t_hash_start(m, pos);
2658 		iter->flags |= FTRACE_ITER_PRINTALL;
2659 		/* reset in case of seek/pread */
2660 		iter->flags &= ~FTRACE_ITER_HASH;
2661 		return iter;
2662 	}
2663 
2664 	if (iter->flags & FTRACE_ITER_HASH)
2665 		return t_hash_start(m, pos);
2666 
2667 	/*
2668 	 * Unfortunately, we need to restart at ftrace_pages_start
2669 	 * every time we let go of the ftrace_mutex. This is because
2670 	 * those pointers can change without the lock.
2671 	 */
2672 	iter->pg = ftrace_pages_start;
2673 	iter->idx = 0;
2674 	for (l = 0; l <= *pos; ) {
2675 		p = t_next(m, p, &l);
2676 		if (!p)
2677 			break;
2678 	}
2679 
2680 	if (!p)
2681 		return t_hash_start(m, pos);
2682 
2683 	return iter;
2684 }
2685 
2686 static void t_stop(struct seq_file *m, void *p)
2687 {
2688 	mutex_unlock(&ftrace_lock);
2689 }
2690 
2691 static int t_show(struct seq_file *m, void *v)
2692 {
2693 	struct ftrace_iterator *iter = m->private;
2694 	struct dyn_ftrace *rec;
2695 
2696 	if (iter->flags & FTRACE_ITER_HASH)
2697 		return t_hash_show(m, iter);
2698 
2699 	if (iter->flags & FTRACE_ITER_PRINTALL) {
2700 		seq_printf(m, "#### all functions enabled ####\n");
2701 		return 0;
2702 	}
2703 
2704 	rec = iter->func;
2705 
2706 	if (!rec)
2707 		return 0;
2708 
2709 	seq_printf(m, "%ps", (void *)rec->ip);
2710 	if (iter->flags & FTRACE_ITER_ENABLED)
2711 		seq_printf(m, " (%ld)%s",
2712 			   rec->flags & ~FTRACE_FL_MASK,
2713 			   rec->flags & FTRACE_FL_REGS ? " R" : "");
2714 	seq_printf(m, "\n");
2715 
2716 	return 0;
2717 }
2718 
2719 static const struct seq_operations show_ftrace_seq_ops = {
2720 	.start = t_start,
2721 	.next = t_next,
2722 	.stop = t_stop,
2723 	.show = t_show,
2724 };
2725 
2726 static int
2727 ftrace_avail_open(struct inode *inode, struct file *file)
2728 {
2729 	struct ftrace_iterator *iter;
2730 
2731 	if (unlikely(ftrace_disabled))
2732 		return -ENODEV;
2733 
2734 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2735 	if (iter) {
2736 		iter->pg = ftrace_pages_start;
2737 		iter->ops = &global_ops;
2738 	}
2739 
2740 	return iter ? 0 : -ENOMEM;
2741 }
2742 
2743 static int
2744 ftrace_enabled_open(struct inode *inode, struct file *file)
2745 {
2746 	struct ftrace_iterator *iter;
2747 
2748 	if (unlikely(ftrace_disabled))
2749 		return -ENODEV;
2750 
2751 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2752 	if (iter) {
2753 		iter->pg = ftrace_pages_start;
2754 		iter->flags = FTRACE_ITER_ENABLED;
2755 		iter->ops = &global_ops;
2756 	}
2757 
2758 	return iter ? 0 : -ENOMEM;
2759 }
2760 
2761 static void ftrace_filter_reset(struct ftrace_hash *hash)
2762 {
2763 	mutex_lock(&ftrace_lock);
2764 	ftrace_hash_clear(hash);
2765 	mutex_unlock(&ftrace_lock);
2766 }
2767 
2768 /**
2769  * ftrace_regex_open - initialize function tracer filter files
2770  * @ops: The ftrace_ops that hold the hash filters
2771  * @flag: The type of filter to process
2772  * @inode: The inode, usually passed in to your open routine
2773  * @file: The file, usually passed in to your open routine
2774  *
2775  * ftrace_regex_open() initializes the filter files for the
2776  * @ops. Depending on @flag it may process the filter hash or
2777  * the notrace hash of @ops. With this called from the open
2778  * routine, you can use ftrace_filter_write() for the write
2779  * routine if @flag has FTRACE_ITER_FILTER set, or
2780  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2781  * tracing_lseek() should be used as the lseek routine, and
2782  * release must call ftrace_regex_release().
2783  */
2784 int
2785 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2786 		  struct inode *inode, struct file *file)
2787 {
2788 	struct ftrace_iterator *iter;
2789 	struct ftrace_hash *hash;
2790 	int ret = 0;
2791 
2792 	ftrace_ops_init(ops);
2793 
2794 	if (unlikely(ftrace_disabled))
2795 		return -ENODEV;
2796 
2797 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2798 	if (!iter)
2799 		return -ENOMEM;
2800 
2801 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2802 		kfree(iter);
2803 		return -ENOMEM;
2804 	}
2805 
2806 	iter->ops = ops;
2807 	iter->flags = flag;
2808 
2809 	mutex_lock(&ops->regex_lock);
2810 
2811 	if (flag & FTRACE_ITER_NOTRACE)
2812 		hash = ops->notrace_hash;
2813 	else
2814 		hash = ops->filter_hash;
2815 
2816 	if (file->f_mode & FMODE_WRITE) {
2817 		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2818 		if (!iter->hash) {
2819 			trace_parser_put(&iter->parser);
2820 			kfree(iter);
2821 			ret = -ENOMEM;
2822 			goto out_unlock;
2823 		}
2824 	}
2825 
2826 	if ((file->f_mode & FMODE_WRITE) &&
2827 	    (file->f_flags & O_TRUNC))
2828 		ftrace_filter_reset(iter->hash);
2829 
2830 	if (file->f_mode & FMODE_READ) {
2831 		iter->pg = ftrace_pages_start;
2832 
2833 		ret = seq_open(file, &show_ftrace_seq_ops);
2834 		if (!ret) {
2835 			struct seq_file *m = file->private_data;
2836 			m->private = iter;
2837 		} else {
2838 			/* Failed */
2839 			free_ftrace_hash(iter->hash);
2840 			trace_parser_put(&iter->parser);
2841 			kfree(iter);
2842 		}
2843 	} else
2844 		file->private_data = iter;
2845 
2846  out_unlock:
2847 	mutex_unlock(&ops->regex_lock);
2848 
2849 	return ret;
2850 }
2851 
2852 static int
2853 ftrace_filter_open(struct inode *inode, struct file *file)
2854 {
2855 	struct ftrace_ops *ops = inode->i_private;
2856 
2857 	return ftrace_regex_open(ops,
2858 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2859 			inode, file);
2860 }
2861 
2862 static int
2863 ftrace_notrace_open(struct inode *inode, struct file *file)
2864 {
2865 	struct ftrace_ops *ops = inode->i_private;
2866 
2867 	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
2868 				 inode, file);
2869 }
2870 
2871 static int ftrace_match(char *str, char *regex, int len, int type)
2872 {
2873 	int matched = 0;
2874 	int slen;
2875 
2876 	switch (type) {
2877 	case MATCH_FULL:
2878 		if (strcmp(str, regex) == 0)
2879 			matched = 1;
2880 		break;
2881 	case MATCH_FRONT_ONLY:
2882 		if (strncmp(str, regex, len) == 0)
2883 			matched = 1;
2884 		break;
2885 	case MATCH_MIDDLE_ONLY:
2886 		if (strstr(str, regex))
2887 			matched = 1;
2888 		break;
2889 	case MATCH_END_ONLY:
2890 		slen = strlen(str);
2891 		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2892 			matched = 1;
2893 		break;
2894 	}
2895 
2896 	return matched;
2897 }
2898 
2899 static int
2900 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2901 {
2902 	struct ftrace_func_entry *entry;
2903 	int ret = 0;
2904 
2905 	entry = ftrace_lookup_ip(hash, rec->ip);
2906 	if (not) {
2907 		/* Do nothing if it doesn't exist */
2908 		if (!entry)
2909 			return 0;
2910 
2911 		free_hash_entry(hash, entry);
2912 	} else {
2913 		/* Do nothing if it exists */
2914 		if (entry)
2915 			return 0;
2916 
2917 		ret = add_hash_entry(hash, rec->ip);
2918 	}
2919 	return ret;
2920 }
2921 
2922 static int
2923 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2924 		    char *regex, int len, int type)
2925 {
2926 	char str[KSYM_SYMBOL_LEN];
2927 	char *modname;
2928 
2929 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2930 
2931 	if (mod) {
2932 		/* module lookup requires matching the module */
2933 		if (!modname || strcmp(modname, mod))
2934 			return 0;
2935 
2936 		/* blank search means to match all funcs in the mod */
2937 		if (!len)
2938 			return 1;
2939 	}
2940 
2941 	return ftrace_match(str, regex, len, type);
2942 }
2943 
2944 static int
2945 match_records(struct ftrace_hash *hash, char *buff,
2946 	      int len, char *mod, int not)
2947 {
2948 	unsigned search_len = 0;
2949 	struct ftrace_page *pg;
2950 	struct dyn_ftrace *rec;
2951 	int type = MATCH_FULL;
2952 	char *search = buff;
2953 	int found = 0;
2954 	int ret;
2955 
2956 	if (len) {
2957 		type = filter_parse_regex(buff, len, &search, &not);
2958 		search_len = strlen(search);
2959 	}
2960 
2961 	mutex_lock(&ftrace_lock);
2962 
2963 	if (unlikely(ftrace_disabled))
2964 		goto out_unlock;
2965 
2966 	do_for_each_ftrace_rec(pg, rec) {
2967 		if (ftrace_match_record(rec, mod, search, search_len, type)) {
2968 			ret = enter_record(hash, rec, not);
2969 			if (ret < 0) {
2970 				found = ret;
2971 				goto out_unlock;
2972 			}
2973 			found = 1;
2974 		}
2975 	} while_for_each_ftrace_rec();
2976  out_unlock:
2977 	mutex_unlock(&ftrace_lock);
2978 
2979 	return found;
2980 }
2981 
2982 static int
2983 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2984 {
2985 	return match_records(hash, buff, len, NULL, 0);
2986 }
2987 
2988 static int
2989 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2990 {
2991 	int not = 0;
2992 
2993 	/* blank or '*' mean the same */
2994 	if (strcmp(buff, "*") == 0)
2995 		buff[0] = 0;
2996 
2997 	/* handle the case of 'dont filter this module' */
2998 	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2999 		buff[0] = 0;
3000 		not = 1;
3001 	}
3002 
3003 	return match_records(hash, buff, strlen(buff), mod, not);
3004 }
3005 
3006 /*
3007  * We register the module command as a template to show others how
3008  * to register the a command as well.
3009  */
3010 
3011 static int
3012 ftrace_mod_callback(struct ftrace_hash *hash,
3013 		    char *func, char *cmd, char *param, int enable)
3014 {
3015 	char *mod;
3016 	int ret = -EINVAL;
3017 
3018 	/*
3019 	 * cmd == 'mod' because we only registered this func
3020 	 * for the 'mod' ftrace_func_command.
3021 	 * But if you register one func with multiple commands,
3022 	 * you can tell which command was used by the cmd
3023 	 * parameter.
3024 	 */
3025 
3026 	/* we must have a module name */
3027 	if (!param)
3028 		return ret;
3029 
3030 	mod = strsep(&param, ":");
3031 	if (!strlen(mod))
3032 		return ret;
3033 
3034 	ret = ftrace_match_module_records(hash, func, mod);
3035 	if (!ret)
3036 		ret = -EINVAL;
3037 	if (ret < 0)
3038 		return ret;
3039 
3040 	return 0;
3041 }
3042 
3043 static struct ftrace_func_command ftrace_mod_cmd = {
3044 	.name			= "mod",
3045 	.func			= ftrace_mod_callback,
3046 };
3047 
3048 static int __init ftrace_mod_cmd_init(void)
3049 {
3050 	return register_ftrace_command(&ftrace_mod_cmd);
3051 }
3052 core_initcall(ftrace_mod_cmd_init);
3053 
3054 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3055 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
3056 {
3057 	struct ftrace_func_probe *entry;
3058 	struct hlist_head *hhd;
3059 	unsigned long key;
3060 
3061 	key = hash_long(ip, FTRACE_HASH_BITS);
3062 
3063 	hhd = &ftrace_func_hash[key];
3064 
3065 	if (hlist_empty(hhd))
3066 		return;
3067 
3068 	/*
3069 	 * Disable preemption for these calls to prevent a RCU grace
3070 	 * period. This syncs the hash iteration and freeing of items
3071 	 * on the hash. rcu_read_lock is too dangerous here.
3072 	 */
3073 	preempt_disable_notrace();
3074 	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3075 		if (entry->ip == ip)
3076 			entry->ops->func(ip, parent_ip, &entry->data);
3077 	}
3078 	preempt_enable_notrace();
3079 }
3080 
3081 static struct ftrace_ops trace_probe_ops __read_mostly =
3082 {
3083 	.func		= function_trace_probe_call,
3084 	.flags		= FTRACE_OPS_FL_INITIALIZED,
3085 	INIT_REGEX_LOCK(trace_probe_ops)
3086 };
3087 
3088 static int ftrace_probe_registered;
3089 
3090 static void __enable_ftrace_function_probe(void)
3091 {
3092 	int ret;
3093 	int i;
3094 
3095 	if (ftrace_probe_registered) {
3096 		/* still need to update the function call sites */
3097 		if (ftrace_enabled)
3098 			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3099 		return;
3100 	}
3101 
3102 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3103 		struct hlist_head *hhd = &ftrace_func_hash[i];
3104 		if (hhd->first)
3105 			break;
3106 	}
3107 	/* Nothing registered? */
3108 	if (i == FTRACE_FUNC_HASHSIZE)
3109 		return;
3110 
3111 	ret = ftrace_startup(&trace_probe_ops, 0);
3112 
3113 	ftrace_probe_registered = 1;
3114 }
3115 
3116 static void __disable_ftrace_function_probe(void)
3117 {
3118 	int i;
3119 
3120 	if (!ftrace_probe_registered)
3121 		return;
3122 
3123 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3124 		struct hlist_head *hhd = &ftrace_func_hash[i];
3125 		if (hhd->first)
3126 			return;
3127 	}
3128 
3129 	/* no more funcs left */
3130 	ftrace_shutdown(&trace_probe_ops, 0);
3131 
3132 	ftrace_probe_registered = 0;
3133 }
3134 
3135 
3136 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3137 {
3138 	if (entry->ops->free)
3139 		entry->ops->free(entry->ops, entry->ip, &entry->data);
3140 	kfree(entry);
3141 }
3142 
3143 int
3144 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3145 			      void *data)
3146 {
3147 	struct ftrace_func_probe *entry;
3148 	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3149 	struct ftrace_hash *hash;
3150 	struct ftrace_page *pg;
3151 	struct dyn_ftrace *rec;
3152 	int type, len, not;
3153 	unsigned long key;
3154 	int count = 0;
3155 	char *search;
3156 	int ret;
3157 
3158 	type = filter_parse_regex(glob, strlen(glob), &search, &not);
3159 	len = strlen(search);
3160 
3161 	/* we do not support '!' for function probes */
3162 	if (WARN_ON(not))
3163 		return -EINVAL;
3164 
3165 	mutex_lock(&trace_probe_ops.regex_lock);
3166 
3167 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3168 	if (!hash) {
3169 		count = -ENOMEM;
3170 		goto out;
3171 	}
3172 
3173 	if (unlikely(ftrace_disabled)) {
3174 		count = -ENODEV;
3175 		goto out;
3176 	}
3177 
3178 	mutex_lock(&ftrace_lock);
3179 
3180 	do_for_each_ftrace_rec(pg, rec) {
3181 
3182 		if (!ftrace_match_record(rec, NULL, search, len, type))
3183 			continue;
3184 
3185 		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3186 		if (!entry) {
3187 			/* If we did not process any, then return error */
3188 			if (!count)
3189 				count = -ENOMEM;
3190 			goto out_unlock;
3191 		}
3192 
3193 		count++;
3194 
3195 		entry->data = data;
3196 
3197 		/*
3198 		 * The caller might want to do something special
3199 		 * for each function we find. We call the callback
3200 		 * to give the caller an opportunity to do so.
3201 		 */
3202 		if (ops->init) {
3203 			if (ops->init(ops, rec->ip, &entry->data) < 0) {
3204 				/* caller does not like this func */
3205 				kfree(entry);
3206 				continue;
3207 			}
3208 		}
3209 
3210 		ret = enter_record(hash, rec, 0);
3211 		if (ret < 0) {
3212 			kfree(entry);
3213 			count = ret;
3214 			goto out_unlock;
3215 		}
3216 
3217 		entry->ops = ops;
3218 		entry->ip = rec->ip;
3219 
3220 		key = hash_long(entry->ip, FTRACE_HASH_BITS);
3221 		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3222 
3223 	} while_for_each_ftrace_rec();
3224 
3225 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3226 	if (ret < 0)
3227 		count = ret;
3228 
3229 	__enable_ftrace_function_probe();
3230 
3231  out_unlock:
3232 	mutex_unlock(&ftrace_lock);
3233  out:
3234 	mutex_unlock(&trace_probe_ops.regex_lock);
3235 	free_ftrace_hash(hash);
3236 
3237 	return count;
3238 }
3239 
3240 enum {
3241 	PROBE_TEST_FUNC		= 1,
3242 	PROBE_TEST_DATA		= 2
3243 };
3244 
3245 static void
3246 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3247 				  void *data, int flags)
3248 {
3249 	struct ftrace_func_entry *rec_entry;
3250 	struct ftrace_func_probe *entry;
3251 	struct ftrace_func_probe *p;
3252 	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3253 	struct list_head free_list;
3254 	struct ftrace_hash *hash;
3255 	struct hlist_node *tmp;
3256 	char str[KSYM_SYMBOL_LEN];
3257 	int type = MATCH_FULL;
3258 	int i, len = 0;
3259 	char *search;
3260 
3261 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3262 		glob = NULL;
3263 	else if (glob) {
3264 		int not;
3265 
3266 		type = filter_parse_regex(glob, strlen(glob), &search, &not);
3267 		len = strlen(search);
3268 
3269 		/* we do not support '!' for function probes */
3270 		if (WARN_ON(not))
3271 			return;
3272 	}
3273 
3274 	mutex_lock(&trace_probe_ops.regex_lock);
3275 
3276 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3277 	if (!hash)
3278 		/* Hmm, should report this somehow */
3279 		goto out_unlock;
3280 
3281 	INIT_LIST_HEAD(&free_list);
3282 
3283 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3284 		struct hlist_head *hhd = &ftrace_func_hash[i];
3285 
3286 		hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3287 
3288 			/* break up if statements for readability */
3289 			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3290 				continue;
3291 
3292 			if ((flags & PROBE_TEST_DATA) && entry->data != data)
3293 				continue;
3294 
3295 			/* do this last, since it is the most expensive */
3296 			if (glob) {
3297 				kallsyms_lookup(entry->ip, NULL, NULL,
3298 						NULL, str);
3299 				if (!ftrace_match(str, glob, len, type))
3300 					continue;
3301 			}
3302 
3303 			rec_entry = ftrace_lookup_ip(hash, entry->ip);
3304 			/* It is possible more than one entry had this ip */
3305 			if (rec_entry)
3306 				free_hash_entry(hash, rec_entry);
3307 
3308 			hlist_del_rcu(&entry->node);
3309 			list_add(&entry->free_list, &free_list);
3310 		}
3311 	}
3312 	mutex_lock(&ftrace_lock);
3313 	__disable_ftrace_function_probe();
3314 	/*
3315 	 * Remove after the disable is called. Otherwise, if the last
3316 	 * probe is removed, a null hash means *all enabled*.
3317 	 */
3318 	ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3319 	synchronize_sched();
3320 	list_for_each_entry_safe(entry, p, &free_list, free_list) {
3321 		list_del(&entry->free_list);
3322 		ftrace_free_entry(entry);
3323 	}
3324 	mutex_unlock(&ftrace_lock);
3325 
3326  out_unlock:
3327 	mutex_unlock(&trace_probe_ops.regex_lock);
3328 	free_ftrace_hash(hash);
3329 }
3330 
3331 void
3332 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3333 				void *data)
3334 {
3335 	__unregister_ftrace_function_probe(glob, ops, data,
3336 					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
3337 }
3338 
3339 void
3340 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3341 {
3342 	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3343 }
3344 
3345 void unregister_ftrace_function_probe_all(char *glob)
3346 {
3347 	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3348 }
3349 
3350 static LIST_HEAD(ftrace_commands);
3351 static DEFINE_MUTEX(ftrace_cmd_mutex);
3352 
3353 /*
3354  * Currently we only register ftrace commands from __init, so mark this
3355  * __init too.
3356  */
3357 __init int register_ftrace_command(struct ftrace_func_command *cmd)
3358 {
3359 	struct ftrace_func_command *p;
3360 	int ret = 0;
3361 
3362 	mutex_lock(&ftrace_cmd_mutex);
3363 	list_for_each_entry(p, &ftrace_commands, list) {
3364 		if (strcmp(cmd->name, p->name) == 0) {
3365 			ret = -EBUSY;
3366 			goto out_unlock;
3367 		}
3368 	}
3369 	list_add(&cmd->list, &ftrace_commands);
3370  out_unlock:
3371 	mutex_unlock(&ftrace_cmd_mutex);
3372 
3373 	return ret;
3374 }
3375 
3376 /*
3377  * Currently we only unregister ftrace commands from __init, so mark
3378  * this __init too.
3379  */
3380 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
3381 {
3382 	struct ftrace_func_command *p, *n;
3383 	int ret = -ENODEV;
3384 
3385 	mutex_lock(&ftrace_cmd_mutex);
3386 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3387 		if (strcmp(cmd->name, p->name) == 0) {
3388 			ret = 0;
3389 			list_del_init(&p->list);
3390 			goto out_unlock;
3391 		}
3392 	}
3393  out_unlock:
3394 	mutex_unlock(&ftrace_cmd_mutex);
3395 
3396 	return ret;
3397 }
3398 
3399 static int ftrace_process_regex(struct ftrace_hash *hash,
3400 				char *buff, int len, int enable)
3401 {
3402 	char *func, *command, *next = buff;
3403 	struct ftrace_func_command *p;
3404 	int ret = -EINVAL;
3405 
3406 	func = strsep(&next, ":");
3407 
3408 	if (!next) {
3409 		ret = ftrace_match_records(hash, func, len);
3410 		if (!ret)
3411 			ret = -EINVAL;
3412 		if (ret < 0)
3413 			return ret;
3414 		return 0;
3415 	}
3416 
3417 	/* command found */
3418 
3419 	command = strsep(&next, ":");
3420 
3421 	mutex_lock(&ftrace_cmd_mutex);
3422 	list_for_each_entry(p, &ftrace_commands, list) {
3423 		if (strcmp(p->name, command) == 0) {
3424 			ret = p->func(hash, func, command, next, enable);
3425 			goto out_unlock;
3426 		}
3427 	}
3428  out_unlock:
3429 	mutex_unlock(&ftrace_cmd_mutex);
3430 
3431 	return ret;
3432 }
3433 
3434 static ssize_t
3435 ftrace_regex_write(struct file *file, const char __user *ubuf,
3436 		   size_t cnt, loff_t *ppos, int enable)
3437 {
3438 	struct ftrace_iterator *iter;
3439 	struct trace_parser *parser;
3440 	ssize_t ret, read;
3441 
3442 	if (!cnt)
3443 		return 0;
3444 
3445 	if (file->f_mode & FMODE_READ) {
3446 		struct seq_file *m = file->private_data;
3447 		iter = m->private;
3448 	} else
3449 		iter = file->private_data;
3450 
3451 	if (unlikely(ftrace_disabled))
3452 		return -ENODEV;
3453 
3454 	/* iter->hash is a local copy, so we don't need regex_lock */
3455 
3456 	parser = &iter->parser;
3457 	read = trace_get_user(parser, ubuf, cnt, ppos);
3458 
3459 	if (read >= 0 && trace_parser_loaded(parser) &&
3460 	    !trace_parser_cont(parser)) {
3461 		ret = ftrace_process_regex(iter->hash, parser->buffer,
3462 					   parser->idx, enable);
3463 		trace_parser_clear(parser);
3464 		if (ret < 0)
3465 			goto out;
3466 	}
3467 
3468 	ret = read;
3469  out:
3470 	return ret;
3471 }
3472 
3473 ssize_t
3474 ftrace_filter_write(struct file *file, const char __user *ubuf,
3475 		    size_t cnt, loff_t *ppos)
3476 {
3477 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3478 }
3479 
3480 ssize_t
3481 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3482 		     size_t cnt, loff_t *ppos)
3483 {
3484 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3485 }
3486 
3487 static int
3488 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3489 {
3490 	struct ftrace_func_entry *entry;
3491 
3492 	if (!ftrace_location(ip))
3493 		return -EINVAL;
3494 
3495 	if (remove) {
3496 		entry = ftrace_lookup_ip(hash, ip);
3497 		if (!entry)
3498 			return -ENOENT;
3499 		free_hash_entry(hash, entry);
3500 		return 0;
3501 	}
3502 
3503 	return add_hash_entry(hash, ip);
3504 }
3505 
3506 static void ftrace_ops_update_code(struct ftrace_ops *ops)
3507 {
3508 	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3509 		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3510 }
3511 
3512 static int
3513 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3514 		unsigned long ip, int remove, int reset, int enable)
3515 {
3516 	struct ftrace_hash **orig_hash;
3517 	struct ftrace_hash *hash;
3518 	int ret;
3519 
3520 	/* All global ops uses the global ops filters */
3521 	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3522 		ops = &global_ops;
3523 
3524 	if (unlikely(ftrace_disabled))
3525 		return -ENODEV;
3526 
3527 	mutex_lock(&ops->regex_lock);
3528 
3529 	if (enable)
3530 		orig_hash = &ops->filter_hash;
3531 	else
3532 		orig_hash = &ops->notrace_hash;
3533 
3534 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3535 	if (!hash) {
3536 		ret = -ENOMEM;
3537 		goto out_regex_unlock;
3538 	}
3539 
3540 	if (reset)
3541 		ftrace_filter_reset(hash);
3542 	if (buf && !ftrace_match_records(hash, buf, len)) {
3543 		ret = -EINVAL;
3544 		goto out_regex_unlock;
3545 	}
3546 	if (ip) {
3547 		ret = ftrace_match_addr(hash, ip, remove);
3548 		if (ret < 0)
3549 			goto out_regex_unlock;
3550 	}
3551 
3552 	mutex_lock(&ftrace_lock);
3553 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3554 	if (!ret)
3555 		ftrace_ops_update_code(ops);
3556 
3557 	mutex_unlock(&ftrace_lock);
3558 
3559  out_regex_unlock:
3560 	mutex_unlock(&ops->regex_lock);
3561 
3562 	free_ftrace_hash(hash);
3563 	return ret;
3564 }
3565 
3566 static int
3567 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3568 		int reset, int enable)
3569 {
3570 	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3571 }
3572 
3573 /**
3574  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3575  * @ops - the ops to set the filter with
3576  * @ip - the address to add to or remove from the filter.
3577  * @remove - non zero to remove the ip from the filter
3578  * @reset - non zero to reset all filters before applying this filter.
3579  *
3580  * Filters denote which functions should be enabled when tracing is enabled
3581  * If @ip is NULL, it failes to update filter.
3582  */
3583 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3584 			 int remove, int reset)
3585 {
3586 	ftrace_ops_init(ops);
3587 	return ftrace_set_addr(ops, ip, remove, reset, 1);
3588 }
3589 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
3590 
3591 static int
3592 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3593 		 int reset, int enable)
3594 {
3595 	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3596 }
3597 
3598 /**
3599  * ftrace_set_filter - set a function to filter on in ftrace
3600  * @ops - the ops to set the filter with
3601  * @buf - the string that holds the function filter text.
3602  * @len - the length of the string.
3603  * @reset - non zero to reset all filters before applying this filter.
3604  *
3605  * Filters denote which functions should be enabled when tracing is enabled.
3606  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3607  */
3608 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3609 		       int len, int reset)
3610 {
3611 	ftrace_ops_init(ops);
3612 	return ftrace_set_regex(ops, buf, len, reset, 1);
3613 }
3614 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3615 
3616 /**
3617  * ftrace_set_notrace - set a function to not trace in ftrace
3618  * @ops - the ops to set the notrace filter with
3619  * @buf - the string that holds the function notrace text.
3620  * @len - the length of the string.
3621  * @reset - non zero to reset all filters before applying this filter.
3622  *
3623  * Notrace Filters denote which functions should not be enabled when tracing
3624  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3625  * for tracing.
3626  */
3627 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3628 			int len, int reset)
3629 {
3630 	ftrace_ops_init(ops);
3631 	return ftrace_set_regex(ops, buf, len, reset, 0);
3632 }
3633 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
3634 /**
3635  * ftrace_set_filter - set a function to filter on in ftrace
3636  * @ops - the ops to set the filter with
3637  * @buf - the string that holds the function filter text.
3638  * @len - the length of the string.
3639  * @reset - non zero to reset all filters before applying this filter.
3640  *
3641  * Filters denote which functions should be enabled when tracing is enabled.
3642  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3643  */
3644 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3645 {
3646 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
3647 }
3648 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3649 
3650 /**
3651  * ftrace_set_notrace - set a function to not trace in ftrace
3652  * @ops - the ops to set the notrace filter with
3653  * @buf - the string that holds the function notrace text.
3654  * @len - the length of the string.
3655  * @reset - non zero to reset all filters before applying this filter.
3656  *
3657  * Notrace Filters denote which functions should not be enabled when tracing
3658  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3659  * for tracing.
3660  */
3661 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3662 {
3663 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
3664 }
3665 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3666 
3667 /*
3668  * command line interface to allow users to set filters on boot up.
3669  */
3670 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
3671 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3672 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3673 
3674 /* Used by function selftest to not test if filter is set */
3675 bool ftrace_filter_param __initdata;
3676 
3677 static int __init set_ftrace_notrace(char *str)
3678 {
3679 	ftrace_filter_param = true;
3680 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3681 	return 1;
3682 }
3683 __setup("ftrace_notrace=", set_ftrace_notrace);
3684 
3685 static int __init set_ftrace_filter(char *str)
3686 {
3687 	ftrace_filter_param = true;
3688 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3689 	return 1;
3690 }
3691 __setup("ftrace_filter=", set_ftrace_filter);
3692 
3693 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3694 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3695 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
3696 
3697 static int __init set_graph_function(char *str)
3698 {
3699 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3700 	return 1;
3701 }
3702 __setup("ftrace_graph_filter=", set_graph_function);
3703 
3704 static void __init set_ftrace_early_graph(char *buf)
3705 {
3706 	int ret;
3707 	char *func;
3708 
3709 	while (buf) {
3710 		func = strsep(&buf, ",");
3711 		/* we allow only one expression at a time */
3712 		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3713 				      FTRACE_GRAPH_MAX_FUNCS, func);
3714 		if (ret)
3715 			printk(KERN_DEBUG "ftrace: function %s not "
3716 					  "traceable\n", func);
3717 	}
3718 }
3719 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3720 
3721 void __init
3722 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3723 {
3724 	char *func;
3725 
3726 	ftrace_ops_init(ops);
3727 
3728 	while (buf) {
3729 		func = strsep(&buf, ",");
3730 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
3731 	}
3732 }
3733 
3734 static void __init set_ftrace_early_filters(void)
3735 {
3736 	if (ftrace_filter_buf[0])
3737 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3738 	if (ftrace_notrace_buf[0])
3739 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3740 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3741 	if (ftrace_graph_buf[0])
3742 		set_ftrace_early_graph(ftrace_graph_buf);
3743 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3744 }
3745 
3746 int ftrace_regex_release(struct inode *inode, struct file *file)
3747 {
3748 	struct seq_file *m = (struct seq_file *)file->private_data;
3749 	struct ftrace_iterator *iter;
3750 	struct ftrace_hash **orig_hash;
3751 	struct trace_parser *parser;
3752 	int filter_hash;
3753 	int ret;
3754 
3755 	if (file->f_mode & FMODE_READ) {
3756 		iter = m->private;
3757 		seq_release(inode, file);
3758 	} else
3759 		iter = file->private_data;
3760 
3761 	parser = &iter->parser;
3762 	if (trace_parser_loaded(parser)) {
3763 		parser->buffer[parser->idx] = 0;
3764 		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3765 	}
3766 
3767 	trace_parser_put(parser);
3768 
3769 	mutex_lock(&iter->ops->regex_lock);
3770 
3771 	if (file->f_mode & FMODE_WRITE) {
3772 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3773 
3774 		if (filter_hash)
3775 			orig_hash = &iter->ops->filter_hash;
3776 		else
3777 			orig_hash = &iter->ops->notrace_hash;
3778 
3779 		mutex_lock(&ftrace_lock);
3780 		ret = ftrace_hash_move(iter->ops, filter_hash,
3781 				       orig_hash, iter->hash);
3782 		if (!ret)
3783 			ftrace_ops_update_code(iter->ops);
3784 
3785 		mutex_unlock(&ftrace_lock);
3786 	}
3787 
3788 	mutex_unlock(&iter->ops->regex_lock);
3789 	free_ftrace_hash(iter->hash);
3790 	kfree(iter);
3791 
3792 	return 0;
3793 }
3794 
3795 static const struct file_operations ftrace_avail_fops = {
3796 	.open = ftrace_avail_open,
3797 	.read = seq_read,
3798 	.llseek = seq_lseek,
3799 	.release = seq_release_private,
3800 };
3801 
3802 static const struct file_operations ftrace_enabled_fops = {
3803 	.open = ftrace_enabled_open,
3804 	.read = seq_read,
3805 	.llseek = seq_lseek,
3806 	.release = seq_release_private,
3807 };
3808 
3809 static const struct file_operations ftrace_filter_fops = {
3810 	.open = ftrace_filter_open,
3811 	.read = seq_read,
3812 	.write = ftrace_filter_write,
3813 	.llseek = tracing_lseek,
3814 	.release = ftrace_regex_release,
3815 };
3816 
3817 static const struct file_operations ftrace_notrace_fops = {
3818 	.open = ftrace_notrace_open,
3819 	.read = seq_read,
3820 	.write = ftrace_notrace_write,
3821 	.llseek = tracing_lseek,
3822 	.release = ftrace_regex_release,
3823 };
3824 
3825 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3826 
3827 static DEFINE_MUTEX(graph_lock);
3828 
3829 int ftrace_graph_count;
3830 int ftrace_graph_notrace_count;
3831 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3832 unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3833 
3834 struct ftrace_graph_data {
3835 	unsigned long *table;
3836 	size_t size;
3837 	int *count;
3838 	const struct seq_operations *seq_ops;
3839 };
3840 
3841 static void *
3842 __g_next(struct seq_file *m, loff_t *pos)
3843 {
3844 	struct ftrace_graph_data *fgd = m->private;
3845 
3846 	if (*pos >= *fgd->count)
3847 		return NULL;
3848 	return &fgd->table[*pos];
3849 }
3850 
3851 static void *
3852 g_next(struct seq_file *m, void *v, loff_t *pos)
3853 {
3854 	(*pos)++;
3855 	return __g_next(m, pos);
3856 }
3857 
3858 static void *g_start(struct seq_file *m, loff_t *pos)
3859 {
3860 	struct ftrace_graph_data *fgd = m->private;
3861 
3862 	mutex_lock(&graph_lock);
3863 
3864 	/* Nothing, tell g_show to print all functions are enabled */
3865 	if (!*fgd->count && !*pos)
3866 		return (void *)1;
3867 
3868 	return __g_next(m, pos);
3869 }
3870 
3871 static void g_stop(struct seq_file *m, void *p)
3872 {
3873 	mutex_unlock(&graph_lock);
3874 }
3875 
3876 static int g_show(struct seq_file *m, void *v)
3877 {
3878 	unsigned long *ptr = v;
3879 
3880 	if (!ptr)
3881 		return 0;
3882 
3883 	if (ptr == (unsigned long *)1) {
3884 		seq_printf(m, "#### all functions enabled ####\n");
3885 		return 0;
3886 	}
3887 
3888 	seq_printf(m, "%ps\n", (void *)*ptr);
3889 
3890 	return 0;
3891 }
3892 
3893 static const struct seq_operations ftrace_graph_seq_ops = {
3894 	.start = g_start,
3895 	.next = g_next,
3896 	.stop = g_stop,
3897 	.show = g_show,
3898 };
3899 
3900 static int
3901 __ftrace_graph_open(struct inode *inode, struct file *file,
3902 		    struct ftrace_graph_data *fgd)
3903 {
3904 	int ret = 0;
3905 
3906 	mutex_lock(&graph_lock);
3907 	if ((file->f_mode & FMODE_WRITE) &&
3908 	    (file->f_flags & O_TRUNC)) {
3909 		*fgd->count = 0;
3910 		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
3911 	}
3912 	mutex_unlock(&graph_lock);
3913 
3914 	if (file->f_mode & FMODE_READ) {
3915 		ret = seq_open(file, fgd->seq_ops);
3916 		if (!ret) {
3917 			struct seq_file *m = file->private_data;
3918 			m->private = fgd;
3919 		}
3920 	} else
3921 		file->private_data = fgd;
3922 
3923 	return ret;
3924 }
3925 
3926 static int
3927 ftrace_graph_open(struct inode *inode, struct file *file)
3928 {
3929 	struct ftrace_graph_data *fgd;
3930 
3931 	if (unlikely(ftrace_disabled))
3932 		return -ENODEV;
3933 
3934 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
3935 	if (fgd == NULL)
3936 		return -ENOMEM;
3937 
3938 	fgd->table = ftrace_graph_funcs;
3939 	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
3940 	fgd->count = &ftrace_graph_count;
3941 	fgd->seq_ops = &ftrace_graph_seq_ops;
3942 
3943 	return __ftrace_graph_open(inode, file, fgd);
3944 }
3945 
3946 static int
3947 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
3948 {
3949 	struct ftrace_graph_data *fgd;
3950 
3951 	if (unlikely(ftrace_disabled))
3952 		return -ENODEV;
3953 
3954 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
3955 	if (fgd == NULL)
3956 		return -ENOMEM;
3957 
3958 	fgd->table = ftrace_graph_notrace_funcs;
3959 	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
3960 	fgd->count = &ftrace_graph_notrace_count;
3961 	fgd->seq_ops = &ftrace_graph_seq_ops;
3962 
3963 	return __ftrace_graph_open(inode, file, fgd);
3964 }
3965 
3966 static int
3967 ftrace_graph_release(struct inode *inode, struct file *file)
3968 {
3969 	if (file->f_mode & FMODE_READ) {
3970 		struct seq_file *m = file->private_data;
3971 
3972 		kfree(m->private);
3973 		seq_release(inode, file);
3974 	} else {
3975 		kfree(file->private_data);
3976 	}
3977 
3978 	return 0;
3979 }
3980 
3981 static int
3982 ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
3983 {
3984 	struct dyn_ftrace *rec;
3985 	struct ftrace_page *pg;
3986 	int search_len;
3987 	int fail = 1;
3988 	int type, not;
3989 	char *search;
3990 	bool exists;
3991 	int i;
3992 
3993 	/* decode regex */
3994 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3995 	if (!not && *idx >= size)
3996 		return -EBUSY;
3997 
3998 	search_len = strlen(search);
3999 
4000 	mutex_lock(&ftrace_lock);
4001 
4002 	if (unlikely(ftrace_disabled)) {
4003 		mutex_unlock(&ftrace_lock);
4004 		return -ENODEV;
4005 	}
4006 
4007 	do_for_each_ftrace_rec(pg, rec) {
4008 
4009 		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
4010 			/* if it is in the array */
4011 			exists = false;
4012 			for (i = 0; i < *idx; i++) {
4013 				if (array[i] == rec->ip) {
4014 					exists = true;
4015 					break;
4016 				}
4017 			}
4018 
4019 			if (!not) {
4020 				fail = 0;
4021 				if (!exists) {
4022 					array[(*idx)++] = rec->ip;
4023 					if (*idx >= size)
4024 						goto out;
4025 				}
4026 			} else {
4027 				if (exists) {
4028 					array[i] = array[--(*idx)];
4029 					array[*idx] = 0;
4030 					fail = 0;
4031 				}
4032 			}
4033 		}
4034 	} while_for_each_ftrace_rec();
4035 out:
4036 	mutex_unlock(&ftrace_lock);
4037 
4038 	if (fail)
4039 		return -EINVAL;
4040 
4041 	return 0;
4042 }
4043 
4044 static ssize_t
4045 ftrace_graph_write(struct file *file, const char __user *ubuf,
4046 		   size_t cnt, loff_t *ppos)
4047 {
4048 	struct trace_parser parser;
4049 	ssize_t read, ret = 0;
4050 	struct ftrace_graph_data *fgd = file->private_data;
4051 
4052 	if (!cnt)
4053 		return 0;
4054 
4055 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4056 		return -ENOMEM;
4057 
4058 	read = trace_get_user(&parser, ubuf, cnt, ppos);
4059 
4060 	if (read >= 0 && trace_parser_loaded((&parser))) {
4061 		parser.buffer[parser.idx] = 0;
4062 
4063 		mutex_lock(&graph_lock);
4064 
4065 		/* we allow only one expression at a time */
4066 		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4067 				      parser.buffer);
4068 
4069 		mutex_unlock(&graph_lock);
4070 	}
4071 
4072 	if (!ret)
4073 		ret = read;
4074 
4075 	trace_parser_put(&parser);
4076 
4077 	return ret;
4078 }
4079 
4080 static const struct file_operations ftrace_graph_fops = {
4081 	.open		= ftrace_graph_open,
4082 	.read		= seq_read,
4083 	.write		= ftrace_graph_write,
4084 	.llseek		= tracing_lseek,
4085 	.release	= ftrace_graph_release,
4086 };
4087 
4088 static const struct file_operations ftrace_graph_notrace_fops = {
4089 	.open		= ftrace_graph_notrace_open,
4090 	.read		= seq_read,
4091 	.write		= ftrace_graph_write,
4092 	.llseek		= tracing_lseek,
4093 	.release	= ftrace_graph_release,
4094 };
4095 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4096 
4097 void ftrace_create_filter_files(struct ftrace_ops *ops,
4098 				struct dentry *parent)
4099 {
4100 
4101 	trace_create_file("set_ftrace_filter", 0644, parent,
4102 			  ops, &ftrace_filter_fops);
4103 
4104 	trace_create_file("set_ftrace_notrace", 0644, parent,
4105 			  ops, &ftrace_notrace_fops);
4106 }
4107 
4108 /*
4109  * The name "destroy_filter_files" is really a misnomer. Although
4110  * in the future, it may actualy delete the files, but this is
4111  * really intended to make sure the ops passed in are disabled
4112  * and that when this function returns, the caller is free to
4113  * free the ops.
4114  *
4115  * The "destroy" name is only to match the "create" name that this
4116  * should be paired with.
4117  */
4118 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4119 {
4120 	mutex_lock(&ftrace_lock);
4121 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
4122 		ftrace_shutdown(ops, 0);
4123 	ops->flags |= FTRACE_OPS_FL_DELETED;
4124 	mutex_unlock(&ftrace_lock);
4125 }
4126 
4127 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
4128 {
4129 
4130 	trace_create_file("available_filter_functions", 0444,
4131 			d_tracer, NULL, &ftrace_avail_fops);
4132 
4133 	trace_create_file("enabled_functions", 0444,
4134 			d_tracer, NULL, &ftrace_enabled_fops);
4135 
4136 	ftrace_create_filter_files(&global_ops, d_tracer);
4137 
4138 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4139 	trace_create_file("set_graph_function", 0444, d_tracer,
4140 				    NULL,
4141 				    &ftrace_graph_fops);
4142 	trace_create_file("set_graph_notrace", 0444, d_tracer,
4143 				    NULL,
4144 				    &ftrace_graph_notrace_fops);
4145 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4146 
4147 	return 0;
4148 }
4149 
4150 static int ftrace_cmp_ips(const void *a, const void *b)
4151 {
4152 	const unsigned long *ipa = a;
4153 	const unsigned long *ipb = b;
4154 
4155 	if (*ipa > *ipb)
4156 		return 1;
4157 	if (*ipa < *ipb)
4158 		return -1;
4159 	return 0;
4160 }
4161 
4162 static void ftrace_swap_ips(void *a, void *b, int size)
4163 {
4164 	unsigned long *ipa = a;
4165 	unsigned long *ipb = b;
4166 	unsigned long t;
4167 
4168 	t = *ipa;
4169 	*ipa = *ipb;
4170 	*ipb = t;
4171 }
4172 
4173 static int ftrace_process_locs(struct module *mod,
4174 			       unsigned long *start,
4175 			       unsigned long *end)
4176 {
4177 	struct ftrace_page *start_pg;
4178 	struct ftrace_page *pg;
4179 	struct dyn_ftrace *rec;
4180 	unsigned long count;
4181 	unsigned long *p;
4182 	unsigned long addr;
4183 	unsigned long flags = 0; /* Shut up gcc */
4184 	int ret = -ENOMEM;
4185 
4186 	count = end - start;
4187 
4188 	if (!count)
4189 		return 0;
4190 
4191 	sort(start, count, sizeof(*start),
4192 	     ftrace_cmp_ips, ftrace_swap_ips);
4193 
4194 	start_pg = ftrace_allocate_pages(count);
4195 	if (!start_pg)
4196 		return -ENOMEM;
4197 
4198 	mutex_lock(&ftrace_lock);
4199 
4200 	/*
4201 	 * Core and each module needs their own pages, as
4202 	 * modules will free them when they are removed.
4203 	 * Force a new page to be allocated for modules.
4204 	 */
4205 	if (!mod) {
4206 		WARN_ON(ftrace_pages || ftrace_pages_start);
4207 		/* First initialization */
4208 		ftrace_pages = ftrace_pages_start = start_pg;
4209 	} else {
4210 		if (!ftrace_pages)
4211 			goto out;
4212 
4213 		if (WARN_ON(ftrace_pages->next)) {
4214 			/* Hmm, we have free pages? */
4215 			while (ftrace_pages->next)
4216 				ftrace_pages = ftrace_pages->next;
4217 		}
4218 
4219 		ftrace_pages->next = start_pg;
4220 	}
4221 
4222 	p = start;
4223 	pg = start_pg;
4224 	while (p < end) {
4225 		addr = ftrace_call_adjust(*p++);
4226 		/*
4227 		 * Some architecture linkers will pad between
4228 		 * the different mcount_loc sections of different
4229 		 * object files to satisfy alignments.
4230 		 * Skip any NULL pointers.
4231 		 */
4232 		if (!addr)
4233 			continue;
4234 
4235 		if (pg->index == pg->size) {
4236 			/* We should have allocated enough */
4237 			if (WARN_ON(!pg->next))
4238 				break;
4239 			pg = pg->next;
4240 		}
4241 
4242 		rec = &pg->records[pg->index++];
4243 		rec->ip = addr;
4244 	}
4245 
4246 	/* We should have used all pages */
4247 	WARN_ON(pg->next);
4248 
4249 	/* Assign the last page to ftrace_pages */
4250 	ftrace_pages = pg;
4251 
4252 	/*
4253 	 * We only need to disable interrupts on start up
4254 	 * because we are modifying code that an interrupt
4255 	 * may execute, and the modification is not atomic.
4256 	 * But for modules, nothing runs the code we modify
4257 	 * until we are finished with it, and there's no
4258 	 * reason to cause large interrupt latencies while we do it.
4259 	 */
4260 	if (!mod)
4261 		local_irq_save(flags);
4262 	ftrace_update_code(mod, start_pg);
4263 	if (!mod)
4264 		local_irq_restore(flags);
4265 	ret = 0;
4266  out:
4267 	mutex_unlock(&ftrace_lock);
4268 
4269 	return ret;
4270 }
4271 
4272 #ifdef CONFIG_MODULES
4273 
4274 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4275 
4276 void ftrace_release_mod(struct module *mod)
4277 {
4278 	struct dyn_ftrace *rec;
4279 	struct ftrace_page **last_pg;
4280 	struct ftrace_page *pg;
4281 	int order;
4282 
4283 	mutex_lock(&ftrace_lock);
4284 
4285 	if (ftrace_disabled)
4286 		goto out_unlock;
4287 
4288 	/*
4289 	 * Each module has its own ftrace_pages, remove
4290 	 * them from the list.
4291 	 */
4292 	last_pg = &ftrace_pages_start;
4293 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4294 		rec = &pg->records[0];
4295 		if (within_module_core(rec->ip, mod)) {
4296 			/*
4297 			 * As core pages are first, the first
4298 			 * page should never be a module page.
4299 			 */
4300 			if (WARN_ON(pg == ftrace_pages_start))
4301 				goto out_unlock;
4302 
4303 			/* Check if we are deleting the last page */
4304 			if (pg == ftrace_pages)
4305 				ftrace_pages = next_to_ftrace_page(last_pg);
4306 
4307 			*last_pg = pg->next;
4308 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4309 			free_pages((unsigned long)pg->records, order);
4310 			kfree(pg);
4311 		} else
4312 			last_pg = &pg->next;
4313 	}
4314  out_unlock:
4315 	mutex_unlock(&ftrace_lock);
4316 }
4317 
4318 static void ftrace_init_module(struct module *mod,
4319 			       unsigned long *start, unsigned long *end)
4320 {
4321 	if (ftrace_disabled || start == end)
4322 		return;
4323 	ftrace_process_locs(mod, start, end);
4324 }
4325 
4326 static int ftrace_module_notify_enter(struct notifier_block *self,
4327 				      unsigned long val, void *data)
4328 {
4329 	struct module *mod = data;
4330 
4331 	if (val == MODULE_STATE_COMING)
4332 		ftrace_init_module(mod, mod->ftrace_callsites,
4333 				   mod->ftrace_callsites +
4334 				   mod->num_ftrace_callsites);
4335 	return 0;
4336 }
4337 
4338 static int ftrace_module_notify_exit(struct notifier_block *self,
4339 				     unsigned long val, void *data)
4340 {
4341 	struct module *mod = data;
4342 
4343 	if (val == MODULE_STATE_GOING)
4344 		ftrace_release_mod(mod);
4345 
4346 	return 0;
4347 }
4348 #else
4349 static int ftrace_module_notify_enter(struct notifier_block *self,
4350 				      unsigned long val, void *data)
4351 {
4352 	return 0;
4353 }
4354 static int ftrace_module_notify_exit(struct notifier_block *self,
4355 				     unsigned long val, void *data)
4356 {
4357 	return 0;
4358 }
4359 #endif /* CONFIG_MODULES */
4360 
4361 struct notifier_block ftrace_module_enter_nb = {
4362 	.notifier_call = ftrace_module_notify_enter,
4363 	.priority = INT_MAX,	/* Run before anything that can use kprobes */
4364 };
4365 
4366 struct notifier_block ftrace_module_exit_nb = {
4367 	.notifier_call = ftrace_module_notify_exit,
4368 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
4369 };
4370 
4371 void __init ftrace_init(void)
4372 {
4373 	extern unsigned long __start_mcount_loc[];
4374 	extern unsigned long __stop_mcount_loc[];
4375 	unsigned long count, flags;
4376 	int ret;
4377 
4378 	local_irq_save(flags);
4379 	ret = ftrace_dyn_arch_init();
4380 	local_irq_restore(flags);
4381 	if (ret)
4382 		goto failed;
4383 
4384 	count = __stop_mcount_loc - __start_mcount_loc;
4385 	if (!count) {
4386 		pr_info("ftrace: No functions to be traced?\n");
4387 		goto failed;
4388 	}
4389 
4390 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
4391 		count, count / ENTRIES_PER_PAGE + 1);
4392 
4393 	last_ftrace_enabled = ftrace_enabled = 1;
4394 
4395 	ret = ftrace_process_locs(NULL,
4396 				  __start_mcount_loc,
4397 				  __stop_mcount_loc);
4398 
4399 	ret = register_module_notifier(&ftrace_module_enter_nb);
4400 	if (ret)
4401 		pr_warning("Failed to register trace ftrace module enter notifier\n");
4402 
4403 	ret = register_module_notifier(&ftrace_module_exit_nb);
4404 	if (ret)
4405 		pr_warning("Failed to register trace ftrace module exit notifier\n");
4406 
4407 	set_ftrace_early_filters();
4408 
4409 	return;
4410  failed:
4411 	ftrace_disabled = 1;
4412 }
4413 
4414 #else
4415 
4416 static struct ftrace_ops global_ops = {
4417 	.func			= ftrace_stub,
4418 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4419 	INIT_REGEX_LOCK(global_ops)
4420 };
4421 
4422 static int __init ftrace_nodyn_init(void)
4423 {
4424 	ftrace_enabled = 1;
4425 	return 0;
4426 }
4427 core_initcall(ftrace_nodyn_init);
4428 
4429 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4430 static inline void ftrace_startup_enable(int command) { }
4431 /* Keep as macros so we do not need to define the commands */
4432 # define ftrace_startup(ops, command)					\
4433 	({								\
4434 		int ___ret = __register_ftrace_function(ops);		\
4435 		if (!___ret)						\
4436 			(ops)->flags |= FTRACE_OPS_FL_ENABLED;		\
4437 		___ret;							\
4438 	})
4439 # define ftrace_shutdown(ops, command)					\
4440 	({								\
4441 		int ___ret = __unregister_ftrace_function(ops);		\
4442 		if (!___ret)						\
4443 			(ops)->flags &= ~FTRACE_OPS_FL_ENABLED;		\
4444 		___ret;							\
4445 	})
4446 
4447 # define ftrace_startup_sysctl()	do { } while (0)
4448 # define ftrace_shutdown_sysctl()	do { } while (0)
4449 
4450 static inline int
4451 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4452 {
4453 	return 1;
4454 }
4455 
4456 #endif /* CONFIG_DYNAMIC_FTRACE */
4457 
4458 static void
4459 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4460 			struct ftrace_ops *op, struct pt_regs *regs)
4461 {
4462 	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4463 		return;
4464 
4465 	/*
4466 	 * Some of the ops may be dynamically allocated,
4467 	 * they must be freed after a synchronize_sched().
4468 	 */
4469 	preempt_disable_notrace();
4470 	trace_recursion_set(TRACE_CONTROL_BIT);
4471 
4472 	/*
4473 	 * Control funcs (perf) uses RCU. Only trace if
4474 	 * RCU is currently active.
4475 	 */
4476 	if (!rcu_is_watching())
4477 		goto out;
4478 
4479 	do_for_each_ftrace_op(op, ftrace_control_list) {
4480 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4481 		    !ftrace_function_local_disabled(op) &&
4482 		    ftrace_ops_test(op, ip, regs))
4483 			op->func(ip, parent_ip, op, regs);
4484 	} while_for_each_ftrace_op(op);
4485  out:
4486 	trace_recursion_clear(TRACE_CONTROL_BIT);
4487 	preempt_enable_notrace();
4488 }
4489 
4490 static struct ftrace_ops control_ops = {
4491 	.func	= ftrace_ops_control_func,
4492 	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4493 	INIT_REGEX_LOCK(control_ops)
4494 };
4495 
4496 static inline void
4497 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4498 		       struct ftrace_ops *ignored, struct pt_regs *regs)
4499 {
4500 	struct ftrace_ops *op;
4501 	int bit;
4502 
4503 	if (function_trace_stop)
4504 		return;
4505 
4506 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4507 	if (bit < 0)
4508 		return;
4509 
4510 	/*
4511 	 * Some of the ops may be dynamically allocated,
4512 	 * they must be freed after a synchronize_sched().
4513 	 */
4514 	preempt_disable_notrace();
4515 	do_for_each_ftrace_op(op, ftrace_ops_list) {
4516 		if (ftrace_ops_test(op, ip, regs))
4517 			op->func(ip, parent_ip, op, regs);
4518 	} while_for_each_ftrace_op(op);
4519 	preempt_enable_notrace();
4520 	trace_clear_recursion(bit);
4521 }
4522 
4523 /*
4524  * Some archs only support passing ip and parent_ip. Even though
4525  * the list function ignores the op parameter, we do not want any
4526  * C side effects, where a function is called without the caller
4527  * sending a third parameter.
4528  * Archs are to support both the regs and ftrace_ops at the same time.
4529  * If they support ftrace_ops, it is assumed they support regs.
4530  * If call backs want to use regs, they must either check for regs
4531  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4532  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4533  * An architecture can pass partial regs with ftrace_ops and still
4534  * set the ARCH_SUPPORT_FTARCE_OPS.
4535  */
4536 #if ARCH_SUPPORTS_FTRACE_OPS
4537 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4538 				 struct ftrace_ops *op, struct pt_regs *regs)
4539 {
4540 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4541 }
4542 #else
4543 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4544 {
4545 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4546 }
4547 #endif
4548 
4549 static void clear_ftrace_swapper(void)
4550 {
4551 	struct task_struct *p;
4552 	int cpu;
4553 
4554 	get_online_cpus();
4555 	for_each_online_cpu(cpu) {
4556 		p = idle_task(cpu);
4557 		clear_tsk_trace_trace(p);
4558 	}
4559 	put_online_cpus();
4560 }
4561 
4562 static void set_ftrace_swapper(void)
4563 {
4564 	struct task_struct *p;
4565 	int cpu;
4566 
4567 	get_online_cpus();
4568 	for_each_online_cpu(cpu) {
4569 		p = idle_task(cpu);
4570 		set_tsk_trace_trace(p);
4571 	}
4572 	put_online_cpus();
4573 }
4574 
4575 static void clear_ftrace_pid(struct pid *pid)
4576 {
4577 	struct task_struct *p;
4578 
4579 	rcu_read_lock();
4580 	do_each_pid_task(pid, PIDTYPE_PID, p) {
4581 		clear_tsk_trace_trace(p);
4582 	} while_each_pid_task(pid, PIDTYPE_PID, p);
4583 	rcu_read_unlock();
4584 
4585 	put_pid(pid);
4586 }
4587 
4588 static void set_ftrace_pid(struct pid *pid)
4589 {
4590 	struct task_struct *p;
4591 
4592 	rcu_read_lock();
4593 	do_each_pid_task(pid, PIDTYPE_PID, p) {
4594 		set_tsk_trace_trace(p);
4595 	} while_each_pid_task(pid, PIDTYPE_PID, p);
4596 	rcu_read_unlock();
4597 }
4598 
4599 static void clear_ftrace_pid_task(struct pid *pid)
4600 {
4601 	if (pid == ftrace_swapper_pid)
4602 		clear_ftrace_swapper();
4603 	else
4604 		clear_ftrace_pid(pid);
4605 }
4606 
4607 static void set_ftrace_pid_task(struct pid *pid)
4608 {
4609 	if (pid == ftrace_swapper_pid)
4610 		set_ftrace_swapper();
4611 	else
4612 		set_ftrace_pid(pid);
4613 }
4614 
4615 static int ftrace_pid_add(int p)
4616 {
4617 	struct pid *pid;
4618 	struct ftrace_pid *fpid;
4619 	int ret = -EINVAL;
4620 
4621 	mutex_lock(&ftrace_lock);
4622 
4623 	if (!p)
4624 		pid = ftrace_swapper_pid;
4625 	else
4626 		pid = find_get_pid(p);
4627 
4628 	if (!pid)
4629 		goto out;
4630 
4631 	ret = 0;
4632 
4633 	list_for_each_entry(fpid, &ftrace_pids, list)
4634 		if (fpid->pid == pid)
4635 			goto out_put;
4636 
4637 	ret = -ENOMEM;
4638 
4639 	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4640 	if (!fpid)
4641 		goto out_put;
4642 
4643 	list_add(&fpid->list, &ftrace_pids);
4644 	fpid->pid = pid;
4645 
4646 	set_ftrace_pid_task(pid);
4647 
4648 	ftrace_update_pid_func();
4649 	ftrace_startup_enable(0);
4650 
4651 	mutex_unlock(&ftrace_lock);
4652 	return 0;
4653 
4654 out_put:
4655 	if (pid != ftrace_swapper_pid)
4656 		put_pid(pid);
4657 
4658 out:
4659 	mutex_unlock(&ftrace_lock);
4660 	return ret;
4661 }
4662 
4663 static void ftrace_pid_reset(void)
4664 {
4665 	struct ftrace_pid *fpid, *safe;
4666 
4667 	mutex_lock(&ftrace_lock);
4668 	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4669 		struct pid *pid = fpid->pid;
4670 
4671 		clear_ftrace_pid_task(pid);
4672 
4673 		list_del(&fpid->list);
4674 		kfree(fpid);
4675 	}
4676 
4677 	ftrace_update_pid_func();
4678 	ftrace_startup_enable(0);
4679 
4680 	mutex_unlock(&ftrace_lock);
4681 }
4682 
4683 static void *fpid_start(struct seq_file *m, loff_t *pos)
4684 {
4685 	mutex_lock(&ftrace_lock);
4686 
4687 	if (list_empty(&ftrace_pids) && (!*pos))
4688 		return (void *) 1;
4689 
4690 	return seq_list_start(&ftrace_pids, *pos);
4691 }
4692 
4693 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4694 {
4695 	if (v == (void *)1)
4696 		return NULL;
4697 
4698 	return seq_list_next(v, &ftrace_pids, pos);
4699 }
4700 
4701 static void fpid_stop(struct seq_file *m, void *p)
4702 {
4703 	mutex_unlock(&ftrace_lock);
4704 }
4705 
4706 static int fpid_show(struct seq_file *m, void *v)
4707 {
4708 	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4709 
4710 	if (v == (void *)1) {
4711 		seq_printf(m, "no pid\n");
4712 		return 0;
4713 	}
4714 
4715 	if (fpid->pid == ftrace_swapper_pid)
4716 		seq_printf(m, "swapper tasks\n");
4717 	else
4718 		seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4719 
4720 	return 0;
4721 }
4722 
4723 static const struct seq_operations ftrace_pid_sops = {
4724 	.start = fpid_start,
4725 	.next = fpid_next,
4726 	.stop = fpid_stop,
4727 	.show = fpid_show,
4728 };
4729 
4730 static int
4731 ftrace_pid_open(struct inode *inode, struct file *file)
4732 {
4733 	int ret = 0;
4734 
4735 	if ((file->f_mode & FMODE_WRITE) &&
4736 	    (file->f_flags & O_TRUNC))
4737 		ftrace_pid_reset();
4738 
4739 	if (file->f_mode & FMODE_READ)
4740 		ret = seq_open(file, &ftrace_pid_sops);
4741 
4742 	return ret;
4743 }
4744 
4745 static ssize_t
4746 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4747 		   size_t cnt, loff_t *ppos)
4748 {
4749 	char buf[64], *tmp;
4750 	long val;
4751 	int ret;
4752 
4753 	if (cnt >= sizeof(buf))
4754 		return -EINVAL;
4755 
4756 	if (copy_from_user(&buf, ubuf, cnt))
4757 		return -EFAULT;
4758 
4759 	buf[cnt] = 0;
4760 
4761 	/*
4762 	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4763 	 * to clean the filter quietly.
4764 	 */
4765 	tmp = strstrip(buf);
4766 	if (strlen(tmp) == 0)
4767 		return 1;
4768 
4769 	ret = kstrtol(tmp, 10, &val);
4770 	if (ret < 0)
4771 		return ret;
4772 
4773 	ret = ftrace_pid_add(val);
4774 
4775 	return ret ? ret : cnt;
4776 }
4777 
4778 static int
4779 ftrace_pid_release(struct inode *inode, struct file *file)
4780 {
4781 	if (file->f_mode & FMODE_READ)
4782 		seq_release(inode, file);
4783 
4784 	return 0;
4785 }
4786 
4787 static const struct file_operations ftrace_pid_fops = {
4788 	.open		= ftrace_pid_open,
4789 	.write		= ftrace_pid_write,
4790 	.read		= seq_read,
4791 	.llseek		= tracing_lseek,
4792 	.release	= ftrace_pid_release,
4793 };
4794 
4795 static __init int ftrace_init_debugfs(void)
4796 {
4797 	struct dentry *d_tracer;
4798 
4799 	d_tracer = tracing_init_dentry();
4800 	if (!d_tracer)
4801 		return 0;
4802 
4803 	ftrace_init_dyn_debugfs(d_tracer);
4804 
4805 	trace_create_file("set_ftrace_pid", 0644, d_tracer,
4806 			    NULL, &ftrace_pid_fops);
4807 
4808 	ftrace_profile_debugfs(d_tracer);
4809 
4810 	return 0;
4811 }
4812 fs_initcall(ftrace_init_debugfs);
4813 
4814 /**
4815  * ftrace_kill - kill ftrace
4816  *
4817  * This function should be used by panic code. It stops ftrace
4818  * but in a not so nice way. If you need to simply kill ftrace
4819  * from a non-atomic section, use ftrace_kill.
4820  */
4821 void ftrace_kill(void)
4822 {
4823 	ftrace_disabled = 1;
4824 	ftrace_enabled = 0;
4825 	clear_ftrace_function();
4826 }
4827 
4828 /**
4829  * Test if ftrace is dead or not.
4830  */
4831 int ftrace_is_dead(void)
4832 {
4833 	return ftrace_disabled;
4834 }
4835 
4836 /**
4837  * register_ftrace_function - register a function for profiling
4838  * @ops - ops structure that holds the function for profiling.
4839  *
4840  * Register a function to be called by all functions in the
4841  * kernel.
4842  *
4843  * Note: @ops->func and all the functions it calls must be labeled
4844  *       with "notrace", otherwise it will go into a
4845  *       recursive loop.
4846  */
4847 int register_ftrace_function(struct ftrace_ops *ops)
4848 {
4849 	int ret = -1;
4850 
4851 	ftrace_ops_init(ops);
4852 
4853 	mutex_lock(&ftrace_lock);
4854 
4855 	ret = ftrace_startup(ops, 0);
4856 
4857 	mutex_unlock(&ftrace_lock);
4858 
4859 	return ret;
4860 }
4861 EXPORT_SYMBOL_GPL(register_ftrace_function);
4862 
4863 /**
4864  * unregister_ftrace_function - unregister a function for profiling.
4865  * @ops - ops structure that holds the function to unregister
4866  *
4867  * Unregister a function that was added to be called by ftrace profiling.
4868  */
4869 int unregister_ftrace_function(struct ftrace_ops *ops)
4870 {
4871 	int ret;
4872 
4873 	mutex_lock(&ftrace_lock);
4874 	ret = ftrace_shutdown(ops, 0);
4875 	mutex_unlock(&ftrace_lock);
4876 
4877 	return ret;
4878 }
4879 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4880 
4881 int
4882 ftrace_enable_sysctl(struct ctl_table *table, int write,
4883 		     void __user *buffer, size_t *lenp,
4884 		     loff_t *ppos)
4885 {
4886 	int ret = -ENODEV;
4887 
4888 	mutex_lock(&ftrace_lock);
4889 
4890 	if (unlikely(ftrace_disabled))
4891 		goto out;
4892 
4893 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
4894 
4895 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4896 		goto out;
4897 
4898 	last_ftrace_enabled = !!ftrace_enabled;
4899 
4900 	if (ftrace_enabled) {
4901 
4902 		ftrace_startup_sysctl();
4903 
4904 		/* we are starting ftrace again */
4905 		if (ftrace_ops_list != &ftrace_list_end)
4906 			update_ftrace_function();
4907 
4908 	} else {
4909 		/* stopping ftrace calls (just send to ftrace_stub) */
4910 		ftrace_trace_function = ftrace_stub;
4911 
4912 		ftrace_shutdown_sysctl();
4913 	}
4914 
4915  out:
4916 	mutex_unlock(&ftrace_lock);
4917 	return ret;
4918 }
4919 
4920 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4921 
4922 static int ftrace_graph_active;
4923 static struct notifier_block ftrace_suspend_notifier;
4924 
4925 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4926 {
4927 	return 0;
4928 }
4929 
4930 /* The callbacks that hook a function */
4931 trace_func_graph_ret_t ftrace_graph_return =
4932 			(trace_func_graph_ret_t)ftrace_stub;
4933 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4934 static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
4935 
4936 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4937 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4938 {
4939 	int i;
4940 	int ret = 0;
4941 	unsigned long flags;
4942 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4943 	struct task_struct *g, *t;
4944 
4945 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4946 		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4947 					* sizeof(struct ftrace_ret_stack),
4948 					GFP_KERNEL);
4949 		if (!ret_stack_list[i]) {
4950 			start = 0;
4951 			end = i;
4952 			ret = -ENOMEM;
4953 			goto free;
4954 		}
4955 	}
4956 
4957 	read_lock_irqsave(&tasklist_lock, flags);
4958 	do_each_thread(g, t) {
4959 		if (start == end) {
4960 			ret = -EAGAIN;
4961 			goto unlock;
4962 		}
4963 
4964 		if (t->ret_stack == NULL) {
4965 			atomic_set(&t->tracing_graph_pause, 0);
4966 			atomic_set(&t->trace_overrun, 0);
4967 			t->curr_ret_stack = -1;
4968 			/* Make sure the tasks see the -1 first: */
4969 			smp_wmb();
4970 			t->ret_stack = ret_stack_list[start++];
4971 		}
4972 	} while_each_thread(g, t);
4973 
4974 unlock:
4975 	read_unlock_irqrestore(&tasklist_lock, flags);
4976 free:
4977 	for (i = start; i < end; i++)
4978 		kfree(ret_stack_list[i]);
4979 	return ret;
4980 }
4981 
4982 static void
4983 ftrace_graph_probe_sched_switch(void *ignore,
4984 			struct task_struct *prev, struct task_struct *next)
4985 {
4986 	unsigned long long timestamp;
4987 	int index;
4988 
4989 	/*
4990 	 * Does the user want to count the time a function was asleep.
4991 	 * If so, do not update the time stamps.
4992 	 */
4993 	if (trace_flags & TRACE_ITER_SLEEP_TIME)
4994 		return;
4995 
4996 	timestamp = trace_clock_local();
4997 
4998 	prev->ftrace_timestamp = timestamp;
4999 
5000 	/* only process tasks that we timestamped */
5001 	if (!next->ftrace_timestamp)
5002 		return;
5003 
5004 	/*
5005 	 * Update all the counters in next to make up for the
5006 	 * time next was sleeping.
5007 	 */
5008 	timestamp -= next->ftrace_timestamp;
5009 
5010 	for (index = next->curr_ret_stack; index >= 0; index--)
5011 		next->ret_stack[index].calltime += timestamp;
5012 }
5013 
5014 /* Allocate a return stack for each task */
5015 static int start_graph_tracing(void)
5016 {
5017 	struct ftrace_ret_stack **ret_stack_list;
5018 	int ret, cpu;
5019 
5020 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5021 				sizeof(struct ftrace_ret_stack *),
5022 				GFP_KERNEL);
5023 
5024 	if (!ret_stack_list)
5025 		return -ENOMEM;
5026 
5027 	/* The cpu_boot init_task->ret_stack will never be freed */
5028 	for_each_online_cpu(cpu) {
5029 		if (!idle_task(cpu)->ret_stack)
5030 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
5031 	}
5032 
5033 	do {
5034 		ret = alloc_retstack_tasklist(ret_stack_list);
5035 	} while (ret == -EAGAIN);
5036 
5037 	if (!ret) {
5038 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5039 		if (ret)
5040 			pr_info("ftrace_graph: Couldn't activate tracepoint"
5041 				" probe to kernel_sched_switch\n");
5042 	}
5043 
5044 	kfree(ret_stack_list);
5045 	return ret;
5046 }
5047 
5048 /*
5049  * Hibernation protection.
5050  * The state of the current task is too much unstable during
5051  * suspend/restore to disk. We want to protect against that.
5052  */
5053 static int
5054 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5055 							void *unused)
5056 {
5057 	switch (state) {
5058 	case PM_HIBERNATION_PREPARE:
5059 		pause_graph_tracing();
5060 		break;
5061 
5062 	case PM_POST_HIBERNATION:
5063 		unpause_graph_tracing();
5064 		break;
5065 	}
5066 	return NOTIFY_DONE;
5067 }
5068 
5069 /* Just a place holder for function graph */
5070 static struct ftrace_ops fgraph_ops __read_mostly = {
5071 	.func		= ftrace_stub,
5072 	.flags		= FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_GLOBAL |
5073 				FTRACE_OPS_FL_RECURSION_SAFE,
5074 };
5075 
5076 static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5077 {
5078 	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5079 		return 0;
5080 	return __ftrace_graph_entry(trace);
5081 }
5082 
5083 /*
5084  * The function graph tracer should only trace the functions defined
5085  * by set_ftrace_filter and set_ftrace_notrace. If another function
5086  * tracer ops is registered, the graph tracer requires testing the
5087  * function against the global ops, and not just trace any function
5088  * that any ftrace_ops registered.
5089  */
5090 static void update_function_graph_func(void)
5091 {
5092 	if (ftrace_ops_list == &ftrace_list_end ||
5093 	    (ftrace_ops_list == &global_ops &&
5094 	     global_ops.next == &ftrace_list_end))
5095 		ftrace_graph_entry = __ftrace_graph_entry;
5096 	else
5097 		ftrace_graph_entry = ftrace_graph_entry_test;
5098 }
5099 
5100 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5101 			trace_func_graph_ent_t entryfunc)
5102 {
5103 	int ret = 0;
5104 
5105 	mutex_lock(&ftrace_lock);
5106 
5107 	/* we currently allow only one tracer registered at a time */
5108 	if (ftrace_graph_active) {
5109 		ret = -EBUSY;
5110 		goto out;
5111 	}
5112 
5113 	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
5114 	register_pm_notifier(&ftrace_suspend_notifier);
5115 
5116 	ftrace_graph_active++;
5117 	ret = start_graph_tracing();
5118 	if (ret) {
5119 		ftrace_graph_active--;
5120 		goto out;
5121 	}
5122 
5123 	ftrace_graph_return = retfunc;
5124 
5125 	/*
5126 	 * Update the indirect function to the entryfunc, and the
5127 	 * function that gets called to the entry_test first. Then
5128 	 * call the update fgraph entry function to determine if
5129 	 * the entryfunc should be called directly or not.
5130 	 */
5131 	__ftrace_graph_entry = entryfunc;
5132 	ftrace_graph_entry = ftrace_graph_entry_test;
5133 	update_function_graph_func();
5134 
5135 	ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
5136 
5137 out:
5138 	mutex_unlock(&ftrace_lock);
5139 	return ret;
5140 }
5141 
5142 void unregister_ftrace_graph(void)
5143 {
5144 	mutex_lock(&ftrace_lock);
5145 
5146 	if (unlikely(!ftrace_graph_active))
5147 		goto out;
5148 
5149 	ftrace_graph_active--;
5150 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
5151 	ftrace_graph_entry = ftrace_graph_entry_stub;
5152 	__ftrace_graph_entry = ftrace_graph_entry_stub;
5153 	ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
5154 	unregister_pm_notifier(&ftrace_suspend_notifier);
5155 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
5156 
5157  out:
5158 	mutex_unlock(&ftrace_lock);
5159 }
5160 
5161 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5162 
5163 static void
5164 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5165 {
5166 	atomic_set(&t->tracing_graph_pause, 0);
5167 	atomic_set(&t->trace_overrun, 0);
5168 	t->ftrace_timestamp = 0;
5169 	/* make curr_ret_stack visible before we add the ret_stack */
5170 	smp_wmb();
5171 	t->ret_stack = ret_stack;
5172 }
5173 
5174 /*
5175  * Allocate a return stack for the idle task. May be the first
5176  * time through, or it may be done by CPU hotplug online.
5177  */
5178 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5179 {
5180 	t->curr_ret_stack = -1;
5181 	/*
5182 	 * The idle task has no parent, it either has its own
5183 	 * stack or no stack at all.
5184 	 */
5185 	if (t->ret_stack)
5186 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5187 
5188 	if (ftrace_graph_active) {
5189 		struct ftrace_ret_stack *ret_stack;
5190 
5191 		ret_stack = per_cpu(idle_ret_stack, cpu);
5192 		if (!ret_stack) {
5193 			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5194 					    * sizeof(struct ftrace_ret_stack),
5195 					    GFP_KERNEL);
5196 			if (!ret_stack)
5197 				return;
5198 			per_cpu(idle_ret_stack, cpu) = ret_stack;
5199 		}
5200 		graph_init_task(t, ret_stack);
5201 	}
5202 }
5203 
5204 /* Allocate a return stack for newly created task */
5205 void ftrace_graph_init_task(struct task_struct *t)
5206 {
5207 	/* Make sure we do not use the parent ret_stack */
5208 	t->ret_stack = NULL;
5209 	t->curr_ret_stack = -1;
5210 
5211 	if (ftrace_graph_active) {
5212 		struct ftrace_ret_stack *ret_stack;
5213 
5214 		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5215 				* sizeof(struct ftrace_ret_stack),
5216 				GFP_KERNEL);
5217 		if (!ret_stack)
5218 			return;
5219 		graph_init_task(t, ret_stack);
5220 	}
5221 }
5222 
5223 void ftrace_graph_exit_task(struct task_struct *t)
5224 {
5225 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
5226 
5227 	t->ret_stack = NULL;
5228 	/* NULL must become visible to IRQs before we free it: */
5229 	barrier();
5230 
5231 	kfree(ret_stack);
5232 }
5233 
5234 void ftrace_graph_stop(void)
5235 {
5236 	ftrace_stop();
5237 }
5238 #endif
5239