xref: /openbmc/linux/kernel/trace/ftrace.c (revision 5bd8e16d)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 Nadia Yvette Chambers
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
35 
36 #include <trace/events/sched.h>
37 
38 #include <asm/setup.h>
39 
40 #include "trace_output.h"
41 #include "trace_stat.h"
42 
43 #define FTRACE_WARN_ON(cond)			\
44 	({					\
45 		int ___r = cond;		\
46 		if (WARN_ON(___r))		\
47 			ftrace_kill();		\
48 		___r;				\
49 	})
50 
51 #define FTRACE_WARN_ON_ONCE(cond)		\
52 	({					\
53 		int ___r = cond;		\
54 		if (WARN_ON_ONCE(___r))		\
55 			ftrace_kill();		\
56 		___r;				\
57 	})
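/*
 * Both macros evaluate to the condition's value, so callers can test and
 * bail out in one step. A usage sketch (mirroring callers later in this
 * file):
 *
 *	if (FTRACE_WARN_ON(ops == &global_ops))
 *		return -EINVAL;
 */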
58 
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
64 
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
66 
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_REGEX_LOCK(opsname)	\
69 	.regex_lock	= __MUTEX_INITIALIZER(opsname.regex_lock),
70 #else
71 #define INIT_REGEX_LOCK(opsname)
72 #endif
73 
74 static struct ftrace_ops ftrace_list_end __read_mostly = {
75 	.func		= ftrace_stub,
76 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
77 };
78 
79 /* ftrace_enabled is a method to turn ftrace on or off */
80 int ftrace_enabled __read_mostly;
81 static int last_ftrace_enabled;
82 
83 /* Quick disabling of function tracer. */
84 int function_trace_stop __read_mostly;
85 
86 /* Current function tracing op */
87 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
88 
89 /* List for set_ftrace_pid's pids. */
90 LIST_HEAD(ftrace_pids);
91 struct ftrace_pid {
92 	struct list_head list;
93 	struct pid *pid;
94 };
95 
96 /*
97  * ftrace_disabled is set when an anomaly is discovered.
98  * ftrace_disabled is much stronger than ftrace_enabled.
99  */
100 static int ftrace_disabled __read_mostly;
101 
102 static DEFINE_MUTEX(ftrace_lock);
103 
104 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
105 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
106 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
107 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
108 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
109 static struct ftrace_ops global_ops;
110 static struct ftrace_ops control_ops;
111 
112 #if ARCH_SUPPORTS_FTRACE_OPS
113 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
114 				 struct ftrace_ops *op, struct pt_regs *regs);
115 #else
116 /* See comment below, where ftrace_ops_list_func is defined */
117 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
118 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
119 #endif
120 
121 /*
122  * Traverse the ftrace_global_list, invoking all entries.  The reason that we
123  * can use rcu_dereference_raw_notrace() is that elements removed from this list
124  * are simply leaked, so there is no need to interact with a grace-period
125  * mechanism.  The rcu_dereference_raw_notrace() calls are needed to handle
126  * concurrent insertions into the ftrace_global_list.
127  *
128  * Silly Alpha and silly pointer-speculation compiler optimizations!
129  */
130 #define do_for_each_ftrace_op(op, list)			\
131 	op = rcu_dereference_raw_notrace(list);			\
132 	do
133 
134 /*
135  * Optimized for just a single item in the list (as that is the normal case).
136  */
137 #define while_for_each_ftrace_op(op)				\
138 	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
139 	       unlikely((op) != &ftrace_list_end))
140 
141 static inline void ftrace_ops_init(struct ftrace_ops *ops)
142 {
143 #ifdef CONFIG_DYNAMIC_FTRACE
144 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
145 		mutex_init(&ops->regex_lock);
146 		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
147 	}
148 #endif
149 }
150 
151 /**
152  * ftrace_nr_registered_ops - return number of ops registered
153  *
154  * Returns the number of ftrace_ops registered and tracing functions
155  */
156 int ftrace_nr_registered_ops(void)
157 {
158 	struct ftrace_ops *ops;
159 	int cnt = 0;
160 
161 	mutex_lock(&ftrace_lock);
162 
163 	for (ops = ftrace_ops_list;
164 	     ops != &ftrace_list_end; ops = ops->next)
165 		cnt++;
166 
167 	mutex_unlock(&ftrace_lock);
168 
169 	return cnt;
170 }
171 
172 static void
173 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
174 			struct ftrace_ops *op, struct pt_regs *regs)
175 {
176 	int bit;
177 
178 	bit = trace_test_and_set_recursion(TRACE_GLOBAL_START, TRACE_GLOBAL_MAX);
179 	if (bit < 0)
180 		return;
181 
182 	do_for_each_ftrace_op(op, ftrace_global_list) {
183 		op->func(ip, parent_ip, op, regs);
184 	} while_for_each_ftrace_op(op);
185 
186 	trace_clear_recursion(bit);
187 }
188 
189 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
190 			    struct ftrace_ops *op, struct pt_regs *regs)
191 {
192 	if (!test_tsk_trace_trace(current))
193 		return;
194 
195 	ftrace_pid_function(ip, parent_ip, op, regs);
196 }
197 
198 static void set_ftrace_pid_function(ftrace_func_t func)
199 {
200 	/* do not set ftrace_pid_function to itself! */
201 	if (func != ftrace_pid_func)
202 		ftrace_pid_function = func;
203 }
204 
205 /**
206  * clear_ftrace_function - reset the ftrace function
207  *
208  * This NULLs the ftrace function and in essence stops
209  * tracing. There may be a lag before tracing fully stops.
210  */
211 void clear_ftrace_function(void)
212 {
213 	ftrace_trace_function = ftrace_stub;
214 	ftrace_pid_function = ftrace_stub;
215 }
216 
217 static void control_ops_disable_all(struct ftrace_ops *ops)
218 {
219 	int cpu;
220 
221 	for_each_possible_cpu(cpu)
222 		*per_cpu_ptr(ops->disabled, cpu) = 1;
223 }
224 
225 static int control_ops_alloc(struct ftrace_ops *ops)
226 {
227 	int __percpu *disabled;
228 
229 	disabled = alloc_percpu(int);
230 	if (!disabled)
231 		return -ENOMEM;
232 
233 	ops->disabled = disabled;
234 	control_ops_disable_all(ops);
235 	return 0;
236 }
237 
238 static void control_ops_free(struct ftrace_ops *ops)
239 {
240 	free_percpu(ops->disabled);
241 }
242 
243 static void update_global_ops(void)
244 {
245 	ftrace_func_t func;
246 
247 	/*
248 	 * If there's only one function registered, then call that
249 	 * function directly. Otherwise, we need to iterate over the
250 	 * registered callers.
251 	 */
252 	if (ftrace_global_list == &ftrace_list_end ||
253 	    ftrace_global_list->next == &ftrace_list_end) {
254 		func = ftrace_global_list->func;
255 		/*
256 		 * As we are calling the function directly, if it
257 		 * does not have recursion protection, the
258 		 * function_trace_op needs to be updated
259 		 * accordingly.
260 		 */
261 		if (ftrace_global_list->flags & FTRACE_OPS_FL_RECURSION_SAFE)
262 			global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
263 		else
264 			global_ops.flags &= ~FTRACE_OPS_FL_RECURSION_SAFE;
265 	} else {
266 		func = ftrace_global_list_func;
267 		/* The list has its own recursion protection. */
268 		global_ops.flags |= FTRACE_OPS_FL_RECURSION_SAFE;
269 	}
270 
271 
272 	/* If we filter on pids, update to use the pid function */
273 	if (!list_empty(&ftrace_pids)) {
274 		set_ftrace_pid_function(func);
275 		func = ftrace_pid_func;
276 	}
277 
278 	global_ops.func = func;
279 }
280 
281 static void update_ftrace_function(void)
282 {
283 	ftrace_func_t func;
284 
285 	update_global_ops();
286 
287 	/*
288 	 * If we are at the end of the list and this ops is
289 	 * recursion safe and not dynamic and the arch supports passing ops,
290 	 * then have the mcount trampoline call the function directly.
291 	 */
292 	if (ftrace_ops_list == &ftrace_list_end ||
293 	    (ftrace_ops_list->next == &ftrace_list_end &&
294 	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
295 	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
296 	     !FTRACE_FORCE_LIST_FUNC)) {
297 		/* Set the ftrace_ops that the arch callback uses */
298 		if (ftrace_ops_list == &global_ops)
299 			function_trace_op = ftrace_global_list;
300 		else
301 			function_trace_op = ftrace_ops_list;
302 		func = ftrace_ops_list->func;
303 	} else {
304 		/* Just use the default ftrace_ops */
305 		function_trace_op = &ftrace_list_end;
306 		func = ftrace_ops_list_func;
307 	}
308 
309 	ftrace_trace_function = func;
310 }
311 
312 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
313 {
314 	ops->next = *list;
315 	/*
316 	 * We are entering ops into the list but another
317 	 * CPU might be walking that list. We need to make sure
318 	 * the ops->next pointer is valid before another CPU sees
319 	 * the ops pointer included into the list.
320 	 */
321 	rcu_assign_pointer(*list, ops);
322 }
323 
324 static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
325 {
326 	struct ftrace_ops **p;
327 
328 	/*
329 	 * If we are removing the last function, then simply point
330 	 * to the ftrace_stub.
331 	 */
332 	if (*list == ops && ops->next == &ftrace_list_end) {
333 		*list = &ftrace_list_end;
334 		return 0;
335 	}
336 
337 	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
338 		if (*p == ops)
339 			break;
340 
341 	if (*p != ops)
342 		return -1;
343 
344 	*p = (*p)->next;
345 	return 0;
346 }
347 
348 static void add_ftrace_list_ops(struct ftrace_ops **list,
349 				struct ftrace_ops *main_ops,
350 				struct ftrace_ops *ops)
351 {
352 	int first = *list == &ftrace_list_end;
353 	add_ftrace_ops(list, ops);
354 	if (first)
355 		add_ftrace_ops(&ftrace_ops_list, main_ops);
356 }
357 
358 static int remove_ftrace_list_ops(struct ftrace_ops **list,
359 				  struct ftrace_ops *main_ops,
360 				  struct ftrace_ops *ops)
361 {
362 	int ret = remove_ftrace_ops(list, ops);
363 	if (!ret && *list == &ftrace_list_end)
364 		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
365 	return ret;
366 }
367 
368 static int __register_ftrace_function(struct ftrace_ops *ops)
369 {
370 	if (unlikely(ftrace_disabled))
371 		return -ENODEV;
372 
373 	if (FTRACE_WARN_ON(ops == &global_ops))
374 		return -EINVAL;
375 
376 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
377 		return -EBUSY;
378 
379 	/* We don't support both control and global flags set. */
380 	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
381 		return -EINVAL;
382 
383 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
384 	/*
385 	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
386 	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
387 	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
388 	 */
389 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
390 	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
391 		return -EINVAL;
392 
393 	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
394 		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
395 #endif
396 
397 	if (!core_kernel_data((unsigned long)ops))
398 		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
399 
400 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
401 		add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
402 		ops->flags |= FTRACE_OPS_FL_ENABLED;
403 	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
404 		if (control_ops_alloc(ops))
405 			return -ENOMEM;
406 		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
407 	} else
408 		add_ftrace_ops(&ftrace_ops_list, ops);
409 
410 	if (ftrace_enabled)
411 		update_ftrace_function();
412 
413 	return 0;
414 }
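/*
 * A minimal sketch of how client code reaches the helper above through
 * the public register_ftrace_function() API (illustrative only;
 * my_trace_func and my_ops are hypothetical, not part of this file):
 */
#if 0
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* runs for every traced function; keep it re-entrant */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/* register_ftrace_function(&my_ops) takes ftrace_lock and lands here */
#endif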
415 
416 static void ftrace_sync(struct work_struct *work)
417 {
418 	/*
419 	 * This function is just a stub to implement a hard force
420 	 * of synchronize_sched(). This requires synchronizing
421 	 * tasks even in userspace and idle.
422 	 *
423 	 * Yes, function tracing is rude.
424 	 */
425 }
426 
427 static int __unregister_ftrace_function(struct ftrace_ops *ops)
428 {
429 	int ret;
430 
431 	if (ftrace_disabled)
432 		return -ENODEV;
433 
434 	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
435 		return -EBUSY;
436 
437 	if (FTRACE_WARN_ON(ops == &global_ops))
438 		return -EINVAL;
439 
440 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
441 		ret = remove_ftrace_list_ops(&ftrace_global_list,
442 					     &global_ops, ops);
443 		if (!ret)
444 			ops->flags &= ~FTRACE_OPS_FL_ENABLED;
445 	} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
446 		ret = remove_ftrace_list_ops(&ftrace_control_list,
447 					     &control_ops, ops);
448 		if (!ret) {
449 			/*
450 			 * The ftrace_ops is now removed from the list,
451 			 * so there'll be no new users. We must ensure
452 			 * all current users are done before we free
453 			 * the control data.
454 			 * Note synchronize_sched() is not enough, as we
455 			 * use preempt_disable() to do RCU, but the function
456 			 * tracer can be called where RCU is not active
457 			 * (before user_exit()).
458 			 */
459 			schedule_on_each_cpu(ftrace_sync);
460 			control_ops_free(ops);
461 		}
462 	} else
463 		ret = remove_ftrace_ops(&ftrace_ops_list, ops);
464 
465 	if (ret < 0)
466 		return ret;
467 
468 	if (ftrace_enabled)
469 		update_ftrace_function();
470 
471 	/*
472 	 * Dynamic ops may be freed, we must make sure that all
473 	 * callers are done before leaving this function.
474 	 *
475 	 * Again, normal synchronize_sched() is not good enough.
476 	 * We need to do a hard force of sched synchronization.
477 	 */
478 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
479 		schedule_on_each_cpu(ftrace_sync);
480 
481 
482 	return 0;
483 }
484 
485 static void ftrace_update_pid_func(void)
486 {
487 	/* Only do something if we are tracing something */
488 	if (ftrace_trace_function == ftrace_stub)
489 		return;
490 
491 	update_ftrace_function();
492 }
493 
494 #ifdef CONFIG_FUNCTION_PROFILER
495 struct ftrace_profile {
496 	struct hlist_node		node;
497 	unsigned long			ip;
498 	unsigned long			counter;
499 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
500 	unsigned long long		time;
501 	unsigned long long		time_squared;
502 #endif
503 };
504 
505 struct ftrace_profile_page {
506 	struct ftrace_profile_page	*next;
507 	unsigned long			index;
508 	struct ftrace_profile		records[];
509 };
510 
511 struct ftrace_profile_stat {
512 	atomic_t			disabled;
513 	struct hlist_head		*hash;
514 	struct ftrace_profile_page	*pages;
515 	struct ftrace_profile_page	*start;
516 	struct tracer_stat		stat;
517 };
518 
519 #define PROFILE_RECORDS_SIZE						\
520 	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
521 
522 #define PROFILES_PER_PAGE					\
523 	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
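/*
 * Worked example (assuming a 64-bit build, 4K pages and
 * CONFIG_FUNCTION_GRAPH_TRACER): the page header (next + index) is
 * 16 bytes and each record is 48 bytes, so PROFILES_PER_PAGE works
 * out to (4096 - 16) / 48 = 85 records per page.
 */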
524 
525 static int ftrace_profile_enabled __read_mostly;
526 
527 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
528 static DEFINE_MUTEX(ftrace_profile_lock);
529 
530 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
531 
532 #define FTRACE_PROFILE_HASH_BITS 10
533 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
534 
535 static void *
536 function_stat_next(void *v, int idx)
537 {
538 	struct ftrace_profile *rec = v;
539 	struct ftrace_profile_page *pg;
540 
541 	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
542 
543  again:
544 	if (idx != 0)
545 		rec++;
546 
547 	if ((void *)rec >= (void *)&pg->records[pg->index]) {
548 		pg = pg->next;
549 		if (!pg)
550 			return NULL;
551 		rec = &pg->records[0];
552 		if (!rec->counter)
553 			goto again;
554 	}
555 
556 	return rec;
557 }
558 
559 static void *function_stat_start(struct tracer_stat *trace)
560 {
561 	struct ftrace_profile_stat *stat =
562 		container_of(trace, struct ftrace_profile_stat, stat);
563 
564 	if (!stat || !stat->start)
565 		return NULL;
566 
567 	return function_stat_next(&stat->start->records[0], 0);
568 }
569 
570 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
571 /* function graph compares on total time */
572 static int function_stat_cmp(void *p1, void *p2)
573 {
574 	struct ftrace_profile *a = p1;
575 	struct ftrace_profile *b = p2;
576 
577 	if (a->time < b->time)
578 		return -1;
579 	if (a->time > b->time)
580 		return 1;
581 	else
582 		return 0;
583 }
584 #else
585 /* not function graph compares against hits */
586 static int function_stat_cmp(void *p1, void *p2)
587 {
588 	struct ftrace_profile *a = p1;
589 	struct ftrace_profile *b = p2;
590 
591 	if (a->counter < b->counter)
592 		return -1;
593 	if (a->counter > b->counter)
594 		return 1;
595 	else
596 		return 0;
597 }
598 #endif
599 
600 static int function_stat_headers(struct seq_file *m)
601 {
602 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
603 	seq_printf(m, "  Function                               "
604 		   "Hit    Time            Avg             s^2\n"
605 		      "  --------                               "
606 		   "---    ----            ---             ---\n");
607 #else
608 	seq_printf(m, "  Function                               Hit\n"
609 		      "  --------                               ---\n");
610 #endif
611 	return 0;
612 }
613 
614 static int function_stat_show(struct seq_file *m, void *v)
615 {
616 	struct ftrace_profile *rec = v;
617 	char str[KSYM_SYMBOL_LEN];
618 	int ret = 0;
619 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
620 	static struct trace_seq s;
621 	unsigned long long avg;
622 	unsigned long long stddev;
623 #endif
624 	mutex_lock(&ftrace_profile_lock);
625 
626 	/* we raced with function_profile_reset() */
627 	if (unlikely(rec->counter == 0)) {
628 		ret = -EBUSY;
629 		goto out;
630 	}
631 
632 	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
633 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
634 
635 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
636 	seq_printf(m, "    ");
637 	avg = rec->time;
638 	do_div(avg, rec->counter);
639 
640 	/* Sample variance (s^2) */
641 	if (rec->counter <= 1)
642 		stddev = 0;
643 	else {
644 		/*
645 		 * Apply the sample-variance formula:
646 		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
647 		 */
648 		stddev = rec->counter * rec->time_squared -
649 			 rec->time * rec->time;
650 
651 		/*
652 		 * Divide by only 1000 for the ns^2 -> us^2 conversion;
653 		 * trace_print_graph_duration() will divide by 1000 again.
654 		 */
655 		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
656 	}
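	/*
	 * Worked example (illustrative): three samples of 1000, 2000 and
	 * 3000 ns give n * \Sum x_i^2 - (\Sum x_i)^2 = 3 * 14e6 - 36e6 =
	 * 6e6; dividing by n * (n-1) * 1000 = 3 * 2 * 1000 yields 1000,
	 * and the further divide by 1000 in trace_print_graph_duration()
	 * displays 1 (us^2).
	 */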
657 
658 	trace_seq_init(&s);
659 	trace_print_graph_duration(rec->time, &s);
660 	trace_seq_puts(&s, "    ");
661 	trace_print_graph_duration(avg, &s);
662 	trace_seq_puts(&s, "    ");
663 	trace_print_graph_duration(stddev, &s);
664 	trace_print_seq(m, &s);
665 #endif
666 	seq_putc(m, '\n');
667 out:
668 	mutex_unlock(&ftrace_profile_lock);
669 
670 	return ret;
671 }
672 
673 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
674 {
675 	struct ftrace_profile_page *pg;
676 
677 	pg = stat->pages = stat->start;
678 
679 	while (pg) {
680 		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
681 		pg->index = 0;
682 		pg = pg->next;
683 	}
684 
685 	memset(stat->hash, 0,
686 	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
687 }
688 
689 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
690 {
691 	struct ftrace_profile_page *pg;
692 	int functions;
693 	int pages;
694 	int i;
695 
696 	/* If we already allocated, do nothing */
697 	if (stat->pages)
698 		return 0;
699 
700 	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
701 	if (!stat->pages)
702 		return -ENOMEM;
703 
704 #ifdef CONFIG_DYNAMIC_FTRACE
705 	functions = ftrace_update_tot_cnt;
706 #else
707 	/*
708 	 * We do not know the number of functions that exist because
709 	 * dynamic tracing is what counts them. From past experience
710 	 * we know of around 20K functions. That should be more than enough.
711 	 * It is highly unlikely we will execute every function in
712 	 * the kernel.
713 	 */
714 	functions = 20000;
715 #endif
716 
717 	pg = stat->start = stat->pages;
718 
719 	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
720 
721 	for (i = 1; i < pages; i++) {
722 		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
723 		if (!pg->next)
724 			goto out_free;
725 		pg = pg->next;
726 	}
727 
728 	return 0;
729 
730  out_free:
731 	pg = stat->start;
732 	while (pg) {
733 		unsigned long tmp = (unsigned long)pg;
734 
735 		pg = pg->next;
736 		free_page(tmp);
737 	}
738 
739 	stat->pages = NULL;
740 	stat->start = NULL;
741 
742 	return -ENOMEM;
743 }
744 
745 static int ftrace_profile_init_cpu(int cpu)
746 {
747 	struct ftrace_profile_stat *stat;
748 	int size;
749 
750 	stat = &per_cpu(ftrace_profile_stats, cpu);
751 
752 	if (stat->hash) {
753 		/* If the profile is already created, simply reset it */
754 		ftrace_profile_reset(stat);
755 		return 0;
756 	}
757 
758 	/*
759 	 * We are profiling all functions, but usually only a few thousand
760 	 * functions are hit. We'll make a hash of 1024 items.
761 	 */
762 	size = FTRACE_PROFILE_HASH_SIZE;
763 
764 	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
765 
766 	if (!stat->hash)
767 		return -ENOMEM;
768 
769 	/* Preallocate the function profiling pages */
770 	if (ftrace_profile_pages_init(stat) < 0) {
771 		kfree(stat->hash);
772 		stat->hash = NULL;
773 		return -ENOMEM;
774 	}
775 
776 	return 0;
777 }
778 
779 static int ftrace_profile_init(void)
780 {
781 	int cpu;
782 	int ret = 0;
783 
784 	for_each_online_cpu(cpu) {
785 		ret = ftrace_profile_init_cpu(cpu);
786 		if (ret)
787 			break;
788 	}
789 
790 	return ret;
791 }
792 
793 /* interrupts must be disabled */
794 static struct ftrace_profile *
795 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
796 {
797 	struct ftrace_profile *rec;
798 	struct hlist_head *hhd;
799 	unsigned long key;
800 
801 	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
802 	hhd = &stat->hash[key];
803 
804 	if (hlist_empty(hhd))
805 		return NULL;
806 
807 	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
808 		if (rec->ip == ip)
809 			return rec;
810 	}
811 
812 	return NULL;
813 }
814 
815 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
816 			       struct ftrace_profile *rec)
817 {
818 	unsigned long key;
819 
820 	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
821 	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
822 }
823 
824 /*
825  * The memory is already allocated; this simply finds a new record to use.
826  */
827 static struct ftrace_profile *
828 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
829 {
830 	struct ftrace_profile *rec = NULL;
831 
832 	/* prevent recursion (from NMIs) */
833 	if (atomic_inc_return(&stat->disabled) != 1)
834 		goto out;
835 
836 	/*
837 	 * Try to find the function again since an NMI
838 	 * could have added it
839 	 */
840 	rec = ftrace_find_profiled_func(stat, ip);
841 	if (rec)
842 		goto out;
843 
844 	if (stat->pages->index == PROFILES_PER_PAGE) {
845 		if (!stat->pages->next)
846 			goto out;
847 		stat->pages = stat->pages->next;
848 	}
849 
850 	rec = &stat->pages->records[stat->pages->index++];
851 	rec->ip = ip;
852 	ftrace_add_profile(stat, rec);
853 
854  out:
855 	atomic_dec(&stat->disabled);
856 
857 	return rec;
858 }
859 
860 static void
861 function_profile_call(unsigned long ip, unsigned long parent_ip,
862 		      struct ftrace_ops *ops, struct pt_regs *regs)
863 {
864 	struct ftrace_profile_stat *stat;
865 	struct ftrace_profile *rec;
866 	unsigned long flags;
867 
868 	if (!ftrace_profile_enabled)
869 		return;
870 
871 	local_irq_save(flags);
872 
873 	stat = &__get_cpu_var(ftrace_profile_stats);
874 	if (!stat->hash || !ftrace_profile_enabled)
875 		goto out;
876 
877 	rec = ftrace_find_profiled_func(stat, ip);
878 	if (!rec) {
879 		rec = ftrace_profile_alloc(stat, ip);
880 		if (!rec)
881 			goto out;
882 	}
883 
884 	rec->counter++;
885  out:
886 	local_irq_restore(flags);
887 }
888 
889 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
890 static int profile_graph_entry(struct ftrace_graph_ent *trace)
891 {
892 	function_profile_call(trace->func, 0, NULL, NULL);
893 	return 1;
894 }
895 
896 static void profile_graph_return(struct ftrace_graph_ret *trace)
897 {
898 	struct ftrace_profile_stat *stat;
899 	unsigned long long calltime;
900 	struct ftrace_profile *rec;
901 	unsigned long flags;
902 
903 	local_irq_save(flags);
904 	stat = &__get_cpu_var(ftrace_profile_stats);
905 	if (!stat->hash || !ftrace_profile_enabled)
906 		goto out;
907 
908 	/* If the calltime was zero'd ignore it */
909 	if (!trace->calltime)
910 		goto out;
911 
912 	calltime = trace->rettime - trace->calltime;
913 
914 	if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
915 		int index;
916 
917 		index = trace->depth;
918 
919 		/* Append this call time to the parent time to subtract */
920 		if (index)
921 			current->ret_stack[index - 1].subtime += calltime;
922 
923 		if (current->ret_stack[index].subtime < calltime)
924 			calltime -= current->ret_stack[index].subtime;
925 		else
926 			calltime = 0;
927 	}
928 
929 	rec = ftrace_find_profiled_func(stat, trace->func);
930 	if (rec) {
931 		rec->time += calltime;
932 		rec->time_squared += calltime * calltime;
933 	}
934 
935  out:
936 	local_irq_restore(flags);
937 }
938 
939 static int register_ftrace_profiler(void)
940 {
941 	return register_ftrace_graph(&profile_graph_return,
942 				     &profile_graph_entry);
943 }
944 
945 static void unregister_ftrace_profiler(void)
946 {
947 	unregister_ftrace_graph();
948 }
949 #else
950 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
951 	.func		= function_profile_call,
952 	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
953 	INIT_REGEX_LOCK(ftrace_profile_ops)
954 };
955 
956 static int register_ftrace_profiler(void)
957 {
958 	return register_ftrace_function(&ftrace_profile_ops);
959 }
960 
961 static void unregister_ftrace_profiler(void)
962 {
963 	unregister_ftrace_function(&ftrace_profile_ops);
964 }
965 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
966 
967 static ssize_t
968 ftrace_profile_write(struct file *filp, const char __user *ubuf,
969 		     size_t cnt, loff_t *ppos)
970 {
971 	unsigned long val;
972 	int ret;
973 
974 	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
975 	if (ret)
976 		return ret;
977 
978 	val = !!val;
979 
980 	mutex_lock(&ftrace_profile_lock);
981 	if (ftrace_profile_enabled ^ val) {
982 		if (val) {
983 			ret = ftrace_profile_init();
984 			if (ret < 0) {
985 				cnt = ret;
986 				goto out;
987 			}
988 
989 			ret = register_ftrace_profiler();
990 			if (ret < 0) {
991 				cnt = ret;
992 				goto out;
993 			}
994 			ftrace_profile_enabled = 1;
995 		} else {
996 			ftrace_profile_enabled = 0;
997 			/*
998 			 * unregister_ftrace_profiler calls stop_machine
999 			 * so this acts like a synchronize_sched().
1000 			 */
1001 			unregister_ftrace_profiler();
1002 		}
1003 	}
1004  out:
1005 	mutex_unlock(&ftrace_profile_lock);
1006 
1007 	*ppos += cnt;
1008 
1009 	return cnt;
1010 }
1011 
1012 static ssize_t
1013 ftrace_profile_read(struct file *filp, char __user *ubuf,
1014 		     size_t cnt, loff_t *ppos)
1015 {
1016 	char buf[64];		/* big enough to hold a number */
1017 	int r;
1018 
1019 	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
1020 	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
1021 }
1022 
1023 static const struct file_operations ftrace_profile_fops = {
1024 	.open		= tracing_open_generic,
1025 	.read		= ftrace_profile_read,
1026 	.write		= ftrace_profile_write,
1027 	.llseek		= default_llseek,
1028 };
1029 
1030 /* used to initialize the real stat files */
1031 static struct tracer_stat function_stats __initdata = {
1032 	.name		= "functions",
1033 	.stat_start	= function_stat_start,
1034 	.stat_next	= function_stat_next,
1035 	.stat_cmp	= function_stat_cmp,
1036 	.stat_headers	= function_stat_headers,
1037 	.stat_show	= function_stat_show
1038 };
1039 
1040 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1041 {
1042 	struct ftrace_profile_stat *stat;
1043 	struct dentry *entry;
1044 	char *name;
1045 	int ret;
1046 	int cpu;
1047 
1048 	for_each_possible_cpu(cpu) {
1049 		stat = &per_cpu(ftrace_profile_stats, cpu);
1050 
1051 		/* allocate enough for function name + cpu number */
1052 		name = kmalloc(32, GFP_KERNEL);
1053 		if (!name) {
1054 			/*
1055 			 * The files created are permanent; even if something
1056 			 * fails here, we still do not free the memory.
1057 			 */
1058 			WARN(1,
1059 			     "Could not allocate stat file for cpu %d\n",
1060 			     cpu);
1061 			return;
1062 		}
1063 		stat->stat = function_stats;
1064 		snprintf(name, 32, "function%d", cpu);
1065 		stat->stat.name = name;
1066 		ret = register_stat_tracer(&stat->stat);
1067 		if (ret) {
1068 			WARN(1,
1069 			     "Could not register function stat for cpu %d\n",
1070 			     cpu);
1071 			kfree(name);
1072 			return;
1073 		}
1074 	}
1075 
1076 	entry = debugfs_create_file("function_profile_enabled", 0644,
1077 				    d_tracer, NULL, &ftrace_profile_fops);
1078 	if (!entry)
1079 		pr_warning("Could not create debugfs "
1080 			   "'function_profile_enabled' entry\n");
1081 }
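/*
 * Resulting interface, sketched (paths assume debugfs mounted at
 * /sys/kernel/debug):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *	# cat /sys/kernel/debug/tracing/trace_stat/function0
 *
 * where function0 is the per-cpu stat file registered above for CPU 0.
 */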
1082 
1083 #else /* CONFIG_FUNCTION_PROFILER */
1084 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
1085 {
1086 }
1087 #endif /* CONFIG_FUNCTION_PROFILER */
1088 
1089 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
1090 
1091 loff_t
1092 ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
1093 {
1094 	loff_t ret;
1095 
1096 	if (file->f_mode & FMODE_READ)
1097 		ret = seq_lseek(file, offset, whence);
1098 	else
1099 		file->f_pos = ret = 1;
1100 
1101 	return ret;
1102 }
1103 
1104 #ifdef CONFIG_DYNAMIC_FTRACE
1105 
1106 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1107 # error Dynamic ftrace depends on MCOUNT_RECORD
1108 #endif
1109 
1110 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1111 
1112 struct ftrace_func_probe {
1113 	struct hlist_node	node;
1114 	struct ftrace_probe_ops	*ops;
1115 	unsigned long		flags;
1116 	unsigned long		ip;
1117 	void			*data;
1118 	struct list_head	free_list;
1119 };
1120 
1121 struct ftrace_func_entry {
1122 	struct hlist_node hlist;
1123 	unsigned long ip;
1124 };
1125 
1126 struct ftrace_hash {
1127 	unsigned long		size_bits;
1128 	struct hlist_head	*buckets;
1129 	unsigned long		count;
1130 	struct rcu_head		rcu;
1131 };
1132 
1133 /*
1134  * We make these constant because no one should touch them,
1135  * and they are used as the default "empty hash" to avoid allocating
1136  * it all the time. These are in a read only section such that if
1137  * anyone does try to modify it, it will cause an exception.
1138  */
1139 static const struct hlist_head empty_buckets[1];
1140 static const struct ftrace_hash empty_hash = {
1141 	.buckets = (struct hlist_head *)empty_buckets,
1142 };
1143 #define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)
1144 
1145 static struct ftrace_ops global_ops = {
1146 	.func			= ftrace_stub,
1147 	.notrace_hash		= EMPTY_HASH,
1148 	.filter_hash		= EMPTY_HASH,
1149 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
1150 	INIT_REGEX_LOCK(global_ops)
1151 };
1152 
1153 struct ftrace_page {
1154 	struct ftrace_page	*next;
1155 	struct dyn_ftrace	*records;
1156 	int			index;
1157 	int			size;
1158 };
1159 
1160 static struct ftrace_page *ftrace_new_pgs;
1161 
1162 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1163 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
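/*
 * Example (assuming a 64-bit build with 4K pages and a dyn_ftrace of
 * just ip + flags, i.e. 16 bytes): ENTRIES_PER_PAGE works out to
 * 4096 / 16 = 256 records per page.
 */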
1164 
1165 /* estimate from running different kernels */
1166 #define NR_TO_INIT		10000
1167 
1168 static struct ftrace_page	*ftrace_pages_start;
1169 static struct ftrace_page	*ftrace_pages;
1170 
1171 static bool ftrace_hash_empty(struct ftrace_hash *hash)
1172 {
1173 	return !hash || !hash->count;
1174 }
1175 
1176 static struct ftrace_func_entry *
1177 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
1178 {
1179 	unsigned long key;
1180 	struct ftrace_func_entry *entry;
1181 	struct hlist_head *hhd;
1182 
1183 	if (ftrace_hash_empty(hash))
1184 		return NULL;
1185 
1186 	if (hash->size_bits > 0)
1187 		key = hash_long(ip, hash->size_bits);
1188 	else
1189 		key = 0;
1190 
1191 	hhd = &hash->buckets[key];
1192 
1193 	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
1194 		if (entry->ip == ip)
1195 			return entry;
1196 	}
1197 	return NULL;
1198 }
1199 
1200 static void __add_hash_entry(struct ftrace_hash *hash,
1201 			     struct ftrace_func_entry *entry)
1202 {
1203 	struct hlist_head *hhd;
1204 	unsigned long key;
1205 
1206 	if (hash->size_bits)
1207 		key = hash_long(entry->ip, hash->size_bits);
1208 	else
1209 		key = 0;
1210 
1211 	hhd = &hash->buckets[key];
1212 	hlist_add_head(&entry->hlist, hhd);
1213 	hash->count++;
1214 }
1215 
1216 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
1217 {
1218 	struct ftrace_func_entry *entry;
1219 
1220 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1221 	if (!entry)
1222 		return -ENOMEM;
1223 
1224 	entry->ip = ip;
1225 	__add_hash_entry(hash, entry);
1226 
1227 	return 0;
1228 }
1229 
1230 static void
1231 free_hash_entry(struct ftrace_hash *hash,
1232 		  struct ftrace_func_entry *entry)
1233 {
1234 	hlist_del(&entry->hlist);
1235 	kfree(entry);
1236 	hash->count--;
1237 }
1238 
1239 static void
1240 remove_hash_entry(struct ftrace_hash *hash,
1241 		  struct ftrace_func_entry *entry)
1242 {
1243 	hlist_del(&entry->hlist);
1244 	hash->count--;
1245 }
1246 
1247 static void ftrace_hash_clear(struct ftrace_hash *hash)
1248 {
1249 	struct hlist_head *hhd;
1250 	struct hlist_node *tn;
1251 	struct ftrace_func_entry *entry;
1252 	int size = 1 << hash->size_bits;
1253 	int i;
1254 
1255 	if (!hash->count)
1256 		return;
1257 
1258 	for (i = 0; i < size; i++) {
1259 		hhd = &hash->buckets[i];
1260 		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
1261 			free_hash_entry(hash, entry);
1262 	}
1263 	FTRACE_WARN_ON(hash->count);
1264 }
1265 
1266 static void free_ftrace_hash(struct ftrace_hash *hash)
1267 {
1268 	if (!hash || hash == EMPTY_HASH)
1269 		return;
1270 	ftrace_hash_clear(hash);
1271 	kfree(hash->buckets);
1272 	kfree(hash);
1273 }
1274 
1275 static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
1276 {
1277 	struct ftrace_hash *hash;
1278 
1279 	hash = container_of(rcu, struct ftrace_hash, rcu);
1280 	free_ftrace_hash(hash);
1281 }
1282 
1283 static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
1284 {
1285 	if (!hash || hash == EMPTY_HASH)
1286 		return;
1287 	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
1288 }
1289 
1290 void ftrace_free_filter(struct ftrace_ops *ops)
1291 {
1292 	ftrace_ops_init(ops);
1293 	free_ftrace_hash(ops->filter_hash);
1294 	free_ftrace_hash(ops->notrace_hash);
1295 }
1296 
1297 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1298 {
1299 	struct ftrace_hash *hash;
1300 	int size;
1301 
1302 	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1303 	if (!hash)
1304 		return NULL;
1305 
1306 	size = 1 << size_bits;
1307 	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
1308 
1309 	if (!hash->buckets) {
1310 		kfree(hash);
1311 		return NULL;
1312 	}
1313 
1314 	hash->size_bits = size_bits;
1315 
1316 	return hash;
1317 }
1318 
1319 static struct ftrace_hash *
1320 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1321 {
1322 	struct ftrace_func_entry *entry;
1323 	struct ftrace_hash *new_hash;
1324 	int size;
1325 	int ret;
1326 	int i;
1327 
1328 	new_hash = alloc_ftrace_hash(size_bits);
1329 	if (!new_hash)
1330 		return NULL;
1331 
1332 	/* Empty hash? */
1333 	if (ftrace_hash_empty(hash))
1334 		return new_hash;
1335 
1336 	size = 1 << hash->size_bits;
1337 	for (i = 0; i < size; i++) {
1338 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
1339 			ret = add_hash_entry(new_hash, entry->ip);
1340 			if (ret < 0)
1341 				goto free_hash;
1342 		}
1343 	}
1344 
1345 	FTRACE_WARN_ON(new_hash->count != hash->count);
1346 
1347 	return new_hash;
1348 
1349  free_hash:
1350 	free_ftrace_hash(new_hash);
1351 	return NULL;
1352 }
1353 
1354 static void
1355 ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
1356 static void
1357 ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
1358 
1359 static int
1360 ftrace_hash_move(struct ftrace_ops *ops, int enable,
1361 		 struct ftrace_hash **dst, struct ftrace_hash *src)
1362 {
1363 	struct ftrace_func_entry *entry;
1364 	struct hlist_node *tn;
1365 	struct hlist_head *hhd;
1366 	struct ftrace_hash *old_hash;
1367 	struct ftrace_hash *new_hash;
1368 	int size = src->count;
1369 	int bits = 0;
1370 	int ret;
1371 	int i;
1372 
1373 	/*
1374 	 * Remove the current set, update the hash and add
1375 	 * them back.
1376 	 */
1377 	ftrace_hash_rec_disable(ops, enable);
1378 
1379 	/*
1380 	 * If the new source is empty, just free dst and assign it
1381 	 * the empty_hash.
1382 	 */
1383 	if (!src->count) {
1384 		free_ftrace_hash_rcu(*dst);
1385 		rcu_assign_pointer(*dst, EMPTY_HASH);
1386 		/* still need to update the function records */
1387 		ret = 0;
1388 		goto out;
1389 	}
1390 
1391 	/*
1392 	 * Make the hash size about 1/2 the # found
1393 	 */
1394 	for (size /= 2; size; size >>= 1)
1395 		bits++;
1396 
1397 	/* Don't allocate too much */
1398 	if (bits > FTRACE_HASH_MAX_BITS)
1399 		bits = FTRACE_HASH_MAX_BITS;
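	/*
	 * Sizing example: src->count == 6 starts the loop at size = 3,
	 * which iterates twice (size 3, then 1), giving bits = 2 and a
	 * 4-bucket hash -- roughly half the number of entries, as the
	 * comment above intends.
	 */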
1400 
1401 	ret = -ENOMEM;
1402 	new_hash = alloc_ftrace_hash(bits);
1403 	if (!new_hash)
1404 		goto out;
1405 
1406 	size = 1 << src->size_bits;
1407 	for (i = 0; i < size; i++) {
1408 		hhd = &src->buckets[i];
1409 		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
1410 			remove_hash_entry(src, entry);
1411 			__add_hash_entry(new_hash, entry);
1412 		}
1413 	}
1414 
1415 	old_hash = *dst;
1416 	rcu_assign_pointer(*dst, new_hash);
1417 	free_ftrace_hash_rcu(old_hash);
1418 
1419 	ret = 0;
1420  out:
1421 	/*
1422 	 * Enable regardless of ret:
1423 	 *  On success, we enable the new hash.
1424 	 *  On failure, we re-enable the original hash.
1425 	 */
1426 	ftrace_hash_rec_enable(ops, enable);
1427 
1428 	return ret;
1429 }
1430 
1431 /*
1432  * Test the hashes for this ops to see if we want to call
1433  * the ops->func or not.
1434  *
1435  * It's a match if the ip is in the ops->filter_hash or
1436  * the filter_hash does not exist or is empty,
1437  *  AND
1438  * the ip is not in the ops->notrace_hash.
1439  *
1440  * This needs to be called with preemption disabled as
1441  * the hashes are freed with call_rcu_sched().
1442  */
1443 static int
1444 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
1445 {
1446 	struct ftrace_hash *filter_hash;
1447 	struct ftrace_hash *notrace_hash;
1448 	int ret;
1449 
1450 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1451 	/*
1452 	 * There's a small race when adding ops: the ftrace handler
1453 	 * that wants regs may be called without them. We cannot
1454 	 * allow that handler to be called if regs is NULL.
1455 	 */
1456 	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
1457 		return 0;
1458 #endif
1459 
1460 	filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
1461 	notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
1462 
1463 	if ((ftrace_hash_empty(filter_hash) ||
1464 	     ftrace_lookup_ip(filter_hash, ip)) &&
1465 	    (ftrace_hash_empty(notrace_hash) ||
1466 	     !ftrace_lookup_ip(notrace_hash, ip)))
1467 		ret = 1;
1468 	else
1469 		ret = 0;
1470 
1471 	return ret;
1472 }
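/*
 * Example of the rule above (illustrative function names): with a
 * filter_hash containing only kmalloc and an empty notrace_hash,
 * ftrace_ops_test() returns 1 only for kmalloc's ip; with both hashes
 * empty, it returns 1 for every ip.
 */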
1473 
1474 /*
1475  * This is a double for loop. Do not use 'break' to break out of the loop,
1476  * you must use a goto.
1477  */
1478 #define do_for_each_ftrace_rec(pg, rec)					\
1479 	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
1480 		int _____i;						\
1481 		for (_____i = 0; _____i < pg->index; _____i++) {	\
1482 			rec = &pg->records[_____i];
1483 
1484 #define while_for_each_ftrace_rec()		\
1485 		}				\
1486 	}
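/*
 * Pairing sketch for the two macros above (count is hypothetical):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_ENABLED)
 *			count++;
 *	} while_for_each_ftrace_rec();
 */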
1487 
1488 
1489 static int ftrace_cmp_recs(const void *a, const void *b)
1490 {
1491 	const struct dyn_ftrace *key = a;
1492 	const struct dyn_ftrace *rec = b;
1493 
1494 	if (key->flags < rec->ip)
1495 		return -1;
1496 	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
1497 		return 1;
1498 	return 0;
1499 }
1500 
1501 static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
1502 {
1503 	struct ftrace_page *pg;
1504 	struct dyn_ftrace *rec;
1505 	struct dyn_ftrace key;
1506 
1507 	key.ip = start;
1508 	key.flags = end;	/* overload flags, as it is unsigned long */
1509 
1510 	for (pg = ftrace_pages_start; pg; pg = pg->next) {
1511 		if (end < pg->records[0].ip ||
1512 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
1513 			continue;
1514 		rec = bsearch(&key, pg->records, pg->index,
1515 			      sizeof(struct dyn_ftrace),
1516 			      ftrace_cmp_recs);
1517 		if (rec)
1518 			return rec->ip;
1519 	}
1520 
1521 	return 0;
1522 }
1523 
1524 /**
1525  * ftrace_location - return true if the ip given is a traced location
1526  * @ip: the instruction pointer to check
1527  *
1528  * Returns rec->ip if @ip given is a pointer to a ftrace location.
1529  * That is, the instruction that is either a NOP or call to
1530  * the function tracer. It checks the ftrace internal tables to
1531  * determine if the address belongs or not.
1532  */
1533 unsigned long ftrace_location(unsigned long ip)
1534 {
1535 	return ftrace_location_range(ip, ip);
1536 }
1537 
1538 /**
1539  * ftrace_text_reserved - return true if range contains an ftrace location
1540  * @start: start of range to search
1541  * @end: end of range to search (inclusive). @end points to the last byte to check.
1542  *
1543  * Returns 1 if the range from @start to @end contains an ftrace location.
1544  * That is, the instruction that is either a NOP or call to
1545  * the function tracer. It checks the ftrace internal tables to
1546  * determine if the address belongs or not.
1547  */
1548 int ftrace_text_reserved(void *start, void *end)
1549 {
1550 	unsigned long ret;
1551 
1552 	ret = ftrace_location_range((unsigned long)start,
1553 				    (unsigned long)end);
1554 
1555 	return (int)!!ret;
1556 }
1557 
1558 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1559 				     int filter_hash,
1560 				     bool inc)
1561 {
1562 	struct ftrace_hash *hash;
1563 	struct ftrace_hash *other_hash;
1564 	struct ftrace_page *pg;
1565 	struct dyn_ftrace *rec;
1566 	int count = 0;
1567 	int all = 0;
1568 
1569 	/* Only update if the ops has been registered */
1570 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1571 		return;
1572 
1573 	/*
1574 	 * In the filter_hash case:
1575 	 *   If the count is zero, we update all records.
1576 	 *   Otherwise we just update the items in the hash.
1577 	 *
1578 	 * In the notrace_hash case:
1579 	 *   We enable the update in the hash.
1580 	 *   As disabling notrace means enabling the tracing,
1581 	 *   and enabling notrace means disabling, the inc variable
1582 	 *   gets inverted.
1583 	 */
1584 	if (filter_hash) {
1585 		hash = ops->filter_hash;
1586 		other_hash = ops->notrace_hash;
1587 		if (ftrace_hash_empty(hash))
1588 			all = 1;
1589 	} else {
1590 		inc = !inc;
1591 		hash = ops->notrace_hash;
1592 		other_hash = ops->filter_hash;
1593 		/*
1594 		 * If the notrace hash has no items,
1595 		 * then there's nothing to do.
1596 		 */
1597 		if (ftrace_hash_empty(hash))
1598 			return;
1599 	}
1600 
1601 	do_for_each_ftrace_rec(pg, rec) {
1602 		int in_other_hash = 0;
1603 		int in_hash = 0;
1604 		int match = 0;
1605 
1606 		if (all) {
1607 			/*
1608 			 * Only the filter_hash affects all records.
1609 			 * Update if the record is not in the notrace hash.
1610 			 */
1611 			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
1612 				match = 1;
1613 		} else {
1614 			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1615 			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1616 
1617 			/* filter_hash: match ips in hash but not in other_hash.
1618 			 * !filter_hash: match ips in hash that are also in
1619 			 * other_hash (or when other_hash is empty). */
1620 			if (filter_hash && in_hash && !in_other_hash)
1621 				match = 1;
1622 			else if (!filter_hash && in_hash &&
1623 				 (in_other_hash || ftrace_hash_empty(other_hash)))
1624 				match = 1;
1625 		}
1626 		if (!match)
1627 			continue;
1628 
1629 		if (inc) {
1630 			rec->flags++;
1631 			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1632 				return;
1633 			/*
1634 			 * If any ops wants regs saved for this function
1635 			 * then all ops will get saved regs.
1636 			 */
1637 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
1638 				rec->flags |= FTRACE_FL_REGS;
1639 		} else {
1640 			if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1641 				return;
1642 			rec->flags--;
1643 		}
1644 		count++;
1645 		/* Shortcut, if we handled all records, we are done. */
1646 		if (!all && count == hash->count)
1647 			return;
1648 	} while_for_each_ftrace_rec();
1649 }
1650 
1651 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1652 				    int filter_hash)
1653 {
1654 	__ftrace_hash_rec_update(ops, filter_hash, 0);
1655 }
1656 
1657 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1658 				   int filter_hash)
1659 {
1660 	__ftrace_hash_rec_update(ops, filter_hash, 1);
1661 }
1662 
1663 static void print_ip_ins(const char *fmt, unsigned char *p)
1664 {
1665 	int i;
1666 
1667 	printk(KERN_CONT "%s", fmt);
1668 
1669 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1670 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1671 }
1672 
1673 /**
1674  * ftrace_bug - report and shutdown function tracer
1675  * @failed: The failed type (EFAULT, EINVAL, EPERM)
1676  * @ip: The address that failed
1677  *
1678  * The arch code that enables or disables the function tracing
1679  * can call ftrace_bug() when it has detected a problem in
1680  * modifying the code. @failed should be one of:
1681  * EFAULT - if the problem happens on reading the @ip address
1682  * EINVAL - if what is read at @ip is not what was expected
1683  * EPERM - if the problem happens on writing to the @ip address
1684  */
1685 void ftrace_bug(int failed, unsigned long ip)
1686 {
1687 	switch (failed) {
1688 	case -EFAULT:
1689 		FTRACE_WARN_ON_ONCE(1);
1690 		pr_info("ftrace faulted on modifying ");
1691 		print_ip_sym(ip);
1692 		break;
1693 	case -EINVAL:
1694 		FTRACE_WARN_ON_ONCE(1);
1695 		pr_info("ftrace failed to modify ");
1696 		print_ip_sym(ip);
1697 		print_ip_ins(" actual: ", (unsigned char *)ip);
1698 		printk(KERN_CONT "\n");
1699 		break;
1700 	case -EPERM:
1701 		FTRACE_WARN_ON_ONCE(1);
1702 		pr_info("ftrace faulted on writing ");
1703 		print_ip_sym(ip);
1704 		break;
1705 	default:
1706 		FTRACE_WARN_ON_ONCE(1);
1707 		pr_info("ftrace faulted on unknown error ");
1708 		print_ip_sym(ip);
1709 	}
1710 }
1711 
1712 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
1713 {
1714 	unsigned long flag = 0UL;
1715 
1716 	/*
1717 	 * If we are updating calls:
1718 	 *
1719 	 *   If the record has a ref count, then we need to enable it
1720 	 *   because someone is using it.
1721 	 *
1722 	 *   Otherwise we make sure it's disabled.
1723 	 *
1724 	 * If we are disabling calls, then disable all records that
1725 	 * are enabled.
1726 	 */
1727 	if (enable && (rec->flags & ~FTRACE_FL_MASK))
1728 		flag = FTRACE_FL_ENABLED;
1729 
1730 	/*
1731 	 * If enabling and the REGS flag does not match the REGS_EN, then
1732 	 * do not ignore this record. Set flags to fail the compare against
1733 	 * ENABLED.
1734 	 */
1735 	if (flag &&
1736 	    (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
1737 		flag |= FTRACE_FL_REGS;
1738 
1739 	/* If the state of this record hasn't changed, then do nothing */
1740 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1741 		return FTRACE_UPDATE_IGNORE;
1742 
1743 	if (flag) {
1744 		/* Save off if rec is being enabled (for return value) */
1745 		flag ^= rec->flags & FTRACE_FL_ENABLED;
1746 
1747 		if (update) {
1748 			rec->flags |= FTRACE_FL_ENABLED;
1749 			if (flag & FTRACE_FL_REGS) {
1750 				if (rec->flags & FTRACE_FL_REGS)
1751 					rec->flags |= FTRACE_FL_REGS_EN;
1752 				else
1753 					rec->flags &= ~FTRACE_FL_REGS_EN;
1754 			}
1755 		}
1756 
1757 		/*
1758 		 * If this record is being updated from a nop, then
1759 		 *   return UPDATE_MAKE_CALL.
1760 		 * Otherwise, if the EN flag is set, then return
1761 		 *   UPDATE_MODIFY_CALL_REGS to tell the caller to convert
1762 		 *   from the non-save regs, to a save regs function.
1763 		 * Otherwise,
1764 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
1765 		 *   from the save regs, to a non-save regs function.
1766 		 */
1767 		if (flag & FTRACE_FL_ENABLED)
1768 			return FTRACE_UPDATE_MAKE_CALL;
1769 		else if (rec->flags & FTRACE_FL_REGS_EN)
1770 			return FTRACE_UPDATE_MODIFY_CALL_REGS;
1771 		else
1772 			return FTRACE_UPDATE_MODIFY_CALL;
1773 	}
1774 
1775 	if (update) {
1776 		/* If there's no more users, clear all flags */
1777 		if (!(rec->flags & ~FTRACE_FL_MASK))
1778 			rec->flags = 0;
1779 		else
1780 			/* Just disable the record (keep REGS state) */
1781 			rec->flags &= ~FTRACE_FL_ENABLED;
1782 	}
1783 
1784 	return FTRACE_UPDATE_MAKE_NOP;
1785 }
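/*
 * Transition example (illustrative): a record with one ref count and
 * no FTRACE_FL_ENABLED bit, passed enable = 1 and update = 1, gains
 * FTRACE_FL_ENABLED and the function returns FTRACE_UPDATE_MAKE_CALL,
 * telling the caller to patch the nop into a call.
 */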
1786 
1787 /**
1788  * ftrace_update_record, set a record that now is tracing or not
1789  * @rec: the record to update
1790  * @enable: set to 1 if the record is tracing, zero to force disable
1791  *
1792  * The records that represent all functions that can be traced need
1793  * to be updated when tracing has been enabled.
1794  */
1795 int ftrace_update_record(struct dyn_ftrace *rec, int enable)
1796 {
1797 	return ftrace_check_record(rec, enable, 1);
1798 }
1799 
1800 /**
1801  * ftrace_test_record, check if the record has been enabled or not
1802  * @rec: the record to test
1803  * @enable: set to 1 to check if enabled, 0 if it is disabled
1804  *
1805  * The arch code may need to test if a record is already set to
1806  * tracing to determine how to modify the function code that it
1807  * represents.
1808  */
1809 int ftrace_test_record(struct dyn_ftrace *rec, int enable)
1810 {
1811 	return ftrace_check_record(rec, enable, 0);
1812 }
1813 
1814 static int
1815 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1816 {
1817 	unsigned long ftrace_old_addr;
1818 	unsigned long ftrace_addr;
1819 	int ret;
1820 
1821 	ret = ftrace_update_record(rec, enable);
1822 
1823 	if (rec->flags & FTRACE_FL_REGS)
1824 		ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
1825 	else
1826 		ftrace_addr = (unsigned long)FTRACE_ADDR;
1827 
1828 	switch (ret) {
1829 	case FTRACE_UPDATE_IGNORE:
1830 		return 0;
1831 
1832 	case FTRACE_UPDATE_MAKE_CALL:
1833 		return ftrace_make_call(rec, ftrace_addr);
1834 
1835 	case FTRACE_UPDATE_MAKE_NOP:
1836 		return ftrace_make_nop(NULL, rec, ftrace_addr);
1837 
1838 	case FTRACE_UPDATE_MODIFY_CALL_REGS:
1839 	case FTRACE_UPDATE_MODIFY_CALL:
1840 		if (rec->flags & FTRACE_FL_REGS)
1841 			ftrace_old_addr = (unsigned long)FTRACE_ADDR;
1842 		else
1843 			ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
1844 
1845 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
1846 	}
1847 
1848 	return -1; /* unknown ftrace bug */
1849 }
1850 
1851 void __weak ftrace_replace_code(int enable)
1852 {
1853 	struct dyn_ftrace *rec;
1854 	struct ftrace_page *pg;
1855 	int failed;
1856 
1857 	if (unlikely(ftrace_disabled))
1858 		return;
1859 
1860 	do_for_each_ftrace_rec(pg, rec) {
1861 		failed = __ftrace_replace_code(rec, enable);
1862 		if (failed) {
1863 			ftrace_bug(failed, rec->ip);
1864 			/* Stop processing */
1865 			return;
1866 		}
1867 	} while_for_each_ftrace_rec();
1868 }
1869 
1870 struct ftrace_rec_iter {
1871 	struct ftrace_page	*pg;
1872 	int			index;
1873 };
1874 
1875 /**
1876  * ftrace_rec_iter_start, start up iterating over traced functions
1877  *
1878  * Returns an iterator handle that is used to iterate over all
1879  * the records that represent address locations where functions
1880  * are traced.
1881  *
1882  * May return NULL if no records are available.
1883  */
1884 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
1885 {
1886 	/*
1887 	 * We only use a single iterator.
1888 	 * Protected by the ftrace_lock mutex.
1889 	 */
1890 	static struct ftrace_rec_iter ftrace_rec_iter;
1891 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
1892 
1893 	iter->pg = ftrace_pages_start;
1894 	iter->index = 0;
1895 
1896 	/* Could have empty pages */
1897 	while (iter->pg && !iter->pg->index)
1898 		iter->pg = iter->pg->next;
1899 
1900 	if (!iter->pg)
1901 		return NULL;
1902 
1903 	return iter;
1904 }
1905 
1906 /**
1907  * ftrace_rec_iter_next, get the next record to process.
1908  * @iter: The handle to the iterator.
1909  *
1910  * Returns the next iterator after the given iterator @iter.
1911  */
1912 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
1913 {
1914 	iter->index++;
1915 
1916 	if (iter->index >= iter->pg->index) {
1917 		iter->pg = iter->pg->next;
1918 		iter->index = 0;
1919 
1920 		/* Could have empty pages */
1921 		while (iter->pg && !iter->pg->index)
1922 			iter->pg = iter->pg->next;
1923 	}
1924 
1925 	if (!iter->pg)
1926 		return NULL;
1927 
1928 	return iter;
1929 }
1930 
1931 /**
1932  * ftrace_rec_iter_record, get the record at the iterator location
1933  * @iter: The current iterator location
1934  *
1935  * Returns the record that the current @iter is at.
1936  */
1937 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
1938 {
1939 	return &iter->pg->records[iter->index];
1940 }
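/*
 * Typical use of the three iterator calls above, as arch code might do
 * while holding ftrace_lock (sketch; process_rec() is hypothetical):
 *
 *	struct ftrace_rec_iter *iter;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter))
 *		process_rec(ftrace_rec_iter_record(iter));
 */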
1941 
1942 static int
1943 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1944 {
1945 	unsigned long ip;
1946 	int ret;
1947 
1948 	ip = rec->ip;
1949 
1950 	if (unlikely(ftrace_disabled))
1951 		return 0;
1952 
1953 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1954 	if (ret) {
1955 		ftrace_bug(ret, ip);
1956 		return 0;
1957 	}
1958 	return 1;
1959 }
1960 
1961 /*
1962  * archs can override this function if they must do something
1963  * before the code modification is performed.
1964  */
1965 int __weak ftrace_arch_code_modify_prepare(void)
1966 {
1967 	return 0;
1968 }
1969 
1970 /*
1971  * archs can override this function if they must do something
1972  * after the code modification is performed.
1973  */
1974 int __weak ftrace_arch_code_modify_post_process(void)
1975 {
1976 	return 0;
1977 }
1978 
1979 void ftrace_modify_all_code(int command)
1980 {
1981 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
1982 
1983 	/*
1984 	 * If the ftrace_caller calls a ftrace_ops func directly,
1985 	 * we need to make sure that it only traces functions it
1986 	 * expects to trace. When doing the switch of functions,
1987 	 * we need to update to the ftrace_ops_list_func first
1988 	 * before the transition between old and new calls takes place,
1989 	 * as the ftrace_ops_list_func will check the ops hashes
1990 	 * to make sure each ops traces only the functions it
1991 	 * expects to trace.
1992 	 */
1993 	if (update)
1994 		ftrace_update_ftrace_func(ftrace_ops_list_func);
1995 
1996 	if (command & FTRACE_UPDATE_CALLS)
1997 		ftrace_replace_code(1);
1998 	else if (command & FTRACE_DISABLE_CALLS)
1999 		ftrace_replace_code(0);
2000 
2001 	if (update && ftrace_trace_function != ftrace_ops_list_func)
2002 		ftrace_update_ftrace_func(ftrace_trace_function);
2003 
2004 	if (command & FTRACE_START_FUNC_RET)
2005 		ftrace_enable_ftrace_graph_caller();
2006 	else if (command & FTRACE_STOP_FUNC_RET)
2007 		ftrace_disable_ftrace_graph_caller();
2008 }
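/*
 * Command example (illustrative): passing
 * FTRACE_UPDATE_CALLS | FTRACE_START_FUNC_RET patches the call sites
 * via ftrace_replace_code(1) and then enables the graph caller; with no
 * FTRACE_UPDATE_TRACE_FUNC bit set, the trace function is left alone.
 */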
2009 
2010 static int __ftrace_modify_code(void *data)
2011 {
2012 	int *command = data;
2013 
2014 	ftrace_modify_all_code(*command);
2015 
2016 	return 0;
2017 }
2018 
2019 /**
2020  * ftrace_run_stop_machine, go back to the stop machine method
2021  * @command: The command to tell ftrace what to do
2022  *
2023  * If an arch needs to fall back to the stop machine method,
2024  * it can call this function.
2025  */
2026 void ftrace_run_stop_machine(int command)
2027 {
2028 	stop_machine(__ftrace_modify_code, &command, NULL);
2029 }
2030 
2031 /**
2032  * arch_ftrace_update_code - modify the code to trace or not trace
2033  * @command: The command that needs to be done
2034  *
2035  * Archs can override this function if they do not need to
2036  * run stop_machine() to modify code.
2037  */
2038 void __weak arch_ftrace_update_code(int command)
2039 {
2040 	ftrace_run_stop_machine(command);
2041 }
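
/*
 * Example (illustrative sketch): an arch that can patch text safely
 * while other CPUs keep running (x86 does this with int3 breakpoints)
 * overrides the weak function above and avoids stop_machine():
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		...arch-specific preparation...
 *		ftrace_modify_all_code(command);
 *		...arch-specific cleanup...
 *	}
 */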
2042 
2043 static void ftrace_run_update_code(int command)
2044 {
2045 	int ret;
2046 
2047 	ret = ftrace_arch_code_modify_prepare();
2048 	FTRACE_WARN_ON(ret);
2049 	if (ret)
2050 		return;
2051 	/*
2052 	 * Do not call function tracer while we update the code.
2053 	 * We are in stop machine.
2054 	 */
2055 	function_trace_stop++;
2056 
2057 	/*
2058 	 * By default we use stop_machine() to modify the code.
2059 	 * But archs can do whatever they want as long as it
2060 	 * is safe. The stop_machine() is the safest, but also
2061 	 * produces the most overhead.
2062 	 */
2063 	arch_ftrace_update_code(command);
2064 
2065 	function_trace_stop--;
2066 
2067 	ret = ftrace_arch_code_modify_post_process();
2068 	FTRACE_WARN_ON(ret);
2069 }
2070 
2071 static ftrace_func_t saved_ftrace_func;
2072 static int ftrace_start_up;
2073 static int global_start_up;
2074 
2075 static void ftrace_startup_enable(int command)
2076 {
2077 	if (saved_ftrace_func != ftrace_trace_function) {
2078 		saved_ftrace_func = ftrace_trace_function;
2079 		command |= FTRACE_UPDATE_TRACE_FUNC;
2080 	}
2081 
2082 	if (!command || !ftrace_enabled)
2083 		return;
2084 
2085 	ftrace_run_update_code(command);
2086 }
2087 
2088 static int ftrace_startup(struct ftrace_ops *ops, int command)
2089 {
2090 	bool hash_enable = true;
2091 
2092 	if (unlikely(ftrace_disabled))
2093 		return -ENODEV;
2094 
2095 	ftrace_start_up++;
2096 	command |= FTRACE_UPDATE_CALLS;
2097 
2098 	/* ops marked global share the filter hashes */
2099 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2100 		ops = &global_ops;
2101 		/* Don't update hash if global is already set */
2102 		if (global_start_up)
2103 			hash_enable = false;
2104 		global_start_up++;
2105 	}
2106 
2107 	ops->flags |= FTRACE_OPS_FL_ENABLED;
2108 	if (hash_enable)
2109 		ftrace_hash_rec_enable(ops, 1);
2110 
2111 	ftrace_startup_enable(command);
2112 
2113 	return 0;
2114 }
2115 
2116 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
2117 {
2118 	bool hash_disable = true;
2119 
2120 	if (unlikely(ftrace_disabled))
2121 		return;
2122 
2123 	ftrace_start_up--;
2124 	/*
2125 	 * Just warn in case of unbalance; no need to kill ftrace, it's not
2126 	 * critical, but the ftrace_call callers may never be nopped again
2127 	 * after further ftrace uses.
2128 	 */
2129 	WARN_ON_ONCE(ftrace_start_up < 0);
2130 
2131 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
2132 		ops = &global_ops;
2133 		global_start_up--;
2134 		WARN_ON_ONCE(global_start_up < 0);
2135 		/* Don't update hash if global still has users */
2136 		if (global_start_up) {
2137 			WARN_ON_ONCE(!ftrace_start_up);
2138 			hash_disable = false;
2139 		}
2140 	}
2141 
2142 	if (hash_disable)
2143 		ftrace_hash_rec_disable(ops, 1);
2144 
2145 	if (ops != &global_ops || !global_start_up)
2146 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2147 
2148 	command |= FTRACE_UPDATE_CALLS;
2149 
2150 	if (saved_ftrace_func != ftrace_trace_function) {
2151 		saved_ftrace_func = ftrace_trace_function;
2152 		command |= FTRACE_UPDATE_TRACE_FUNC;
2153 	}
2154 
2155 	if (!command || !ftrace_enabled)
2156 		return;
2157 
2158 	ftrace_run_update_code(command);
2159 }
2160 
2161 static void ftrace_startup_sysctl(void)
2162 {
2163 	if (unlikely(ftrace_disabled))
2164 		return;
2165 
2166 	/* Force update next time */
2167 	saved_ftrace_func = NULL;
2168 	/* ftrace_start_up is true if we want ftrace running */
2169 	if (ftrace_start_up)
2170 		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
2171 }
2172 
2173 static void ftrace_shutdown_sysctl(void)
2174 {
2175 	if (unlikely(ftrace_disabled))
2176 		return;
2177 
2178 	/* ftrace_start_up is true if ftrace is running */
2179 	if (ftrace_start_up)
2180 		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
2181 }
2182 
2183 static cycle_t		ftrace_update_time;
2184 static unsigned long	ftrace_update_cnt;
2185 unsigned long		ftrace_update_tot_cnt;
2186 
2187 static inline int ops_traces_mod(struct ftrace_ops *ops)
2188 {
2189 	/*
2190 	 * An empty filter_hash defaults to tracing the whole module.
2191 	 * But the notrace_hash requires a test of individual module functions.
2192 	 */
2193 	return ftrace_hash_empty(ops->filter_hash) &&
2194 		ftrace_hash_empty(ops->notrace_hash);
2195 }
2196 
2197 /*
2198  * Check if the current ops references the record.
2199  *
2200  * If the ops traces all functions, then it was already accounted for.
2201  * If the ops does not trace the current record function, skip it.
2202  * If the ops ignores the function via notrace filter, skip it.
2203  */
2204 static inline bool
2205 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2206 {
2207 	/* If ops isn't enabled, ignore it */
2208 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2209 		return 0;
2210 
2211 	/* If ops traces all mods, we already accounted for it */
2212 	if (ops_traces_mod(ops))
2213 		return 0;
2214 
2215 	/* The function must be in the filter */
2216 	if (!ftrace_hash_empty(ops->filter_hash) &&
2217 	    !ftrace_lookup_ip(ops->filter_hash, rec->ip))
2218 		return 0;
2219 
2220 	/* If in notrace hash, we ignore it too */
2221 	if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
2222 		return 0;
2223 
2224 	return 1;
2225 }
2226 
2227 static int referenced_filters(struct dyn_ftrace *rec)
2228 {
2229 	struct ftrace_ops *ops;
2230 	int cnt = 0;
2231 
2232 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2233 		if (ops_references_rec(ops, rec))
2234 		    cnt++;
2235 	}
2236 
2237 	return cnt;
2238 }
2239 
2240 static int ftrace_update_code(struct module *mod)
2241 {
2242 	struct ftrace_page *pg;
2243 	struct dyn_ftrace *p;
2244 	cycle_t start, stop;
2245 	unsigned long ref = 0;
2246 	bool test = false;
2247 	int i;
2248 
2249 	/*
2250 	 * When adding a module, we need to check if tracers are
2251 	 * currently enabled and if they are set to trace all functions.
2252 	 * If they are, we need to enable the module functions as well
2253 	 * as update the reference counts for those function records.
2254 	 */
2255 	if (mod) {
2256 		struct ftrace_ops *ops;
2257 
2258 		for (ops = ftrace_ops_list;
2259 		     ops != &ftrace_list_end; ops = ops->next) {
2260 			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2261 				if (ops_traces_mod(ops))
2262 					ref++;
2263 				else
2264 					test = true;
2265 			}
2266 		}
2267 	}
2268 
2269 	start = ftrace_now(raw_smp_processor_id());
2270 	ftrace_update_cnt = 0;
2271 
2272 	for (pg = ftrace_new_pgs; pg; pg = pg->next) {
2273 
2274 		for (i = 0; i < pg->index; i++) {
2275 			int cnt = ref;
2276 
2277 			/* If something went wrong, bail without enabling anything */
2278 			if (unlikely(ftrace_disabled))
2279 				return -1;
2280 
2281 			p = &pg->records[i];
2282 			if (test)
2283 				cnt += referenced_filters(p);
2284 			p->flags = cnt;
2285 
2286 			/*
2287 			 * Do the initial record conversion from mcount jump
2288 			 * to the NOP instructions.
2289 			 */
2290 			if (!ftrace_code_disable(mod, p))
2291 				break;
2292 
2293 			ftrace_update_cnt++;
2294 
2295 			/*
2296 			 * If tracing is enabled, go ahead and enable the record.
2297 			 *
2298 			 * The reason not to enable the record immediately is the
2299 			 * inherent check of ftrace_make_nop/ftrace_make_call for
2300 			 * correct previous instructions.  Doing the NOP conversion
2301 			 * first puts the module into the correct state, thus
2302 			 * passing the ftrace_make_call check.
2303 			 */
2304 			if (ftrace_start_up && cnt) {
2305 				int failed = __ftrace_replace_code(p, 1);
2306 				if (failed)
2307 					ftrace_bug(failed, p->ip);
2308 			}
2309 		}
2310 	}
2311 
2312 	ftrace_new_pgs = NULL;
2313 
2314 	stop = ftrace_now(raw_smp_processor_id());
2315 	ftrace_update_time = stop - start;
2316 	ftrace_update_tot_cnt += ftrace_update_cnt;
2317 
2318 	return 0;
2319 }
2320 
2321 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
2322 {
2323 	int order;
2324 	int cnt;
2325 
2326 	if (WARN_ON(!count))
2327 		return -EINVAL;
2328 
2329 	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
2330 
2331 	/*
2332 	 * We want to fill as much as possible. No more than a page
2333 	 * may be empty.
2334 	 */
2335 	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2336 		order--;
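
	/*
	 * Worked example (illustrative numbers: a 4K page and a 32-byte
	 * dyn_ftrace give ENTRIES_PER_PAGE == 128): for count == 600,
	 * DIV_ROUND_UP(600, 128) == 5 and get_count_order(5) == 3, i.e.
	 * 8 pages holding 1024 entries.  Since 1024 >= 600 + 128, more
	 * than a page would sit empty, so the loop above drops the order
	 * to 2 (4 pages, 512 entries); the caller then puts the other 88
	 * entries into the next ftrace_page it allocates.
	 */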
2337 
2338  again:
2339 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
2340 
2341 	if (!pg->records) {
2342 		/* if we can't allocate this size, try something smaller */
2343 		if (!order)
2344 			return -ENOMEM;
2345 		order >>= 1;
2346 		goto again;
2347 	}
2348 
2349 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2350 	pg->size = cnt;
2351 
2352 	if (cnt > count)
2353 		cnt = count;
2354 
2355 	return cnt;
2356 }
2357 
2358 static struct ftrace_page *
2359 ftrace_allocate_pages(unsigned long num_to_init)
2360 {
2361 	struct ftrace_page *start_pg;
2362 	struct ftrace_page *pg;
2363 	int order;
2364 	int cnt;
2365 
2366 	if (!num_to_init)
2367 		return 0;
2368 
2369 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2370 	if (!pg)
2371 		return NULL;
2372 
2373 	/*
2374 	 * Try to allocate as much as possible in one contiguous
2375 	 * location that fills in all of the space. We want to
2376 	 * waste as little space as possible.
2377 	 */
2378 	for (;;) {
2379 		cnt = ftrace_allocate_records(pg, num_to_init);
2380 		if (cnt < 0)
2381 			goto free_pages;
2382 
2383 		num_to_init -= cnt;
2384 		if (!num_to_init)
2385 			break;
2386 
2387 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
2388 		if (!pg->next)
2389 			goto free_pages;
2390 
2391 		pg = pg->next;
2392 	}
2393 
2394 	return start_pg;
2395 
2396  free_pages:
2397 	while (start_pg) {
2398 		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
2399 		free_pages((unsigned long)pg->records, order);
2400 		start_pg = pg->next;
2401 		kfree(pg);
2402 		pg = start_pg;
2403 	}
2404 	pr_info("ftrace: FAILED to allocate memory for functions\n");
2405 	return NULL;
2406 }
2407 
2408 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
2409 {
2410 	int cnt;
2411 
2412 	if (!num_to_init) {
2413 		pr_info("ftrace: No functions to be traced?\n");
2414 		return -1;
2415 	}
2416 
2417 	cnt = num_to_init / ENTRIES_PER_PAGE;
2418 	pr_info("ftrace: allocating %ld entries in %d pages\n",
2419 		num_to_init, cnt + 1);
2420 
2421 	return 0;
2422 }
2423 
2424 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
2425 
2426 struct ftrace_iterator {
2427 	loff_t				pos;
2428 	loff_t				func_pos;
2429 	struct ftrace_page		*pg;
2430 	struct dyn_ftrace		*func;
2431 	struct ftrace_func_probe	*probe;
2432 	struct trace_parser		parser;
2433 	struct ftrace_hash		*hash;
2434 	struct ftrace_ops		*ops;
2435 	int				hidx;
2436 	int				idx;
2437 	unsigned			flags;
2438 };
2439 
2440 static void *
2441 t_hash_next(struct seq_file *m, loff_t *pos)
2442 {
2443 	struct ftrace_iterator *iter = m->private;
2444 	struct hlist_node *hnd = NULL;
2445 	struct hlist_head *hhd;
2446 
2447 	(*pos)++;
2448 	iter->pos = *pos;
2449 
2450 	if (iter->probe)
2451 		hnd = &iter->probe->node;
2452  retry:
2453 	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
2454 		return NULL;
2455 
2456 	hhd = &ftrace_func_hash[iter->hidx];
2457 
2458 	if (hlist_empty(hhd)) {
2459 		iter->hidx++;
2460 		hnd = NULL;
2461 		goto retry;
2462 	}
2463 
2464 	if (!hnd)
2465 		hnd = hhd->first;
2466 	else {
2467 		hnd = hnd->next;
2468 		if (!hnd) {
2469 			iter->hidx++;
2470 			goto retry;
2471 		}
2472 	}
2473 
2474 	if (WARN_ON_ONCE(!hnd))
2475 		return NULL;
2476 
2477 	iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
2478 
2479 	return iter;
2480 }
2481 
2482 static void *t_hash_start(struct seq_file *m, loff_t *pos)
2483 {
2484 	struct ftrace_iterator *iter = m->private;
2485 	void *p = NULL;
2486 	loff_t l;
2487 
2488 	if (!(iter->flags & FTRACE_ITER_DO_HASH))
2489 		return NULL;
2490 
2491 	if (iter->func_pos > *pos)
2492 		return NULL;
2493 
2494 	iter->hidx = 0;
2495 	for (l = 0; l <= (*pos - iter->func_pos); ) {
2496 		p = t_hash_next(m, &l);
2497 		if (!p)
2498 			break;
2499 	}
2500 	if (!p)
2501 		return NULL;
2502 
2503 	/* Only set this if we have an item */
2504 	iter->flags |= FTRACE_ITER_HASH;
2505 
2506 	return iter;
2507 }
2508 
2509 static int
2510 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
2511 {
2512 	struct ftrace_func_probe *rec;
2513 
2514 	rec = iter->probe;
2515 	if (WARN_ON_ONCE(!rec))
2516 		return -EIO;
2517 
2518 	if (rec->ops->print)
2519 		return rec->ops->print(m, rec->ip, rec->ops, rec->data);
2520 
2521 	seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
2522 
2523 	if (rec->data)
2524 		seq_printf(m, ":%p", rec->data);
2525 	seq_putc(m, '\n');
2526 
2527 	return 0;
2528 }
2529 
2530 static void *
2531 t_next(struct seq_file *m, void *v, loff_t *pos)
2532 {
2533 	struct ftrace_iterator *iter = m->private;
2534 	struct ftrace_ops *ops = iter->ops;
2535 	struct dyn_ftrace *rec = NULL;
2536 
2537 	if (unlikely(ftrace_disabled))
2538 		return NULL;
2539 
2540 	if (iter->flags & FTRACE_ITER_HASH)
2541 		return t_hash_next(m, pos);
2542 
2543 	(*pos)++;
2544 	iter->pos = iter->func_pos = *pos;
2545 
2546 	if (iter->flags & FTRACE_ITER_PRINTALL)
2547 		return t_hash_start(m, pos);
2548 
2549  retry:
2550 	if (iter->idx >= iter->pg->index) {
2551 		if (iter->pg->next) {
2552 			iter->pg = iter->pg->next;
2553 			iter->idx = 0;
2554 			goto retry;
2555 		}
2556 	} else {
2557 		rec = &iter->pg->records[iter->idx++];
2558 		if (((iter->flags & FTRACE_ITER_FILTER) &&
2559 		     !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
2560 
2561 		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
2562 		     !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
2563 
2564 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
2565 		     !(rec->flags & FTRACE_FL_ENABLED))) {
2566 
2567 			rec = NULL;
2568 			goto retry;
2569 		}
2570 	}
2571 
2572 	if (!rec)
2573 		return t_hash_start(m, pos);
2574 
2575 	iter->func = rec;
2576 
2577 	return iter;
2578 }
2579 
2580 static void reset_iter_read(struct ftrace_iterator *iter)
2581 {
2582 	iter->pos = 0;
2583 	iter->func_pos = 0;
2584 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
2585 }
2586 
2587 static void *t_start(struct seq_file *m, loff_t *pos)
2588 {
2589 	struct ftrace_iterator *iter = m->private;
2590 	struct ftrace_ops *ops = iter->ops;
2591 	void *p = NULL;
2592 	loff_t l;
2593 
2594 	mutex_lock(&ftrace_lock);
2595 
2596 	if (unlikely(ftrace_disabled))
2597 		return NULL;
2598 
2599 	/*
2600 	 * If an lseek was done, then reset and start from the beginning.
2601 	 */
2602 	if (*pos < iter->pos)
2603 		reset_iter_read(iter);
2604 
2605 	/*
2606 	 * For set_ftrace_filter reading, if we have the filter
2607 	 * off, we can take a shortcut and just print out that all
2608 	 * functions are enabled.
2609 	 */
2610 	if (iter->flags & FTRACE_ITER_FILTER &&
2611 	    ftrace_hash_empty(ops->filter_hash)) {
2612 		if (*pos > 0)
2613 			return t_hash_start(m, pos);
2614 		iter->flags |= FTRACE_ITER_PRINTALL;
2615 		/* reset in case of seek/pread */
2616 		iter->flags &= ~FTRACE_ITER_HASH;
2617 		return iter;
2618 	}
2619 
2620 	if (iter->flags & FTRACE_ITER_HASH)
2621 		return t_hash_start(m, pos);
2622 
2623 	/*
2624 	 * Unfortunately, we need to restart at ftrace_pages_start
2625 	 * every time we let go of the ftrace_lock. This is because
2626 	 * those pointers can change without the lock.
2627 	 */
2628 	iter->pg = ftrace_pages_start;
2629 	iter->idx = 0;
2630 	for (l = 0; l <= *pos; ) {
2631 		p = t_next(m, p, &l);
2632 		if (!p)
2633 			break;
2634 	}
2635 
2636 	if (!p)
2637 		return t_hash_start(m, pos);
2638 
2639 	return iter;
2640 }
2641 
2642 static void t_stop(struct seq_file *m, void *p)
2643 {
2644 	mutex_unlock(&ftrace_lock);
2645 }
2646 
2647 static int t_show(struct seq_file *m, void *v)
2648 {
2649 	struct ftrace_iterator *iter = m->private;
2650 	struct dyn_ftrace *rec;
2651 
2652 	if (iter->flags & FTRACE_ITER_HASH)
2653 		return t_hash_show(m, iter);
2654 
2655 	if (iter->flags & FTRACE_ITER_PRINTALL) {
2656 		seq_printf(m, "#### all functions enabled ####\n");
2657 		return 0;
2658 	}
2659 
2660 	rec = iter->func;
2661 
2662 	if (!rec)
2663 		return 0;
2664 
2665 	seq_printf(m, "%ps", (void *)rec->ip);
2666 	if (iter->flags & FTRACE_ITER_ENABLED)
2667 		seq_printf(m, " (%ld)%s",
2668 			   rec->flags & ~FTRACE_FL_MASK,
2669 			   rec->flags & FTRACE_FL_REGS ? " R" : "");
2670 	seq_printf(m, "\n");
2671 
2672 	return 0;
2673 }
2674 
2675 static const struct seq_operations show_ftrace_seq_ops = {
2676 	.start = t_start,
2677 	.next = t_next,
2678 	.stop = t_stop,
2679 	.show = t_show,
2680 };
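
/*
 * Illustrative sketch of how the seq_operations above are driven: a
 * read(2) on one of the files below lands in seq_read(), which roughly
 * does
 *
 *	p = t_start(m, &pos);
 *	while (p) {
 *		t_show(m, p);
 *		p = t_next(m, p, &pos);
 *	}
 *	t_stop(m, p);
 *
 * which is why t_start() takes ftrace_lock and t_stop() releases it:
 * the lock is held across each batch of records.
 */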
2681 
2682 static int
2683 ftrace_avail_open(struct inode *inode, struct file *file)
2684 {
2685 	struct ftrace_iterator *iter;
2686 
2687 	if (unlikely(ftrace_disabled))
2688 		return -ENODEV;
2689 
2690 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2691 	if (iter) {
2692 		iter->pg = ftrace_pages_start;
2693 		iter->ops = &global_ops;
2694 	}
2695 
2696 	return iter ? 0 : -ENOMEM;
2697 }
2698 
2699 static int
2700 ftrace_enabled_open(struct inode *inode, struct file *file)
2701 {
2702 	struct ftrace_iterator *iter;
2703 
2704 	if (unlikely(ftrace_disabled))
2705 		return -ENODEV;
2706 
2707 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
2708 	if (iter) {
2709 		iter->pg = ftrace_pages_start;
2710 		iter->flags = FTRACE_ITER_ENABLED;
2711 		iter->ops = &global_ops;
2712 	}
2713 
2714 	return iter ? 0 : -ENOMEM;
2715 }
2716 
2717 static void ftrace_filter_reset(struct ftrace_hash *hash)
2718 {
2719 	mutex_lock(&ftrace_lock);
2720 	ftrace_hash_clear(hash);
2721 	mutex_unlock(&ftrace_lock);
2722 }
2723 
2724 /**
2725  * ftrace_regex_open - initialize function tracer filter files
2726  * @ops: The ftrace_ops that hold the hash filters
2727  * @flag: The type of filter to process
2728  * @inode: The inode, usually passed in to your open routine
2729  * @file: The file, usually passed in to your open routine
2730  *
2731  * ftrace_regex_open() initializes the filter files for the
2732  * @ops. Depending on @flag it may process the filter hash or
2733  * the notrace hash of @ops. With this called from the open
2734  * routine, you can use ftrace_filter_write() for the write
2735  * routine if @flag has FTRACE_ITER_FILTER set, or
2736  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
2737  * ftrace_filter_lseek() should be used as the lseek routine, and
2738  * release must call ftrace_regex_release().
2739  */
2740 int
2741 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2742 		  struct inode *inode, struct file *file)
2743 {
2744 	struct ftrace_iterator *iter;
2745 	struct ftrace_hash *hash;
2746 	int ret = 0;
2747 
2748 	ftrace_ops_init(ops);
2749 
2750 	if (unlikely(ftrace_disabled))
2751 		return -ENODEV;
2752 
2753 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2754 	if (!iter)
2755 		return -ENOMEM;
2756 
2757 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2758 		kfree(iter);
2759 		return -ENOMEM;
2760 	}
2761 
2762 	iter->ops = ops;
2763 	iter->flags = flag;
2764 
2765 	mutex_lock(&ops->regex_lock);
2766 
2767 	if (flag & FTRACE_ITER_NOTRACE)
2768 		hash = ops->notrace_hash;
2769 	else
2770 		hash = ops->filter_hash;
2771 
2772 	if (file->f_mode & FMODE_WRITE) {
2773 		iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2774 		if (!iter->hash) {
2775 			trace_parser_put(&iter->parser);
2776 			kfree(iter);
2777 			ret = -ENOMEM;
2778 			goto out_unlock;
2779 		}
2780 	}
2781 
2782 	if ((file->f_mode & FMODE_WRITE) &&
2783 	    (file->f_flags & O_TRUNC))
2784 		ftrace_filter_reset(iter->hash);
2785 
2786 	if (file->f_mode & FMODE_READ) {
2787 		iter->pg = ftrace_pages_start;
2788 
2789 		ret = seq_open(file, &show_ftrace_seq_ops);
2790 		if (!ret) {
2791 			struct seq_file *m = file->private_data;
2792 			m->private = iter;
2793 		} else {
2794 			/* Failed */
2795 			free_ftrace_hash(iter->hash);
2796 			trace_parser_put(&iter->parser);
2797 			kfree(iter);
2798 		}
2799 	} else
2800 		file->private_data = iter;
2801 
2802  out_unlock:
2803 	mutex_unlock(&ops->regex_lock);
2804 
2805 	return ret;
2806 }
2807 
2808 static int
2809 ftrace_filter_open(struct inode *inode, struct file *file)
2810 {
2811 	return ftrace_regex_open(&global_ops,
2812 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
2813 			inode, file);
2814 }
2815 
2816 static int
2817 ftrace_notrace_open(struct inode *inode, struct file *file)
2818 {
2819 	return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2820 				 inode, file);
2821 }
2822 
2823 static int ftrace_match(char *str, char *regex, int len, int type)
2824 {
2825 	int matched = 0;
2826 	int slen;
2827 
2828 	switch (type) {
2829 	case MATCH_FULL:
2830 		if (strcmp(str, regex) == 0)
2831 			matched = 1;
2832 		break;
2833 	case MATCH_FRONT_ONLY:
2834 		if (strncmp(str, regex, len) == 0)
2835 			matched = 1;
2836 		break;
2837 	case MATCH_MIDDLE_ONLY:
2838 		if (strstr(str, regex))
2839 			matched = 1;
2840 		break;
2841 	case MATCH_END_ONLY:
2842 		slen = strlen(str);
2843 		if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2844 			matched = 1;
2845 		break;
2846 	}
2847 
2848 	return matched;
2849 }
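
/*
 * Example (illustrative): the glob patterns written to the filter files
 * map onto the match types above as follows:
 *
 *	"schedule"	MATCH_FULL		(exact compare)
 *	"sched*"	MATCH_FRONT_ONLY	(prefix compare)
 *	"*sched*"	MATCH_MIDDLE_ONLY	(substring search)
 *	"*sched"	MATCH_END_ONLY		(suffix compare)
 *
 * filter_parse_regex() strips the '*'s and returns the type, so
 * ftrace_match() only ever sees the plain "sched" text in @regex.
 */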
2850 
2851 static int
2852 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2853 {
2854 	struct ftrace_func_entry *entry;
2855 	int ret = 0;
2856 
2857 	entry = ftrace_lookup_ip(hash, rec->ip);
2858 	if (not) {
2859 		/* Do nothing if it doesn't exist */
2860 		if (!entry)
2861 			return 0;
2862 
2863 		free_hash_entry(hash, entry);
2864 	} else {
2865 		/* Do nothing if it exists */
2866 		if (entry)
2867 			return 0;
2868 
2869 		ret = add_hash_entry(hash, rec->ip);
2870 	}
2871 	return ret;
2872 }
2873 
2874 static int
2875 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2876 		    char *regex, int len, int type)
2877 {
2878 	char str[KSYM_SYMBOL_LEN];
2879 	char *modname;
2880 
2881 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2882 
2883 	if (mod) {
2884 		/* module lookup requires matching the module */
2885 		if (!modname || strcmp(modname, mod))
2886 			return 0;
2887 
2888 		/* blank search means to match all funcs in the mod */
2889 		if (!len)
2890 			return 1;
2891 	}
2892 
2893 	return ftrace_match(str, regex, len, type);
2894 }
2895 
2896 static int
2897 match_records(struct ftrace_hash *hash, char *buff,
2898 	      int len, char *mod, int not)
2899 {
2900 	unsigned search_len = 0;
2901 	struct ftrace_page *pg;
2902 	struct dyn_ftrace *rec;
2903 	int type = MATCH_FULL;
2904 	char *search = buff;
2905 	int found = 0;
2906 	int ret;
2907 
2908 	if (len) {
2909 		type = filter_parse_regex(buff, len, &search, &not);
2910 		search_len = strlen(search);
2911 	}
2912 
2913 	mutex_lock(&ftrace_lock);
2914 
2915 	if (unlikely(ftrace_disabled))
2916 		goto out_unlock;
2917 
2918 	do_for_each_ftrace_rec(pg, rec) {
2919 		if (ftrace_match_record(rec, mod, search, search_len, type)) {
2920 			ret = enter_record(hash, rec, not);
2921 			if (ret < 0) {
2922 				found = ret;
2923 				goto out_unlock;
2924 			}
2925 			found = 1;
2926 		}
2927 	} while_for_each_ftrace_rec();
2928  out_unlock:
2929 	mutex_unlock(&ftrace_lock);
2930 
2931 	return found;
2932 }
2933 
2934 static int
2935 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2936 {
2937 	return match_records(hash, buff, len, NULL, 0);
2938 }
2939 
2940 static int
2941 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2942 {
2943 	int not = 0;
2944 
2945 	/* blank or '*' mean the same */
2946 	if (strcmp(buff, "*") == 0)
2947 		buff[0] = 0;
2948 
2949 	/* handle the case of 'don't filter this module' */
2950 	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2951 		buff[0] = 0;
2952 		not = 1;
2953 	}
2954 
2955 	return match_records(hash, buff, strlen(buff), mod, not);
2956 }
2957 
2958 /*
2959  * We register the module command as a template to show others how
2960  * to register a command as well.
2961  */
2962 
2963 static int
2964 ftrace_mod_callback(struct ftrace_hash *hash,
2965 		    char *func, char *cmd, char *param, int enable)
2966 {
2967 	char *mod;
2968 	int ret = -EINVAL;
2969 
2970 	/*
2971 	 * cmd == 'mod' because we only registered this func
2972 	 * for the 'mod' ftrace_func_command.
2973 	 * But if you register one func with multiple commands,
2974 	 * you can tell which command was used by the cmd
2975 	 * parameter.
2976 	 */
2977 
2978 	/* we must have a module name */
2979 	if (!param)
2980 		return ret;
2981 
2982 	mod = strsep(&param, ":");
2983 	if (!strlen(mod))
2984 		return ret;
2985 
2986 	ret = ftrace_match_module_records(hash, func, mod);
2987 	if (!ret)
2988 		ret = -EINVAL;
2989 	if (ret < 0)
2990 		return ret;
2991 
2992 	return 0;
2993 }
2994 
2995 static struct ftrace_func_command ftrace_mod_cmd = {
2996 	.name			= "mod",
2997 	.func			= ftrace_mod_callback,
2998 };
2999 
3000 static int __init ftrace_mod_cmd_init(void)
3001 {
3002 	return register_ftrace_command(&ftrace_mod_cmd);
3003 }
3004 core_initcall(ftrace_mod_cmd_init);
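
/*
 * Example (from the shell; illustrative paths): the "mod" command
 * registered above is used through set_ftrace_filter with the
 * <function>:mod:<module> syntax:
 *
 *	# trace every function of the ext3 module
 *	echo ':mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 *	# trace only the write_* functions of ext3
 *	echo 'write*:mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 */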
3005 
3006 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
3007 				      struct ftrace_ops *op, struct pt_regs *pt_regs)
3008 {
3009 	struct ftrace_func_probe *entry;
3010 	struct hlist_head *hhd;
3011 	unsigned long key;
3012 
3013 	key = hash_long(ip, FTRACE_HASH_BITS);
3014 
3015 	hhd = &ftrace_func_hash[key];
3016 
3017 	if (hlist_empty(hhd))
3018 		return;
3019 
3020 	/*
3021 	 * Disable preemption for these calls to prevent an RCU grace
3022 	 * period. This syncs the hash iteration and freeing of items
3023 	 * on the hash. rcu_read_lock is too dangerous here.
3024 	 */
3025 	preempt_disable_notrace();
3026 	hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
3027 		if (entry->ip == ip)
3028 			entry->ops->func(ip, parent_ip, &entry->data);
3029 	}
3030 	preempt_enable_notrace();
3031 }
3032 
3033 static struct ftrace_ops trace_probe_ops __read_mostly =
3034 {
3035 	.func		= function_trace_probe_call,
3036 	.flags		= FTRACE_OPS_FL_INITIALIZED,
3037 	INIT_REGEX_LOCK(trace_probe_ops)
3038 };
3039 
3040 static int ftrace_probe_registered;
3041 
3042 static void __enable_ftrace_function_probe(void)
3043 {
3044 	int ret;
3045 	int i;
3046 
3047 	if (ftrace_probe_registered) {
3048 		/* still need to update the function call sites */
3049 		if (ftrace_enabled)
3050 			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3051 		return;
3052 	}
3053 
3054 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3055 		struct hlist_head *hhd = &ftrace_func_hash[i];
3056 		if (hhd->first)
3057 			break;
3058 	}
3059 	/* Nothing registered? */
3060 	if (i == FTRACE_FUNC_HASHSIZE)
3061 		return;
3062 
3063 	ret = __register_ftrace_function(&trace_probe_ops);
3064 	if (!ret)
3065 		ret = ftrace_startup(&trace_probe_ops, 0);
3066 
3067 	ftrace_probe_registered = 1;
3068 }
3069 
3070 static void __disable_ftrace_function_probe(void)
3071 {
3072 	int ret;
3073 	int i;
3074 
3075 	if (!ftrace_probe_registered)
3076 		return;
3077 
3078 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3079 		struct hlist_head *hhd = &ftrace_func_hash[i];
3080 		if (hhd->first)
3081 			return;
3082 	}
3083 
3084 	/* no more funcs left */
3085 	ret = __unregister_ftrace_function(&trace_probe_ops);
3086 	if (!ret)
3087 		ftrace_shutdown(&trace_probe_ops, 0);
3088 
3089 	ftrace_probe_registered = 0;
3090 }
3091 
3092 
3093 static void ftrace_free_entry(struct ftrace_func_probe *entry)
3094 {
3095 	if (entry->ops->free)
3096 		entry->ops->free(entry->ops, entry->ip, &entry->data);
3097 	kfree(entry);
3098 }
3099 
3100 int
3101 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3102 			      void *data)
3103 {
3104 	struct ftrace_func_probe *entry;
3105 	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3106 	struct ftrace_hash *hash;
3107 	struct ftrace_page *pg;
3108 	struct dyn_ftrace *rec;
3109 	int type, len, not;
3110 	unsigned long key;
3111 	int count = 0;
3112 	char *search;
3113 	int ret;
3114 
3115 	type = filter_parse_regex(glob, strlen(glob), &search, &not);
3116 	len = strlen(search);
3117 
3118 	/* we do not support '!' for function probes */
3119 	if (WARN_ON(not))
3120 		return -EINVAL;
3121 
3122 	mutex_lock(&trace_probe_ops.regex_lock);
3123 
3124 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3125 	if (!hash) {
3126 		count = -ENOMEM;
3127 		goto out;
3128 	}
3129 
3130 	if (unlikely(ftrace_disabled)) {
3131 		count = -ENODEV;
3132 		goto out;
3133 	}
3134 
3135 	mutex_lock(&ftrace_lock);
3136 
3137 	do_for_each_ftrace_rec(pg, rec) {
3138 
3139 		if (!ftrace_match_record(rec, NULL, search, len, type))
3140 			continue;
3141 
3142 		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3143 		if (!entry) {
3144 			/* If we did not process any, then return error */
3145 			if (!count)
3146 				count = -ENOMEM;
3147 			goto out_unlock;
3148 		}
3149 
3150 		count++;
3151 
3152 		entry->data = data;
3153 
3154 		/*
3155 		 * The caller might want to do something special
3156 		 * for each function we find. We call the callback
3157 		 * to give the caller an opportunity to do so.
3158 		 */
3159 		if (ops->init) {
3160 			if (ops->init(ops, rec->ip, &entry->data) < 0) {
3161 				/* caller does not like this func */
3162 				kfree(entry);
3163 				continue;
3164 			}
3165 		}
3166 
3167 		ret = enter_record(hash, rec, 0);
3168 		if (ret < 0) {
3169 			kfree(entry);
3170 			count = ret;
3171 			goto out_unlock;
3172 		}
3173 
3174 		entry->ops = ops;
3175 		entry->ip = rec->ip;
3176 
3177 		key = hash_long(entry->ip, FTRACE_HASH_BITS);
3178 		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3179 
3180 	} while_for_each_ftrace_rec();
3181 
3182 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3183 	if (ret < 0)
3184 		count = ret;
3185 
3186 	__enable_ftrace_function_probe();
3187 
3188  out_unlock:
3189 	mutex_unlock(&ftrace_lock);
3190  out:
3191 	mutex_unlock(&trace_probe_ops.regex_lock);
3192 	free_ftrace_hash(hash);
3193 
3194 	return count;
3195 }
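
/*
 * Example (minimal sketch; my_probe_func and my_probe_ops are
 * caller-defined, not part of this file): hook every function matching
 * "vfs_*" so that my_probe_func() is run from
 * function_trace_probe_call() each time one of them is hit:
 *
 *	static void my_probe_func(unsigned long ip,
 *				  unsigned long parent_ip, void **data)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func	= my_probe_func,
 *	};
 *
 *	ret = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *
 * A negative return is an error; otherwise it is the number of
 * functions hooked.
 */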
3196 
3197 enum {
3198 	PROBE_TEST_FUNC		= 1,
3199 	PROBE_TEST_DATA		= 2
3200 };
3201 
3202 static void
3203 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3204 				  void *data, int flags)
3205 {
3206 	struct ftrace_func_entry *rec_entry;
3207 	struct ftrace_func_probe *entry;
3208 	struct ftrace_func_probe *p;
3209 	struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
3210 	struct list_head free_list;
3211 	struct ftrace_hash *hash;
3212 	struct hlist_node *tmp;
3213 	char str[KSYM_SYMBOL_LEN];
3214 	int type = MATCH_FULL;
3215 	int i, len = 0;
3216 	char *search;
3217 
3218 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3219 		glob = NULL;
3220 	else if (glob) {
3221 		int not;
3222 
3223 		type = filter_parse_regex(glob, strlen(glob), &search, &not);
3224 		len = strlen(search);
3225 
3226 		/* we do not support '!' for function probes */
3227 		if (WARN_ON(not))
3228 			return;
3229 	}
3230 
3231 	mutex_lock(&trace_probe_ops.regex_lock);
3232 
3233 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3234 	if (!hash)
3235 		/* Hmm, should report this somehow */
3236 		goto out_unlock;
3237 
3238 	INIT_LIST_HEAD(&free_list);
3239 
3240 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3241 		struct hlist_head *hhd = &ftrace_func_hash[i];
3242 
3243 		hlist_for_each_entry_safe(entry, tmp, hhd, node) {
3244 
3245 			/* break up if statements for readability */
3246 			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
3247 				continue;
3248 
3249 			if ((flags & PROBE_TEST_DATA) && entry->data != data)
3250 				continue;
3251 
3252 			/* do this last, since it is the most expensive */
3253 			if (glob) {
3254 				kallsyms_lookup(entry->ip, NULL, NULL,
3255 						NULL, str);
3256 				if (!ftrace_match(str, glob, len, type))
3257 					continue;
3258 			}
3259 
3260 			rec_entry = ftrace_lookup_ip(hash, entry->ip);
3261 			/* It is possible more than one entry had this ip */
3262 			if (rec_entry)
3263 				free_hash_entry(hash, rec_entry);
3264 
3265 			hlist_del_rcu(&entry->node);
3266 			list_add(&entry->free_list, &free_list);
3267 		}
3268 	}
3269 	mutex_lock(&ftrace_lock);
3270 	__disable_ftrace_function_probe();
3271 	/*
3272 	 * Remove after the disable is called. Otherwise, if the last
3273 	 * probe is removed, a null hash means *all enabled*.
3274 	 */
3275 	ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3276 	synchronize_sched();
3277 	list_for_each_entry_safe(entry, p, &free_list, free_list) {
3278 		list_del(&entry->free_list);
3279 		ftrace_free_entry(entry);
3280 	}
3281 	mutex_unlock(&ftrace_lock);
3282 
3283  out_unlock:
3284 	mutex_unlock(&trace_probe_ops.regex_lock);
3285 	free_ftrace_hash(hash);
3286 }
3287 
3288 void
3289 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3290 				void *data)
3291 {
3292 	__unregister_ftrace_function_probe(glob, ops, data,
3293 					  PROBE_TEST_FUNC | PROBE_TEST_DATA);
3294 }
3295 
3296 void
3297 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
3298 {
3299 	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
3300 }
3301 
3302 void unregister_ftrace_function_probe_all(char *glob)
3303 {
3304 	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
3305 }
3306 
3307 static LIST_HEAD(ftrace_commands);
3308 static DEFINE_MUTEX(ftrace_cmd_mutex);
3309 
3310 int register_ftrace_command(struct ftrace_func_command *cmd)
3311 {
3312 	struct ftrace_func_command *p;
3313 	int ret = 0;
3314 
3315 	mutex_lock(&ftrace_cmd_mutex);
3316 	list_for_each_entry(p, &ftrace_commands, list) {
3317 		if (strcmp(cmd->name, p->name) == 0) {
3318 			ret = -EBUSY;
3319 			goto out_unlock;
3320 		}
3321 	}
3322 	list_add(&cmd->list, &ftrace_commands);
3323  out_unlock:
3324 	mutex_unlock(&ftrace_cmd_mutex);
3325 
3326 	return ret;
3327 }
3328 
3329 int unregister_ftrace_command(struct ftrace_func_command *cmd)
3330 {
3331 	struct ftrace_func_command *p, *n;
3332 	int ret = -ENODEV;
3333 
3334 	mutex_lock(&ftrace_cmd_mutex);
3335 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
3336 		if (strcmp(cmd->name, p->name) == 0) {
3337 			ret = 0;
3338 			list_del_init(&p->list);
3339 			goto out_unlock;
3340 		}
3341 	}
3342  out_unlock:
3343 	mutex_unlock(&ftrace_cmd_mutex);
3344 
3345 	return ret;
3346 }
3347 
3348 static int ftrace_process_regex(struct ftrace_hash *hash,
3349 				char *buff, int len, int enable)
3350 {
3351 	char *func, *command, *next = buff;
3352 	struct ftrace_func_command *p;
3353 	int ret = -EINVAL;
3354 
3355 	func = strsep(&next, ":");
3356 
3357 	if (!next) {
3358 		ret = ftrace_match_records(hash, func, len);
3359 		if (!ret)
3360 			ret = -EINVAL;
3361 		if (ret < 0)
3362 			return ret;
3363 		return 0;
3364 	}
3365 
3366 	/* command found */
3367 
3368 	command = strsep(&next, ":");
3369 
3370 	mutex_lock(&ftrace_cmd_mutex);
3371 	list_for_each_entry(p, &ftrace_commands, list) {
3372 		if (strcmp(p->name, command) == 0) {
3373 			ret = p->func(hash, func, command, next, enable);
3374 			goto out_unlock;
3375 		}
3376 	}
3377  out_unlock:
3378 	mutex_unlock(&ftrace_cmd_mutex);
3379 
3380 	return ret;
3381 }
3382 
3383 static ssize_t
3384 ftrace_regex_write(struct file *file, const char __user *ubuf,
3385 		   size_t cnt, loff_t *ppos, int enable)
3386 {
3387 	struct ftrace_iterator *iter;
3388 	struct trace_parser *parser;
3389 	ssize_t ret, read;
3390 
3391 	if (!cnt)
3392 		return 0;
3393 
3394 	if (file->f_mode & FMODE_READ) {
3395 		struct seq_file *m = file->private_data;
3396 		iter = m->private;
3397 	} else
3398 		iter = file->private_data;
3399 
3400 	if (unlikely(ftrace_disabled))
3401 		return -ENODEV;
3402 
3403 	/* iter->hash is a local copy, so we don't need regex_lock */
3404 
3405 	parser = &iter->parser;
3406 	read = trace_get_user(parser, ubuf, cnt, ppos);
3407 
3408 	if (read >= 0 && trace_parser_loaded(parser) &&
3409 	    !trace_parser_cont(parser)) {
3410 		ret = ftrace_process_regex(iter->hash, parser->buffer,
3411 					   parser->idx, enable);
3412 		trace_parser_clear(parser);
3413 		if (ret < 0)
3414 			goto out;
3415 	}
3416 
3417 	ret = read;
3418  out:
3419 	return ret;
3420 }
3421 
3422 ssize_t
3423 ftrace_filter_write(struct file *file, const char __user *ubuf,
3424 		    size_t cnt, loff_t *ppos)
3425 {
3426 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
3427 }
3428 
3429 ssize_t
3430 ftrace_notrace_write(struct file *file, const char __user *ubuf,
3431 		     size_t cnt, loff_t *ppos)
3432 {
3433 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
3434 }
3435 
3436 static int
3437 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3438 {
3439 	struct ftrace_func_entry *entry;
3440 
3441 	if (!ftrace_location(ip))
3442 		return -EINVAL;
3443 
3444 	if (remove) {
3445 		entry = ftrace_lookup_ip(hash, ip);
3446 		if (!entry)
3447 			return -ENOENT;
3448 		free_hash_entry(hash, entry);
3449 		return 0;
3450 	}
3451 
3452 	return add_hash_entry(hash, ip);
3453 }
3454 
3455 static void ftrace_ops_update_code(struct ftrace_ops *ops)
3456 {
3457 	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3458 		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
3459 }
3460 
3461 static int
3462 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3463 		unsigned long ip, int remove, int reset, int enable)
3464 {
3465 	struct ftrace_hash **orig_hash;
3466 	struct ftrace_hash *hash;
3467 	int ret;
3468 
3469 	/* All global ops use the global ops filters */
3470 	if (ops->flags & FTRACE_OPS_FL_GLOBAL)
3471 		ops = &global_ops;
3472 
3473 	if (unlikely(ftrace_disabled))
3474 		return -ENODEV;
3475 
3476 	mutex_lock(&ops->regex_lock);
3477 
3478 	if (enable)
3479 		orig_hash = &ops->filter_hash;
3480 	else
3481 		orig_hash = &ops->notrace_hash;
3482 
3483 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3484 	if (!hash) {
3485 		ret = -ENOMEM;
3486 		goto out_regex_unlock;
3487 	}
3488 
3489 	if (reset)
3490 		ftrace_filter_reset(hash);
3491 	if (buf && !ftrace_match_records(hash, buf, len)) {
3492 		ret = -EINVAL;
3493 		goto out_regex_unlock;
3494 	}
3495 	if (ip) {
3496 		ret = ftrace_match_addr(hash, ip, remove);
3497 		if (ret < 0)
3498 			goto out_regex_unlock;
3499 	}
3500 
3501 	mutex_lock(&ftrace_lock);
3502 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3503 	if (!ret)
3504 		ftrace_ops_update_code(ops);
3505 
3506 	mutex_unlock(&ftrace_lock);
3507 
3508  out_regex_unlock:
3509 	mutex_unlock(&ops->regex_lock);
3510 
3511 	free_ftrace_hash(hash);
3512 	return ret;
3513 }
3514 
3515 static int
3516 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
3517 		int reset, int enable)
3518 {
3519 	return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
3520 }
3521 
3522 /**
3523  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
3524  * @ops - the ops to set the filter with
3525  * @ip - the address to add to or remove from the filter.
3526  * @remove - non zero to remove the ip from the filter
3527  * @reset - non zero to reset all filters before applying this filter.
3528  *
3529  * Filters denote which functions should be enabled when tracing is enabled.
3530  * If @ip is NULL, it fails to update the filter.
3531  */
3532 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
3533 			 int remove, int reset)
3534 {
3535 	ftrace_ops_init(ops);
3536 	return ftrace_set_addr(ops, ip, remove, reset, 1);
3537 }
3538 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
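
/*
 * Example (minimal sketch; my_ops is a caller-defined ftrace_ops):
 * trace exactly one function by address, kprobe-style:
 *
 *	unsigned long addr = (unsigned long)some_traced_function;
 *
 *	ret = ftrace_set_filter_ip(&my_ops, addr, 0, 0);
 *	if (!ret)
 *		ret = register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_set_filter_ip(&my_ops, addr, 1, 0);
 *
 * where the final call (remove == 1) takes the address back out.
 */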
3539 
3540 static int
3541 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
3542 		 int reset, int enable)
3543 {
3544 	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
3545 }
3546 
3547 /**
3548  * ftrace_set_filter - set a function to filter on in ftrace
3549  * @ops - the ops to set the filter with
3550  * @buf - the string that holds the function filter text.
3551  * @len - the length of the string.
3552  * @reset - non zero to reset all filters before applying this filter.
3553  *
3554  * Filters denote which functions should be enabled when tracing is enabled.
3555  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
3556  */
3557 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
3558 		       int len, int reset)
3559 {
3560 	ftrace_ops_init(ops);
3561 	return ftrace_set_regex(ops, buf, len, reset, 1);
3562 }
3563 EXPORT_SYMBOL_GPL(ftrace_set_filter);
3564 
3565 /**
3566  * ftrace_set_notrace - set a function to not trace in ftrace
3567  * @ops - the ops to set the notrace filter with
3568  * @buf - the string that holds the function notrace text.
3569  * @len - the length of the string.
3570  * @reset - non zero to reset all filters before applying this filter.
3571  *
3572  * Notrace Filters denote which functions should not be enabled when tracing
3573  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
3574  * for tracing.
3575  */
3576 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
3577 			int len, int reset)
3578 {
3579 	ftrace_ops_init(ops);
3580 	return ftrace_set_regex(ops, buf, len, reset, 0);
3581 }
3582 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
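
/*
 * Example (minimal sketch; my_ops is caller-defined): combining the two
 * helpers above to limit an ops to the vfs_* functions except
 * vfs_read() before registering it:
 *
 *	ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
 *	ftrace_set_notrace(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *	register_ftrace_function(&my_ops);
 */
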
3583 /**
3584  * ftrace_set_global_filter - set a function to filter on with global tracers
3585  * @buf - the string that holds the function filter text.
3586  * @len - the length of the string.
3587  * @reset - non zero to reset all filters before applying this filter.
3588  *
3589  * Filters denote which functions should be enabled when tracing is enabled.
3590  * If @buf is NULL and reset is set, all functions will be enabled for
3591  * tracing. This updates the filter hash of the shared global_ops.
3592  */
3593 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
3594 {
3595 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
3596 }
3597 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
3598 
3599 /**
3600  * ftrace_set_global_notrace - set a function to not trace with global tracers
3601  * @buf - the string that holds the function notrace text.
3602  * @len - the length of the string.
3603  * @reset - non zero to reset all filters before applying this filter.
3604  *
3605  * Notrace filters denote which functions should not be enabled when
3606  * tracing is enabled. If @buf is NULL and reset is set, all functions
3607  * will be enabled for tracing. This updates the notrace hash of the
3608  * shared global_ops.
3609  */
3610 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
3611 {
3612 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
3613 }
3614 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
3615 
3616 /*
3617  * command line interface to allow users to set filters on boot up.
3618  */
3619 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
3620 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
3621 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
3622 
3623 /* Used by the function selftest to skip testing when a boot-time filter is set */
3624 bool ftrace_filter_param __initdata;
3625 
3626 static int __init set_ftrace_notrace(char *str)
3627 {
3628 	ftrace_filter_param = true;
3629 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
3630 	return 1;
3631 }
3632 __setup("ftrace_notrace=", set_ftrace_notrace);
3633 
3634 static int __init set_ftrace_filter(char *str)
3635 {
3636 	ftrace_filter_param = true;
3637 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
3638 	return 1;
3639 }
3640 __setup("ftrace_filter=", set_ftrace_filter);
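
/*
 * Example (illustrative kernel command line): the parameters above
 * allow filtering before userspace can write the tracing files, e.g.:
 *
 *	ftrace_filter=kmem_cache_*,kfree ftrace_notrace=rcu*
 *
 * With CONFIG_FUNCTION_GRAPH_TRACER, ftrace_graph_filter= (below)
 * seeds the graph tracer's function list the same way.
 */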
3641 
3642 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3643 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
3644 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
3645 
3646 static int __init set_graph_function(char *str)
3647 {
3648 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
3649 	return 1;
3650 }
3651 __setup("ftrace_graph_filter=", set_graph_function);
3652 
3653 static void __init set_ftrace_early_graph(char *buf)
3654 {
3655 	int ret;
3656 	char *func;
3657 
3658 	while (buf) {
3659 		func = strsep(&buf, ",");
3660 		/* we allow only one expression at a time */
3661 		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3662 				      func);
3663 		if (ret)
3664 			printk(KERN_DEBUG "ftrace: function %s not "
3665 					  "traceable\n", func);
3666 	}
3667 }
3668 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3669 
3670 void __init
3671 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
3672 {
3673 	char *func;
3674 
3675 	ftrace_ops_init(ops);
3676 
3677 	while (buf) {
3678 		func = strsep(&buf, ",");
3679 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
3680 	}
3681 }
3682 
3683 static void __init set_ftrace_early_filters(void)
3684 {
3685 	if (ftrace_filter_buf[0])
3686 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
3687 	if (ftrace_notrace_buf[0])
3688 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
3689 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3690 	if (ftrace_graph_buf[0])
3691 		set_ftrace_early_graph(ftrace_graph_buf);
3692 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3693 }
3694 
3695 int ftrace_regex_release(struct inode *inode, struct file *file)
3696 {
3697 	struct seq_file *m = (struct seq_file *)file->private_data;
3698 	struct ftrace_iterator *iter;
3699 	struct ftrace_hash **orig_hash;
3700 	struct trace_parser *parser;
3701 	int filter_hash;
3702 	int ret;
3703 
3704 	if (file->f_mode & FMODE_READ) {
3705 		iter = m->private;
3706 		seq_release(inode, file);
3707 	} else
3708 		iter = file->private_data;
3709 
3710 	parser = &iter->parser;
3711 	if (trace_parser_loaded(parser)) {
3712 		parser->buffer[parser->idx] = 0;
3713 		ftrace_match_records(iter->hash, parser->buffer, parser->idx);
3714 	}
3715 
3716 	trace_parser_put(parser);
3717 
3718 	mutex_lock(&iter->ops->regex_lock);
3719 
3720 	if (file->f_mode & FMODE_WRITE) {
3721 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
3722 
3723 		if (filter_hash)
3724 			orig_hash = &iter->ops->filter_hash;
3725 		else
3726 			orig_hash = &iter->ops->notrace_hash;
3727 
3728 		mutex_lock(&ftrace_lock);
3729 		ret = ftrace_hash_move(iter->ops, filter_hash,
3730 				       orig_hash, iter->hash);
3731 		if (!ret)
3732 			ftrace_ops_update_code(iter->ops);
3733 
3734 		mutex_unlock(&ftrace_lock);
3735 	}
3736 
3737 	mutex_unlock(&iter->ops->regex_lock);
3738 	free_ftrace_hash(iter->hash);
3739 	kfree(iter);
3740 
3741 	return 0;
3742 }
3743 
3744 static const struct file_operations ftrace_avail_fops = {
3745 	.open = ftrace_avail_open,
3746 	.read = seq_read,
3747 	.llseek = seq_lseek,
3748 	.release = seq_release_private,
3749 };
3750 
3751 static const struct file_operations ftrace_enabled_fops = {
3752 	.open = ftrace_enabled_open,
3753 	.read = seq_read,
3754 	.llseek = seq_lseek,
3755 	.release = seq_release_private,
3756 };
3757 
3758 static const struct file_operations ftrace_filter_fops = {
3759 	.open = ftrace_filter_open,
3760 	.read = seq_read,
3761 	.write = ftrace_filter_write,
3762 	.llseek = ftrace_filter_lseek,
3763 	.release = ftrace_regex_release,
3764 };
3765 
3766 static const struct file_operations ftrace_notrace_fops = {
3767 	.open = ftrace_notrace_open,
3768 	.read = seq_read,
3769 	.write = ftrace_notrace_write,
3770 	.llseek = ftrace_filter_lseek,
3771 	.release = ftrace_regex_release,
3772 };
3773 
3774 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3775 
3776 static DEFINE_MUTEX(graph_lock);
3777 
3778 int ftrace_graph_count;
3779 int ftrace_graph_filter_enabled;
3780 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
3781 
3782 static void *
3783 __g_next(struct seq_file *m, loff_t *pos)
3784 {
3785 	if (*pos >= ftrace_graph_count)
3786 		return NULL;
3787 	return &ftrace_graph_funcs[*pos];
3788 }
3789 
3790 static void *
3791 g_next(struct seq_file *m, void *v, loff_t *pos)
3792 {
3793 	(*pos)++;
3794 	return __g_next(m, pos);
3795 }
3796 
3797 static void *g_start(struct seq_file *m, loff_t *pos)
3798 {
3799 	mutex_lock(&graph_lock);
3800 
3801 	/* Nothing, tell g_show to print all functions are enabled */
3802 	if (!ftrace_graph_filter_enabled && !*pos)
3803 		return (void *)1;
3804 
3805 	return __g_next(m, pos);
3806 }
3807 
3808 static void g_stop(struct seq_file *m, void *p)
3809 {
3810 	mutex_unlock(&graph_lock);
3811 }
3812 
3813 static int g_show(struct seq_file *m, void *v)
3814 {
3815 	unsigned long *ptr = v;
3816 
3817 	if (!ptr)
3818 		return 0;
3819 
3820 	if (ptr == (unsigned long *)1) {
3821 		seq_printf(m, "#### all functions enabled ####\n");
3822 		return 0;
3823 	}
3824 
3825 	seq_printf(m, "%ps\n", (void *)*ptr);
3826 
3827 	return 0;
3828 }
3829 
3830 static const struct seq_operations ftrace_graph_seq_ops = {
3831 	.start = g_start,
3832 	.next = g_next,
3833 	.stop = g_stop,
3834 	.show = g_show,
3835 };
3836 
3837 static int
3838 ftrace_graph_open(struct inode *inode, struct file *file)
3839 {
3840 	int ret = 0;
3841 
3842 	if (unlikely(ftrace_disabled))
3843 		return -ENODEV;
3844 
3845 	mutex_lock(&graph_lock);
3846 	if ((file->f_mode & FMODE_WRITE) &&
3847 	    (file->f_flags & O_TRUNC)) {
3848 		ftrace_graph_filter_enabled = 0;
3849 		ftrace_graph_count = 0;
3850 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
3851 	}
3852 	mutex_unlock(&graph_lock);
3853 
3854 	if (file->f_mode & FMODE_READ)
3855 		ret = seq_open(file, &ftrace_graph_seq_ops);
3856 
3857 	return ret;
3858 }
3859 
3860 static int
3861 ftrace_graph_release(struct inode *inode, struct file *file)
3862 {
3863 	if (file->f_mode & FMODE_READ)
3864 		seq_release(inode, file);
3865 	return 0;
3866 }
3867 
3868 static int
3869 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
3870 {
3871 	struct dyn_ftrace *rec;
3872 	struct ftrace_page *pg;
3873 	int search_len;
3874 	int fail = 1;
3875 	int type, not;
3876 	char *search;
3877 	bool exists;
3878 	int i;
3879 
3880 	/* decode regex */
3881 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3882 	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3883 		return -EBUSY;
3884 
3885 	search_len = strlen(search);
3886 
3887 	mutex_lock(&ftrace_lock);
3888 
3889 	if (unlikely(ftrace_disabled)) {
3890 		mutex_unlock(&ftrace_lock);
3891 		return -ENODEV;
3892 	}
3893 
3894 	do_for_each_ftrace_rec(pg, rec) {
3895 
3896 		if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3897 			/* if it is in the array */
3898 			exists = false;
3899 			for (i = 0; i < *idx; i++) {
3900 				if (array[i] == rec->ip) {
3901 					exists = true;
3902 					break;
3903 				}
3904 			}
3905 
3906 			if (!not) {
3907 				fail = 0;
3908 				if (!exists) {
3909 					array[(*idx)++] = rec->ip;
3910 					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3911 						goto out;
3912 				}
3913 			} else {
3914 				if (exists) {
3915 					array[i] = array[--(*idx)];
3916 					array[*idx] = 0;
3917 					fail = 0;
3918 				}
3919 			}
3920 		}
3921 	} while_for_each_ftrace_rec();
3922 out:
3923 	mutex_unlock(&ftrace_lock);
3924 
3925 	if (fail)
3926 		return -EINVAL;
3927 
3928 	ftrace_graph_filter_enabled = !!(*idx);
3929 
3930 	return 0;
3931 }
3932 
3933 static ssize_t
3934 ftrace_graph_write(struct file *file, const char __user *ubuf,
3935 		   size_t cnt, loff_t *ppos)
3936 {
3937 	struct trace_parser parser;
3938 	ssize_t read, ret;
3939 
3940 	if (!cnt)
3941 		return 0;
3942 
3943 	mutex_lock(&graph_lock);
3944 
3945 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3946 		ret = -ENOMEM;
3947 		goto out_unlock;
3948 	}
3949 
3950 	read = trace_get_user(&parser, ubuf, cnt, ppos);
3951 
3952 	if (read >= 0 && trace_parser_loaded(&parser)) {
3953 		parser.buffer[parser.idx] = 0;
3954 
3955 		/* we allow only one expression at a time */
3956 		ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3957 					parser.buffer);
3958 		if (ret)
3959 			goto out_free;
3960 	}
3961 
3962 	ret = read;
3963 
3964 out_free:
3965 	trace_parser_put(&parser);
3966 out_unlock:
3967 	mutex_unlock(&graph_lock);
3968 
3969 	return ret;
3970 }
3971 
3972 static const struct file_operations ftrace_graph_fops = {
3973 	.open		= ftrace_graph_open,
3974 	.read		= seq_read,
3975 	.write		= ftrace_graph_write,
3976 	.llseek		= ftrace_filter_lseek,
3977 	.release	= ftrace_graph_release,
3978 };
3979 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3980 
3981 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3982 {
3983 
3984 	trace_create_file("available_filter_functions", 0444,
3985 			d_tracer, NULL, &ftrace_avail_fops);
3986 
3987 	trace_create_file("enabled_functions", 0444,
3988 			d_tracer, NULL, &ftrace_enabled_fops);
3989 
3990 	trace_create_file("set_ftrace_filter", 0644, d_tracer,
3991 			NULL, &ftrace_filter_fops);
3992 
3993 	trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3994 				    NULL, &ftrace_notrace_fops);
3995 
3996 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3997 	trace_create_file("set_graph_function", 0444, d_tracer,
3998 				    NULL,
3999 				    &ftrace_graph_fops);
4000 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4001 
4002 	return 0;
4003 }
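
/*
 * Example (from the shell; illustrative paths, assuming debugfs is
 * mounted at /sys/kernel/debug) of using the files created above:
 *
 *	cat /sys/kernel/debug/tracing/available_filter_functions
 *	echo 'schedule' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	echo '!schedule' > /sys/kernel/debug/tracing/set_ftrace_filter
 *	cat /sys/kernel/debug/tracing/enabled_functions
 */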
4004 
4005 static int ftrace_cmp_ips(const void *a, const void *b)
4006 {
4007 	const unsigned long *ipa = a;
4008 	const unsigned long *ipb = b;
4009 
4010 	if (*ipa > *ipb)
4011 		return 1;
4012 	if (*ipa < *ipb)
4013 		return -1;
4014 	return 0;
4015 }
4016 
4017 static void ftrace_swap_ips(void *a, void *b, int size)
4018 {
4019 	unsigned long *ipa = a;
4020 	unsigned long *ipb = b;
4021 	unsigned long t;
4022 
4023 	t = *ipa;
4024 	*ipa = *ipb;
4025 	*ipb = t;
4026 }
4027 
4028 static int ftrace_process_locs(struct module *mod,
4029 			       unsigned long *start,
4030 			       unsigned long *end)
4031 {
4032 	struct ftrace_page *start_pg;
4033 	struct ftrace_page *pg;
4034 	struct dyn_ftrace *rec;
4035 	unsigned long count;
4036 	unsigned long *p;
4037 	unsigned long addr;
4038 	unsigned long flags = 0; /* Shut up gcc */
4039 	int ret = -ENOMEM;
4040 
4041 	count = end - start;
4042 
4043 	if (!count)
4044 		return 0;
4045 
4046 	sort(start, count, sizeof(*start),
4047 	     ftrace_cmp_ips, ftrace_swap_ips);
4048 
4049 	start_pg = ftrace_allocate_pages(count);
4050 	if (!start_pg)
4051 		return -ENOMEM;
4052 
4053 	mutex_lock(&ftrace_lock);
4054 
4055 	/*
4056 	 * The core kernel and each module need their own pages, as
4057 	 * modules will free them when they are removed.
4058 	 * Force a new page to be allocated for modules.
4059 	 */
4060 	if (!mod) {
4061 		WARN_ON(ftrace_pages || ftrace_pages_start);
4062 		/* First initialization */
4063 		ftrace_pages = ftrace_pages_start = start_pg;
4064 	} else {
4065 		if (!ftrace_pages)
4066 			goto out;
4067 
4068 		if (WARN_ON(ftrace_pages->next)) {
4069 			/* Hmm, we have free pages? */
4070 			while (ftrace_pages->next)
4071 				ftrace_pages = ftrace_pages->next;
4072 		}
4073 
4074 		ftrace_pages->next = start_pg;
4075 	}
4076 
4077 	p = start;
4078 	pg = start_pg;
4079 	while (p < end) {
4080 		addr = ftrace_call_adjust(*p++);
4081 		/*
4082 		 * Some architecture linkers will pad between
4083 		 * the different mcount_loc sections of different
4084 		 * object files to satisfy alignments.
4085 		 * Skip any NULL pointers.
4086 		 */
4087 		if (!addr)
4088 			continue;
4089 
4090 		if (pg->index == pg->size) {
4091 			/* We should have allocated enough */
4092 			if (WARN_ON(!pg->next))
4093 				break;
4094 			pg = pg->next;
4095 		}
4096 
4097 		rec = &pg->records[pg->index++];
4098 		rec->ip = addr;
4099 	}
4100 
4101 	/* We should have used all pages */
4102 	WARN_ON(pg->next);
4103 
4104 	/* Assign the last page to ftrace_pages */
4105 	ftrace_pages = pg;
4106 
4107 	/* These new locations need to be initialized */
4108 	ftrace_new_pgs = start_pg;
4109 
4110 	/*
4111 	 * We only need to disable interrupts on start up
4112 	 * because we are modifying code that an interrupt
4113 	 * may execute, and the modification is not atomic.
4114 	 * But for modules, nothing runs the code we modify
4115 	 * until we are finished with it, and there's no
4116 	 * reason to cause large interrupt latencies while we do it.
4117 	 */
4118 	if (!mod)
4119 		local_irq_save(flags);
4120 	ftrace_update_code(mod);
4121 	if (!mod)
4122 		local_irq_restore(flags);
4123 	ret = 0;
4124  out:
4125 	mutex_unlock(&ftrace_lock);
4126 
4127 	return ret;
4128 }
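
/*
 * Illustrative sketch: once ftrace_process_locs() has hung the new
 * pages on the list, every record is reachable through the
 * do_for_each_ftrace_rec() iterator this file defines. A hypothetical
 * walk that counts all traced call sites (ftrace_lock must be held):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *	unsigned long nr = 0;
 *
 *	mutex_lock(&ftrace_lock);
 *	do_for_each_ftrace_rec(pg, rec) {
 *		nr++;
 *	} while_for_each_ftrace_rec();
 *	mutex_unlock(&ftrace_lock);
 */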
4129 
4130 #ifdef CONFIG_MODULES
4131 
4132 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4133 
4134 void ftrace_release_mod(struct module *mod)
4135 {
4136 	struct dyn_ftrace *rec;
4137 	struct ftrace_page **last_pg;
4138 	struct ftrace_page *pg;
4139 	int order;
4140 
4141 	mutex_lock(&ftrace_lock);
4142 
4143 	if (ftrace_disabled)
4144 		goto out_unlock;
4145 
4146 	/*
4147 	 * Each module has its own ftrace_pages; remove
4148 	 * them from the list.
4149 	 */
4150 	last_pg = &ftrace_pages_start;
4151 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4152 		rec = &pg->records[0];
4153 		if (within_module_core(rec->ip, mod)) {
4154 			/*
4155 			 * As core pages are first, the first
4156 			 * page should never be a module page.
4157 			 */
4158 			if (WARN_ON(pg == ftrace_pages_start))
4159 				goto out_unlock;
4160 
4161 			/* Check if we are deleting the last page */
4162 			if (pg == ftrace_pages)
4163 				ftrace_pages = next_to_ftrace_page(last_pg);
4164 
4165 			*last_pg = pg->next;
4166 			order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4167 			free_pages((unsigned long)pg->records, order);
4168 			kfree(pg);
4169 		} else
4170 			last_pg = &pg->next;
4171 	}
4172  out_unlock:
4173 	mutex_unlock(&ftrace_lock);
4174 }
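
/*
 * Worked example for the free above: ftrace_allocate_pages() sizes
 * pg->records in whole power-of-two page blocks, so a page whose
 * pg->size is 4 * ENTRIES_PER_PAGE came from a 4-page block;
 * get_count_order(4) == 2, and free_pages(..., 2) returns exactly
 * that order-2 block.
 */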
4175 
4176 static void ftrace_init_module(struct module *mod,
4177 			       unsigned long *start, unsigned long *end)
4178 {
4179 	if (ftrace_disabled || start == end)
4180 		return;
4181 	ftrace_process_locs(mod, start, end);
4182 }
4183 
4184 static int ftrace_module_notify_enter(struct notifier_block *self,
4185 				      unsigned long val, void *data)
4186 {
4187 	struct module *mod = data;
4188 
4189 	if (val == MODULE_STATE_COMING)
4190 		ftrace_init_module(mod, mod->ftrace_callsites,
4191 				   mod->ftrace_callsites +
4192 				   mod->num_ftrace_callsites);
4193 	return 0;
4194 }
4195 
4196 static int ftrace_module_notify_exit(struct notifier_block *self,
4197 				     unsigned long val, void *data)
4198 {
4199 	struct module *mod = data;
4200 
4201 	if (val == MODULE_STATE_GOING)
4202 		ftrace_release_mod(mod);
4203 
4204 	return 0;
4205 }
4206 #else
4207 static int ftrace_module_notify_enter(struct notifier_block *self,
4208 				      unsigned long val, void *data)
4209 {
4210 	return 0;
4211 }
4212 static int ftrace_module_notify_exit(struct notifier_block *self,
4213 				     unsigned long val, void *data)
4214 {
4215 	return 0;
4216 }
4217 #endif /* CONFIG_MODULES */
4218 
4219 struct notifier_block ftrace_module_enter_nb = {
4220 	.notifier_call = ftrace_module_notify_enter,
4221 	.priority = INT_MAX,	/* Run before anything that can use kprobes */
4222 };
4223 
4224 struct notifier_block ftrace_module_exit_nb = {
4225 	.notifier_call = ftrace_module_notify_exit,
4226 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
4227 };
4228 
4229 extern unsigned long __start_mcount_loc[];
4230 extern unsigned long __stop_mcount_loc[];
4231 
4232 void __init ftrace_init(void)
4233 {
4234 	unsigned long count, addr, flags;
4235 	int ret;
4236 
4237 	/* Keep the ftrace pointer to the stub */
4238 	addr = (unsigned long)ftrace_stub;
4239 
4240 	local_irq_save(flags);
4241 	ftrace_dyn_arch_init(&addr);
4242 	local_irq_restore(flags);
4243 
4244 	/* ftrace_dyn_arch_init places the return code in addr */
4245 	if (addr)
4246 		goto failed;
4247 
4248 	count = __stop_mcount_loc - __start_mcount_loc;
4249 
4250 	ret = ftrace_dyn_table_alloc(count);
4251 	if (ret)
4252 		goto failed;
4253 
4254 	last_ftrace_enabled = ftrace_enabled = 1;
4255 
4256 	ret = ftrace_process_locs(NULL,
4257 				  __start_mcount_loc,
4258 				  __stop_mcount_loc);
4259 
4260 	ret = register_module_notifier(&ftrace_module_enter_nb);
4261 	if (ret)
4262 		pr_warning("Failed to register ftrace module enter notifier\n");
4263 
4264 	ret = register_module_notifier(&ftrace_module_exit_nb);
4265 	if (ret)
4266 		pr_warning("Failed to register ftrace module exit notifier\n");
4267 
4268 	set_ftrace_early_filters();
4269 
4270 	return;
4271  failed:
4272 	ftrace_disabled = 1;
4273 }
4274 
4275 #else
4276 
4277 static struct ftrace_ops global_ops = {
4278 	.func			= ftrace_stub,
4279 	.flags			= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4280 	INIT_REGEX_LOCK(global_ops)
4281 };
4282 
4283 static int __init ftrace_nodyn_init(void)
4284 {
4285 	ftrace_enabled = 1;
4286 	return 0;
4287 }
4288 core_initcall(ftrace_nodyn_init);
4289 
4290 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
4291 static inline void ftrace_startup_enable(int command) { }
4292 /* Keep as macros so we do not need to define the commands */
4293 # define ftrace_startup(ops, command)			\
4294 	({						\
4295 		(ops)->flags |= FTRACE_OPS_FL_ENABLED;	\
4296 		0;					\
4297 	})
4298 # define ftrace_shutdown(ops, command)	do { } while (0)
4299 # define ftrace_startup_sysctl()	do { } while (0)
4300 # define ftrace_shutdown_sysctl()	do { } while (0)
4301 
4302 static inline int
4303 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
4304 {
4305 	return 1;
4306 }
4307 
4308 #endif /* CONFIG_DYNAMIC_FTRACE */
4309 
4310 static void
4311 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
4312 			struct ftrace_ops *op, struct pt_regs *regs)
4313 {
4314 	if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
4315 		return;
4316 
4317 	/*
4318 	 * Some of the ops may be dynamically allocated;
4319 	 * they must not be freed until after a synchronize_sched().
4320 	 */
4321 	preempt_disable_notrace();
4322 	trace_recursion_set(TRACE_CONTROL_BIT);
4323 	do_for_each_ftrace_op(op, ftrace_control_list) {
4324 		if (!(op->flags & FTRACE_OPS_FL_STUB) &&
4325 		    !ftrace_function_local_disabled(op) &&
4326 		    ftrace_ops_test(op, ip, regs))
4327 			op->func(ip, parent_ip, op, regs);
4328 	} while_for_each_ftrace_op(op);
4329 	trace_recursion_clear(TRACE_CONTROL_BIT);
4330 	preempt_enable_notrace();
4331 }
4332 
4333 static struct ftrace_ops control_ops = {
4334 	.func	= ftrace_ops_control_func,
4335 	.flags	= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
4336 	INIT_REGEX_LOCK(control_ops)
4337 };
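
/*
 * Sketch of the per-cpu knob tested above, using the
 * ftrace_function_local_enable()/ftrace_function_local_disable()
 * helpers from <linux/ftrace.h> (my_control_ops is a hypothetical
 * FTRACE_OPS_FL_CONTROL ops with its per-cpu disabled counter
 * allocated):
 *
 *	ftrace_function_local_disable(&my_control_ops);
 *	...	callback is now skipped on this CPU only ...
 *	ftrace_function_local_enable(&my_control_ops);
 *
 * This lets a user such as perf mute its callback on one CPU without
 * unregistering from the global control list.
 */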
4338 
4339 static inline void
4340 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4341 		       struct ftrace_ops *ignored, struct pt_regs *regs)
4342 {
4343 	struct ftrace_ops *op;
4344 	int bit;
4345 
4346 	if (function_trace_stop)
4347 		return;
4348 
4349 	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
4350 	if (bit < 0)
4351 		return;
4352 
4353 	/*
4354 	 * Some of the ops may be dynamically allocated;
4355 	 * they must not be freed until after a synchronize_sched().
4356 	 */
4357 	preempt_disable_notrace();
4358 	do_for_each_ftrace_op(op, ftrace_ops_list) {
4359 		if (ftrace_ops_test(op, ip, regs))
4360 			op->func(ip, parent_ip, op, regs);
4361 	} while_for_each_ftrace_op(op);
4362 	preempt_enable_notrace();
4363 	trace_clear_recursion(bit);
4364 }
4365 
4366 /*
4367  * Some archs only support passing ip and parent_ip. Even though
4368  * the list function ignores the op parameter, we do not want any
4369  * C side effects, where a function is called without the caller
4370  * sending a third parameter.
4371  * Archs are expected to support regs and ftrace_ops together:
4372  * if they support ftrace_ops, it is assumed they also support regs.
4373  * If callbacks want to use regs, they must either check for regs
4374  * being NULL, or for CONFIG_DYNAMIC_FTRACE_WITH_REGS.
4375  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
4376  * An architecture can pass partial regs with ftrace_ops and still
4377  * set ARCH_SUPPORTS_FTRACE_OPS.
4378  */
4379 #if ARCH_SUPPORTS_FTRACE_OPS
4380 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
4381 				 struct ftrace_ops *op, struct pt_regs *regs)
4382 {
4383 	__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
4384 }
4385 #else
4386 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
4387 {
4388 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
4389 }
4390 #endif
4391 
4392 static void clear_ftrace_swapper(void)
4393 {
4394 	struct task_struct *p;
4395 	int cpu;
4396 
4397 	get_online_cpus();
4398 	for_each_online_cpu(cpu) {
4399 		p = idle_task(cpu);
4400 		clear_tsk_trace_trace(p);
4401 	}
4402 	put_online_cpus();
4403 }
4404 
4405 static void set_ftrace_swapper(void)
4406 {
4407 	struct task_struct *p;
4408 	int cpu;
4409 
4410 	get_online_cpus();
4411 	for_each_online_cpu(cpu) {
4412 		p = idle_task(cpu);
4413 		set_tsk_trace_trace(p);
4414 	}
4415 	put_online_cpus();
4416 }
4417 
4418 static void clear_ftrace_pid(struct pid *pid)
4419 {
4420 	struct task_struct *p;
4421 
4422 	rcu_read_lock();
4423 	do_each_pid_task(pid, PIDTYPE_PID, p) {
4424 		clear_tsk_trace_trace(p);
4425 	} while_each_pid_task(pid, PIDTYPE_PID, p);
4426 	rcu_read_unlock();
4427 
4428 	put_pid(pid);
4429 }
4430 
4431 static void set_ftrace_pid(struct pid *pid)
4432 {
4433 	struct task_struct *p;
4434 
4435 	rcu_read_lock();
4436 	do_each_pid_task(pid, PIDTYPE_PID, p) {
4437 		set_tsk_trace_trace(p);
4438 	} while_each_pid_task(pid, PIDTYPE_PID, p);
4439 	rcu_read_unlock();
4440 }
4441 
4442 static void clear_ftrace_pid_task(struct pid *pid)
4443 {
4444 	if (pid == ftrace_swapper_pid)
4445 		clear_ftrace_swapper();
4446 	else
4447 		clear_ftrace_pid(pid);
4448 }
4449 
4450 static void set_ftrace_pid_task(struct pid *pid)
4451 {
4452 	if (pid == ftrace_swapper_pid)
4453 		set_ftrace_swapper();
4454 	else
4455 		set_ftrace_pid(pid);
4456 }
4457 
4458 static int ftrace_pid_add(int p)
4459 {
4460 	struct pid *pid;
4461 	struct ftrace_pid *fpid;
4462 	int ret = -EINVAL;
4463 
4464 	mutex_lock(&ftrace_lock);
4465 
4466 	if (!p)
4467 		pid = ftrace_swapper_pid;
4468 	else
4469 		pid = find_get_pid(p);
4470 
4471 	if (!pid)
4472 		goto out;
4473 
4474 	ret = 0;
4475 
4476 	list_for_each_entry(fpid, &ftrace_pids, list)
4477 		if (fpid->pid == pid)
4478 			goto out_put;
4479 
4480 	ret = -ENOMEM;
4481 
4482 	fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
4483 	if (!fpid)
4484 		goto out_put;
4485 
4486 	list_add(&fpid->list, &ftrace_pids);
4487 	fpid->pid = pid;
4488 
4489 	set_ftrace_pid_task(pid);
4490 
4491 	ftrace_update_pid_func();
4492 	ftrace_startup_enable(0);
4493 
4494 	mutex_unlock(&ftrace_lock);
4495 	return 0;
4496 
4497 out_put:
4498 	if (pid != ftrace_swapper_pid)
4499 		put_pid(pid);
4500 
4501 out:
4502 	mutex_unlock(&ftrace_lock);
4503 	return ret;
4504 }
4505 
4506 static void ftrace_pid_reset(void)
4507 {
4508 	struct ftrace_pid *fpid, *safe;
4509 
4510 	mutex_lock(&ftrace_lock);
4511 	list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
4512 		struct pid *pid = fpid->pid;
4513 
4514 		clear_ftrace_pid_task(pid);
4515 
4516 		list_del(&fpid->list);
4517 		kfree(fpid);
4518 	}
4519 
4520 	ftrace_update_pid_func();
4521 	ftrace_startup_enable(0);
4522 
4523 	mutex_unlock(&ftrace_lock);
4524 }
4525 
4526 static void *fpid_start(struct seq_file *m, loff_t *pos)
4527 {
4528 	mutex_lock(&ftrace_lock);
4529 
4530 	if (list_empty(&ftrace_pids) && (!*pos))
4531 		return (void *) 1;
4532 
4533 	return seq_list_start(&ftrace_pids, *pos);
4534 }
4535 
4536 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
4537 {
4538 	if (v == (void *)1)
4539 		return NULL;
4540 
4541 	return seq_list_next(v, &ftrace_pids, pos);
4542 }
4543 
4544 static void fpid_stop(struct seq_file *m, void *p)
4545 {
4546 	mutex_unlock(&ftrace_lock);
4547 }
4548 
4549 static int fpid_show(struct seq_file *m, void *v)
4550 {
4551 	const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
4552 
4553 	if (v == (void *)1) {
4554 		seq_printf(m, "no pid\n");
4555 		return 0;
4556 	}
4557 
4558 	if (fpid->pid == ftrace_swapper_pid)
4559 		seq_printf(m, "swapper tasks\n");
4560 	else
4561 		seq_printf(m, "%u\n", pid_vnr(fpid->pid));
4562 
4563 	return 0;
4564 }
4565 
4566 static const struct seq_operations ftrace_pid_sops = {
4567 	.start = fpid_start,
4568 	.next = fpid_next,
4569 	.stop = fpid_stop,
4570 	.show = fpid_show,
4571 };
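
/*
 * A rough sketch of how the seq_file core drives the four callbacks
 * above on a read (buffer management and error handling omitted):
 *
 *	p = fpid_start(m, &pos);
 *	while (p) {
 *		fpid_show(m, p);
 *		p = fpid_next(m, p, &pos);
 *	}
 *	fpid_stop(m, p);
 *
 * The (void *)1 cookie is how an empty ftrace_pids list still produces
 * the single "no pid" line.
 */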
4572 
4573 static int
4574 ftrace_pid_open(struct inode *inode, struct file *file)
4575 {
4576 	int ret = 0;
4577 
4578 	if ((file->f_mode & FMODE_WRITE) &&
4579 	    (file->f_flags & O_TRUNC))
4580 		ftrace_pid_reset();
4581 
4582 	if (file->f_mode & FMODE_READ)
4583 		ret = seq_open(file, &ftrace_pid_sops);
4584 
4585 	return ret;
4586 }
4587 
4588 static ssize_t
4589 ftrace_pid_write(struct file *filp, const char __user *ubuf,
4590 		   size_t cnt, loff_t *ppos)
4591 {
4592 	char buf[64], *tmp;
4593 	long val;
4594 	int ret;
4595 
4596 	if (cnt >= sizeof(buf))
4597 		return -EINVAL;
4598 
4599 	if (copy_from_user(&buf, ubuf, cnt))
4600 		return -EFAULT;
4601 
4602 	buf[cnt] = 0;
4603 
4604 	/*
4605 	 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
4606 	 * to clean the filter quietly.
4607 	 */
4608 	tmp = strstrip(buf);
4609 	if (strlen(tmp) == 0)
4610 		return 1;
4611 
4612 	ret = kstrtol(tmp, 10, &val);
4613 	if (ret < 0)
4614 		return ret;
4615 
4616 	ret = ftrace_pid_add(val);
4617 
4618 	return ret ? ret : cnt;
4619 }
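
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 1234 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo 0 > /sys/kernel/debug/tracing/set_ftrace_pid
 *	echo > /sys/kernel/debug/tracing/set_ftrace_pid
 *
 * The first adds pid 1234 to the filter, the second selects the
 * swapper tasks (see ftrace_pid_add() above), and the truncating open
 * of the third is what triggers ftrace_pid_reset() in
 * ftrace_pid_open(), clearing the filter.
 */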
4620 
4621 static int
4622 ftrace_pid_release(struct inode *inode, struct file *file)
4623 {
4624 	if (file->f_mode & FMODE_READ)
4625 		seq_release(inode, file);
4626 
4627 	return 0;
4628 }
4629 
4630 static const struct file_operations ftrace_pid_fops = {
4631 	.open		= ftrace_pid_open,
4632 	.write		= ftrace_pid_write,
4633 	.read		= seq_read,
4634 	.llseek		= ftrace_filter_lseek,
4635 	.release	= ftrace_pid_release,
4636 };
4637 
4638 static __init int ftrace_init_debugfs(void)
4639 {
4640 	struct dentry *d_tracer;
4641 
4642 	d_tracer = tracing_init_dentry();
4643 	if (!d_tracer)
4644 		return 0;
4645 
4646 	ftrace_init_dyn_debugfs(d_tracer);
4647 
4648 	trace_create_file("set_ftrace_pid", 0644, d_tracer,
4649 			    NULL, &ftrace_pid_fops);
4650 
4651 	ftrace_profile_debugfs(d_tracer);
4652 
4653 	return 0;
4654 }
4655 fs_initcall(ftrace_init_debugfs);
4656 
4657 /**
4658  * ftrace_kill - kill ftrace
4659  *
4660  * This function should be used by panic code. It stops ftrace
4661  * but in a not so nice way: tracing is disabled permanently
4662  * and the trace function is reset to the stub.
4663  */
4664 void ftrace_kill(void)
4665 {
4666 	ftrace_disabled = 1;
4667 	ftrace_enabled = 0;
4668 	clear_ftrace_function();
4669 }
4670 
4671 /**
4672  * ftrace_is_dead - Test if ftrace is dead or not.
4673  */
4674 int ftrace_is_dead(void)
4675 {
4676 	return ftrace_disabled;
4677 }
4678 
4679 /**
4680  * register_ftrace_function - register a function for profiling
4681  * @ops: ops structure that holds the function for profiling.
4682  *
4683  * Register a function to be called by all functions in the
4684  * kernel.
4685  *
4686  * Note: @ops->func and all the functions it calls must be labeled
4687  *       with "notrace", otherwise it will go into a
4688  *       recursive loop.
4689  */
4690 int register_ftrace_function(struct ftrace_ops *ops)
4691 {
4692 	int ret = -1;
4693 
4694 	ftrace_ops_init(ops);
4695 
4696 	mutex_lock(&ftrace_lock);
4697 
4698 	ret = __register_ftrace_function(ops);
4699 	if (!ret)
4700 		ret = ftrace_startup(ops, 0);
4701 
4702 	mutex_unlock(&ftrace_lock);
4703 
4704 	return ret;
4705 }
4706 EXPORT_SYMBOL_GPL(register_ftrace_function);
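
/*
 * Minimal usage sketch (my_trace_func and my_ops are hypothetical
 * names, not defined in this file):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip,
 *					  struct ftrace_ops *op,
 *					  struct pt_regs *regs)
 *	{
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_trace_func,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *
 * The callback body runs for every traced function in the kernel, so
 * it and everything it calls must be notrace; set RECURSION_SAFE only
 * when the callback really does protect against recursion.
 */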
4707 
4708 /**
4709  * unregister_ftrace_function - unregister a function for profiling.
4710  * @ops: ops structure that holds the function to unregister
4711  *
4712  * Unregister a function that was added to be called by ftrace profiling.
4713  */
4714 int unregister_ftrace_function(struct ftrace_ops *ops)
4715 {
4716 	int ret;
4717 
4718 	mutex_lock(&ftrace_lock);
4719 	ret = __unregister_ftrace_function(ops);
4720 	if (!ret)
4721 		ftrace_shutdown(ops, 0);
4722 	mutex_unlock(&ftrace_lock);
4723 
4724 	return ret;
4725 }
4726 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
4727 
4728 int
4729 ftrace_enable_sysctl(struct ctl_table *table, int write,
4730 		     void __user *buffer, size_t *lenp,
4731 		     loff_t *ppos)
4732 {
4733 	int ret = -ENODEV;
4734 
4735 	mutex_lock(&ftrace_lock);
4736 
4737 	if (unlikely(ftrace_disabled))
4738 		goto out;
4739 
4740 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
4741 
4742 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
4743 		goto out;
4744 
4745 	last_ftrace_enabled = !!ftrace_enabled;
4746 
4747 	if (ftrace_enabled) {
4748 
4749 		ftrace_startup_sysctl();
4750 
4751 		/* we are starting ftrace again */
4752 		if (ftrace_ops_list != &ftrace_list_end)
4753 			update_ftrace_function();
4754 
4755 	} else {
4756 		/* stopping ftrace calls (just send to ftrace_stub) */
4757 		ftrace_trace_function = ftrace_stub;
4758 
4759 		ftrace_shutdown_sysctl();
4760 	}
4761 
4762  out:
4763 	mutex_unlock(&ftrace_lock);
4764 	return ret;
4765 }
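
/*
 * Usage sketch: this handler backs the kernel.ftrace_enabled sysctl,
 * so function tracing can be toggled globally with, e.g.:
 *
 *	sysctl kernel.ftrace_enabled=0
 *
 * or by writing to /proc/sys/kernel/ftrace_enabled directly.
 */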
4766 
4767 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4768 
4769 static int ftrace_graph_active;
4770 static struct notifier_block ftrace_suspend_notifier;
4771 
4772 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
4773 {
4774 	return 0;
4775 }
4776 
4777 /* The callbacks that hook a function */
4778 trace_func_graph_ret_t ftrace_graph_return =
4779 			(trace_func_graph_ret_t)ftrace_stub;
4780 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
4781 
4782 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
4783 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
4784 {
4785 	int i;
4786 	int ret = 0;
4787 	unsigned long flags;
4788 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
4789 	struct task_struct *g, *t;
4790 
4791 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
4792 		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
4793 					* sizeof(struct ftrace_ret_stack),
4794 					GFP_KERNEL);
4795 		if (!ret_stack_list[i]) {
4796 			start = 0;
4797 			end = i;
4798 			ret = -ENOMEM;
4799 			goto free;
4800 		}
4801 	}
4802 
4803 	read_lock_irqsave(&tasklist_lock, flags);
4804 	do_each_thread(g, t) {
4805 		if (start == end) {
4806 			ret = -EAGAIN;
4807 			goto unlock;
4808 		}
4809 
4810 		if (t->ret_stack == NULL) {
4811 			atomic_set(&t->tracing_graph_pause, 0);
4812 			atomic_set(&t->trace_overrun, 0);
4813 			t->curr_ret_stack = -1;
4814 			/* Make sure the tasks see the -1 first: */
4815 			smp_wmb();
4816 			t->ret_stack = ret_stack_list[start++];
4817 		}
4818 	} while_each_thread(g, t);
4819 
4820 unlock:
4821 	read_unlock_irqrestore(&tasklist_lock, flags);
4822 free:
4823 	for (i = start; i < end; i++)
4824 		kfree(ret_stack_list[i]);
4825 	return ret;
4826 }
4827 
4828 static void
4829 ftrace_graph_probe_sched_switch(void *ignore,
4830 			struct task_struct *prev, struct task_struct *next)
4831 {
4832 	unsigned long long timestamp;
4833 	int index;
4834 
4835 	/*
4836 	 * Does the user want to count the time a function was asleep?
4837 	 * If so, do not update the time stamps.
4838 	 */
4839 	if (trace_flags & TRACE_ITER_SLEEP_TIME)
4840 		return;
4841 
4842 	timestamp = trace_clock_local();
4843 
4844 	prev->ftrace_timestamp = timestamp;
4845 
4846 	/* only process tasks that we timestamped */
4847 	if (!next->ftrace_timestamp)
4848 		return;
4849 
4850 	/*
4851 	 * Update all the counters in next to make up for the
4852 	 * time next was sleeping.
4853 	 */
4854 	timestamp -= next->ftrace_timestamp;
4855 
4856 	for (index = next->curr_ret_stack; index >= 0; index--)
4857 		next->ret_stack[index].calltime += timestamp;
4858 }
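
/*
 * Worked example for the adjustment above: if next slept for 5 ms,
 * then every frame still on its return stack gets
 *
 *	calltime += 5 ms
 *
 * so that when the return probe later computes rettime - calltime,
 * the sleep is not charged to the functions that were on the stack.
 */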
4859 
4860 /* Allocate a return stack for each task */
4861 static int start_graph_tracing(void)
4862 {
4863 	struct ftrace_ret_stack **ret_stack_list;
4864 	int ret, cpu;
4865 
4866 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
4867 				sizeof(struct ftrace_ret_stack *),
4868 				GFP_KERNEL);
4869 
4870 	if (!ret_stack_list)
4871 		return -ENOMEM;
4872 
4873 	/* The idle tasks' ret_stack will never be freed */
4874 	for_each_online_cpu(cpu) {
4875 		if (!idle_task(cpu)->ret_stack)
4876 			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
4877 	}
4878 
4879 	do {
4880 		ret = alloc_retstack_tasklist(ret_stack_list);
4881 	} while (ret == -EAGAIN);
4882 
4883 	if (!ret) {
4884 		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4885 		if (ret)
4886 			pr_info("ftrace_graph: Couldn't activate tracepoint"
4887 				" probe to kernel_sched_switch\n");
4888 	}
4889 
4890 	kfree(ret_stack_list);
4891 	return ret;
4892 }
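
/*
 * Worked example for the retry loop above: FTRACE_RETSTACK_ALLOC_SIZE
 * is 32, so with roughly 100 threads alive the do/while typically
 * makes four passes: three that hand out all 32 stacks and return
 * -EAGAIN, and a final one that covers the remaining threads, frees
 * its unused stacks and returns 0.
 */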
4893 
4894 /*
4895  * Hibernation protection.
4896  * The state of the current task is too unstable during
4897  * suspend/restore to disk. We want to protect against that.
4898  */
4899 static int
4900 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
4901 							void *unused)
4902 {
4903 	switch (state) {
4904 	case PM_HIBERNATION_PREPARE:
4905 		pause_graph_tracing();
4906 		break;
4907 
4908 	case PM_POST_HIBERNATION:
4909 		unpause_graph_tracing();
4910 		break;
4911 	}
4912 	return NOTIFY_DONE;
4913 }
4914 
4915 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
4916 			trace_func_graph_ent_t entryfunc)
4917 {
4918 	int ret = 0;
4919 
4920 	mutex_lock(&ftrace_lock);
4921 
4922 	/* we currently allow only one tracer registered at a time */
4923 	if (ftrace_graph_active) {
4924 		ret = -EBUSY;
4925 		goto out;
4926 	}
4927 
4928 	ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
4929 	register_pm_notifier(&ftrace_suspend_notifier);
4930 
4931 	ftrace_graph_active++;
4932 	ret = start_graph_tracing();
4933 	if (ret) {
4934 		ftrace_graph_active--;
4935 		goto out;
4936 	}
4937 
4938 	ftrace_graph_return = retfunc;
4939 	ftrace_graph_entry = entryfunc;
4940 
4941 	ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
4942 
4943 out:
4944 	mutex_unlock(&ftrace_lock);
4945 	return ret;
4946 }
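
/*
 * Minimal usage sketch (both callbacks are hypothetical names):
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	register_ftrace_graph(my_graph_return, my_graph_entry);
 *
 * Returning nonzero from the entry callback tells the graph tracer to
 * also hook this function's return; in the return callback,
 * trace->rettime - trace->calltime is the (sleep-adjusted) duration.
 */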
4947 
4948 void unregister_ftrace_graph(void)
4949 {
4950 	mutex_lock(&ftrace_lock);
4951 
4952 	if (unlikely(!ftrace_graph_active))
4953 		goto out;
4954 
4955 	ftrace_graph_active--;
4956 	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
4957 	ftrace_graph_entry = ftrace_graph_entry_stub;
4958 	ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
4959 	unregister_pm_notifier(&ftrace_suspend_notifier);
4960 	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
4961 
4962  out:
4963 	mutex_unlock(&ftrace_lock);
4964 }
4965 
4966 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
4967 
4968 static void
4969 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
4970 {
4971 	atomic_set(&t->tracing_graph_pause, 0);
4972 	atomic_set(&t->trace_overrun, 0);
4973 	t->ftrace_timestamp = 0;
4974 	/* make curr_ret_stack visible before we add the ret_stack */
4975 	smp_wmb();
4976 	t->ret_stack = ret_stack;
4977 }
4978 
4979 /*
4980  * Allocate a return stack for the idle task. This may be the first
4981  * time through, or it may run again when a CPU comes online via hotplug.
4982  */
4983 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
4984 {
4985 	t->curr_ret_stack = -1;
4986 	/*
4987 	 * The idle task has no parent, it either has its own
4988 	 * stack or no stack at all.
4989 	 */
4990 	if (t->ret_stack)
4991 		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
4992 
4993 	if (ftrace_graph_active) {
4994 		struct ftrace_ret_stack *ret_stack;
4995 
4996 		ret_stack = per_cpu(idle_ret_stack, cpu);
4997 		if (!ret_stack) {
4998 			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
4999 					    * sizeof(struct ftrace_ret_stack),
5000 					    GFP_KERNEL);
5001 			if (!ret_stack)
5002 				return;
5003 			per_cpu(idle_ret_stack, cpu) = ret_stack;
5004 		}
5005 		graph_init_task(t, ret_stack);
5006 	}
5007 }
5008 
5009 /* Allocate a return stack for newly created task */
5010 void ftrace_graph_init_task(struct task_struct *t)
5011 {
5012 	/* Make sure we do not use the parent ret_stack */
5013 	t->ret_stack = NULL;
5014 	t->curr_ret_stack = -1;
5015 
5016 	if (ftrace_graph_active) {
5017 		struct ftrace_ret_stack *ret_stack;
5018 
5019 		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5020 				* sizeof(struct ftrace_ret_stack),
5021 				GFP_KERNEL);
5022 		if (!ret_stack)
5023 			return;
5024 		graph_init_task(t, ret_stack);
5025 	}
5026 }
5027 
5028 void ftrace_graph_exit_task(struct task_struct *t)
5029 {
5030 	struct ftrace_ret_stack	*ret_stack = t->ret_stack;
5031 
5032 	t->ret_stack = NULL;
5033 	/* NULL must become visible to IRQs before we free it: */
5034 	barrier();
5035 
5036 	kfree(ret_stack);
5037 }
5038 
5039 void ftrace_graph_stop(void)
5040 {
5041 	ftrace_stop();
5042 }
5043 #endif
5044