// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
#include <linux/kprobes.h>

#include <trace/events/sched.h>

#include <asm/sections.h>
#include <asm/setup.h>

#include "ftrace_internal.h"
#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

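/*
 * Usage sketch (illustrative, not a call site in this file): both macros
 * are GNU statement expressions that evaluate to the tested condition, so
 * a caller can branch on the very value that triggered the warning while
 * still shutting ftrace down on the first anomaly:
 *
 *	if (FTRACE_WARN_ON(!rec))
 *		return -EINVAL;
 */
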
/* hash bits for specific function selection */
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#endif

enum {
	FTRACE_MODIFY_ENABLE_FL		= (1 << 0),
	FTRACE_MODIFY_MAY_SLEEP_FL	= (1 << 1),
};

struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is the knob that turns ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

static bool ftrace_pids_enabled(struct ftrace_ops *ops)
{
	struct trace_array *tr;

	if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
		return false;

	tr = ops->private;

	return tr->function_pids != NULL || tr->function_no_pids != NULL;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

DEFINE_MUTEX(ftrace_lock);

struct ftrace_ops __rcu *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
struct ftrace_ops global_ops;

/* Defined by vmlinux.lds.h; see the comment above arch_ftrace_ops_list_func for details */
void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct ftrace_regs *fregs);

static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct trace_array *tr = op->private;
	int pid;

	if (tr) {
		pid = this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid);
		if (pid == FTRACE_PID_IGNORE)
			return;
		if (pid != FTRACE_PID_TRACE &&
		    pid != current->pid)
			return;
	}

	op->saved_func(ip, parent_ip, op, fregs);
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic, RCU, or per-CPU ops, or we force the list
	 * func, then it needs to call the list anyway.
	 */
	if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_RCU) ||
	    FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = rcu_dereference_protected(ftrace_ops_list,
						lockdep_is_held(&ftrace_lock));

	/* If there's no ftrace_ops registered, just call the stub function */
	if (set_function_trace_op == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (rcu_dereference_protected(ftrace_ops_list->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes, this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	synchronize_rcu_tasks_rude();
	/* Now all CPUs are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force an rmb on all CPUs */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

static void add_ftrace_ops(struct ftrace_ops __rcu **list,
			   struct ftrace_ops *ops)
{
	rcu_assign_pointer(ops->next, *list);

	/*
	 * We are entering ops into the list, but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops itself inserted into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

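/*
 * Reader-side sketch of why the publish order above matters (it mirrors
 * the walk done in ftrace_ops_trampoline() later in this file): ops->next
 * is set before *list points at ops, so a concurrent walker never follows
 * an uninitialized next pointer.
 *
 *	struct ftrace_ops *op;
 *	int nr_ops = 0;
 *
 *	preempt_disable_notrace();
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		nr_ops++;
 *	} while_for_each_ftrace_op(op);
 *	preempt_enable_notrace();
 */
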
static int remove_ftrace_ops(struct ftrace_ops __rcu **list,
			     struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last ftrace_ops, then simply point
	 * the list to the ftrace_stub.
	 */
	if (rcu_dereference_protected(*list,
			lockdep_is_held(&ftrace_lock)) == ops &&
	    rcu_dereference_protected(ops->next,
			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
	if (!ftrace_enabled && (ops->flags & FTRACE_OPS_FL_PERMANENT))
		return -EBUSY;

	if (!is_kernel_core_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and restore it at unregistering */
	ops->saved_func = ops->func;

	if (ftrace_pids_enabled(ops))
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

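/*
 * A minimal caller sketch (my_callback and my_ops are invented names;
 * module code would go through the public register_ftrace_function(),
 * which wraps __register_ftrace_function() with ftrace_lock held and
 * kicks off the actual code patching):
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op,
 *				struct ftrace_regs *fregs)
 *	{
 *		// runs for every function not filtered out by the hashes
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 */
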
int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = ftrace_pids_enabled(op) ?
				ftrace_pid_func : op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

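/*
 * Back-of-the-envelope capacity (illustrative; exact values depend on the
 * architecture and config): on a 64-bit kernel with 4096-byte pages and
 * CONFIG_FUNCTION_GRAPH_TRACER=y, struct ftrace_profile is roughly 48
 * bytes (two-pointer hlist_node + ip + counter + time + time_squared) and
 * the page header (next + index) is 16 bytes, so:
 *
 *	PROFILE_RECORDS_SIZE ~ 4096 - 16 = 4080
 *	PROFILES_PER_PAGE    ~ 4080 / 48 = 85 records per page
 */
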
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* with function graph, compare on total time */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* without function graph, compare against the hit counts */
static int function_stat_cmp(const void *p1, const void *p2)
{
	const struct ftrace_profile *a = p1;
	const struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		 "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		 "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = div64_ul(rec->time, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample variance (the "s^2" column) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Sample variance from the running sums:
		 * s^2 = (n * \Sum (x_i)^2 - (\Sum x_i)^2) / (n * (n-1))
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide by only 1000 for the ns^2 -> us^2 conversion;
		 * trace_print_graph_duration() will divide by 1000 again.
		 */
		stddev = div64_ul(stddev,
				  rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

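/*
 * Worked example of the variance above (made-up timings): two calls of
 * 100ns and 200ns give n = 2, sum = 300, sum of squares = 50000, so
 *
 *	s^2 = (2 * 50000 - 300^2) / (2 * 1) = 10000 / 2 = 5000 ns^2
 *
 * which matches the textbook sample variance of {100, 200}. The division
 * by 1000 here plus the one in trace_print_graph_duration() then scales
 * the value printed under the "s^2" column to us^2.
 */
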
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

static int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist, because
	 * dynamic tracing is what counts them. From past experience
	 * there are around 20K functions, which should be more than
	 * enough; it is highly unlikely we will execute every function
	 * in the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

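/*
 * Sizing sketch for the non-dynamic estimate above (using the rough
 * 85-records-per-page figure from the PROFILES_PER_PAGE note earlier):
 *
 *	pages = DIV_ROUND_UP(20000, 85) = 236 pages per CPU
 *
 * All pages are allocated and chained through pg->next up front, so the
 * profiling hot path never has to allocate.
 */
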
static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kcalloc(size, sizeof(struct hlist_head), GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated; this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again, since an NMI
	 * could have added it already.
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static bool fgraph_graph_time = true;

void ftrace_graph_graph_time_control(bool enable)
{
	fgraph_graph_time = enable;
}

static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	struct ftrace_ret_stack *ret_stack;

	function_profile_call(trace->func, 0, NULL, NULL);

	/* If function graph is shutting down, ret_stack can be NULL */
	if (!current->ret_stack)
		return 0;

	ret_stack = ftrace_graph_get_ret_stack(current, 0);
	if (ret_stack)
		ret_stack->subtime = 0;

	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_ret_stack *ret_stack;
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zeroed, ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {

		/* Append this call time to the parent time to subtract */
		ret_stack = ftrace_graph_get_ret_stack(current, 1);
		if (ret_stack)
			ret_stack->subtime += calltime;

		ret_stack = ftrace_graph_get_ret_stack(current, 0);
		if (ret_stack && ret_stack->subtime < calltime)
			calltime -= ret_stack->subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static struct fgraph_ops fprofiler_ops = {
	.entryfunc = &profile_graph_entry,
	.retfunc = &profile_graph_return,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&fprofiler_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph(&fprofiler_ops);
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler() calls stop_machine(),
			 * so this acts like a synchronize_rcu().
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

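/*
 * From userspace this file is the profiler's on/off switch, and the
 * per-CPU stat files registered in ftrace_profile_tracefs() below expose
 * the results (paths assume tracefs is mounted at /sys/kernel/tracing):
 *
 *	echo 1 > /sys/kernel/tracing/function_profile_enabled
 *	cat /sys/kernel/tracing/trace_stat/function0	# CPU 0's profile
 *	echo 0 > /sys/kernel/tracing/function_profile_enabled
 */
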
static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		name = kasprintf(GFP_KERNEL, "function%d", cpu);
		if (!name) {
			/*
			 * The files created are permanent; even if
			 * something goes wrong, we do not free this memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled",
				    TRACE_MODE_WRITE, d_tracer, NULL,
				    &ftrace_profile_fops);
	if (!entry)
		pr_warn("Could not create tracefs 'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

struct ftrace_func_probe {
	struct ftrace_probe_ops	*probe_ops;
	struct ftrace_ops	ops;
	struct trace_array	*tr;
	struct list_head	list;
	void			*data;
	int			ref;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read-only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * Used by the stack unwinder to know about dynamic ftrace trampolines.
 */
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr)
{
	struct ftrace_ops *op = NULL;

	/*
	 * Some of the ops may be dynamically allocated;
	 * they are freed after a synchronize_rcu().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				preempt_enable_notrace();
				return op;
			}
	} while_for_each_ftrace_op(op);
	preempt_enable_notrace();

	return NULL;
}

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	return ftrace_ops_trampoline(addr) != NULL;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			order;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static __always_inline unsigned long
ftrace_hash_key(struct ftrace_hash *hash, unsigned long ip)
{
	if (hash->size_bits > 0)
		return hash_long(ip, hash->size_bits);

	return 0;
}

/* Only use this function if ftrace_hash_empty() has already been tested */
static __always_inline struct ftrace_func_entry *
__ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	key = ftrace_hash_key(hash, ip);
	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

/**
 * ftrace_lookup_ip - Test to see if an ip exists in an ftrace_hash
 * @hash: The hash to look at
 * @ip: The instruction pointer to test
 *
 * Search a given @hash to see if a given instruction pointer (@ip)
 * exists in it.
 *
 * Returns the entry that holds the @ip if found. NULL otherwise.
 */
struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	if (ftrace_hash_empty(hash))
		return NULL;

	return __ftrace_lookup_ip(hash, ip);
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	key = ftrace_hash_key(hash, entry->ip);
	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del_rcu(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_mod(struct ftrace_mod_load *ftrace_mod)
{
	list_del(&ftrace_mod->list);
	kfree(ftrace_mod->module);
	kfree(ftrace_mod->func);
	kfree(ftrace_mod);
}

static void clear_ftrace_mod_list(struct list_head *head)
{
	struct ftrace_mod_load *p, *n;

	/* stack tracer isn't supported yet */
	if (!head)
		return;

	mutex_lock(&ftrace_lock);
	list_for_each_entry_safe(p, n, head, list)
		free_ftrace_mod(p);
	mutex_unlock(&ftrace_lock);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

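/*
 * A minimal sketch tying the hash primitives together (ip would be a
 * real rec->ip in actual callers; error handling elided):
 *
 *	struct ftrace_hash *hash;
 *
 *	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS); // 1024 buckets
 *	if (hash && !add_hash_entry(hash, ip) &&
 *	    ftrace_lookup_ip(hash, ip))
 *		;	// the entry is found in bucket hash_long(ip, 10)
 *	free_ftrace_hash(hash);
 */
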

static int ftrace_add_mod(struct trace_array *tr,
			  const char *func, const char *module,
			  int enable)
{
	struct ftrace_mod_load *ftrace_mod;
	struct list_head *mod_head = enable ? &tr->mod_trace : &tr->mod_notrace;

	ftrace_mod = kzalloc(sizeof(*ftrace_mod), GFP_KERNEL);
	if (!ftrace_mod)
		return -ENOMEM;

	ftrace_mod->func = kstrdup(func, GFP_KERNEL);
	ftrace_mod->module = kstrdup(module, GFP_KERNEL);
	ftrace_mod->enable = enable;

	if (!ftrace_mod->func || !ftrace_mod->module)
		goto out_free;

	list_add(&ftrace_mod->list, mod_head);

	return 0;

 out_free:
	free_ftrace_mod(ftrace_mod);

	return -ENOMEM;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	if (hash)
		new_hash->flags = hash->flags;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static struct ftrace_hash *dup_hash(struct ftrace_hash *src, int size)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	struct hlist_head *hhd;
	struct hlist_node *tn;
	int bits = 0;
	int i;

	/*
	 * Size the new hash to about half the number of entries
	 * (the highest set bit of size / 2); tiny sizes simply
	 * collapse to a one-bucket hash, which is fine.
	 */
	bits = fls(size / 2);

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return NULL;

	new_hash->flags = src->flags;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}
	return new_hash;
}

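/*
 * Bucket-count math for dup_hash() (illustrative): src->count = 100 gives
 * fls(50) = 6, so the new hash gets 1 << 6 = 64 buckets; src->count =
 * 9000 gives fls(4500) = 13, which is clamped to FTRACE_HASH_MAX_BITS
 * (12), i.e. 4096 buckets.
 */
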
static struct ftrace_hash *
__ftrace_hash_move(struct ftrace_hash *src)
{
	int size = src->count;

	/*
	 * If the new source is empty, just return the empty_hash.
	 */
	if (ftrace_hash_empty(src))
		return EMPTY_HASH;

	return dup_hash(src, size);
}

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_hash *new_hash;
	int ret;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	new_hash = __ftrace_hash_move(src);
	if (!new_hash)
		return -ENOMEM;

	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when the filter_hash is updated */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash, and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note that an empty filter
	 * hash matches everything, while an empty notrace hash
	 * excludes nothing.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		__ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !__ftrace_lookup_ip(hash->notrace_hash, ip));
}

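/*
 * Decision table for hash_contains_ip() (E = hash is empty, Y/N = ip
 * present/absent in a non-empty hash):
 *
 *	filter_hash	notrace_hash	match?
 *	E or Y		E or N		yes
 *	E or Y		Y		no
 *	N		any		no
 */
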
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu().
 */
int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops: the ftrace handler
	 * that wants regs may be called without them. We cannot
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	rcu_assign_pointer(hash.filter_hash, ops->func_hash->filter_hash);
	rcu_assign_pointer(hash.notrace_hash, ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for loop. Do not use 'break' to get out of it;
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

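/*
 * Usage sketch for the record iterator (target_ip is hypothetical; the
 * goto is the only safe early exit, since a 'break' would only leave the
 * inner loop and silently continue with the next ftrace_page):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto out;
 *	} while_for_each_ftrace_rec();
 * out:
 *	;
 */
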

static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	/* key->flags is overloaded to hold the end of the range */
	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec = NULL;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			break;
	}
	return rec;
}

/**
 * ftrace_location_range - return the first address of a traced location
 *	if it touches the given ip range
 * @start: start of range to search.
 * @end: end of range to search (inclusive). @end points to the last byte
 *	to check.
 *
 * Returns rec->ip if the related ftrace location is at least partly within
 * the given address range. That is, the first address of the instruction
 * that is either a NOP or a call to the function tracer. It checks the
 * ftrace internal tables to determine if the address belongs or not.
 */
unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct dyn_ftrace *rec;

	rec = lookup_rec(start, end);
	if (rec)
		return rec->ip;

	return 0;
}

/**
 * ftrace_location - return the address if the given ip is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if the given @ip is a pointer to an ftrace location.
 * That is, the instruction that is either a NOP or a call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

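/*
 * Typical use: a nonzero return means the address is a patchable
 * mcount/fentry site. kprobes, for one, uses this to decide whether a
 * probe at @addr can be implemented on top of ftrace:
 *
 *	if (ftrace_location(addr))
 *		;	// addr can be hooked via ftrace
 */
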
/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if the range from @start to @end contains an ftrace location.
 * That is, the instruction that is either a NOP or a call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if any ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have a non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);

static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	bool update = false;
	int count = 0;
	int all = false;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We update only the items in the hash.
	 *   As disabling notrace means enabling tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   is inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = true;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return false;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (rec->flags & FTRACE_FL_DISABLED)
			continue;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return false;

			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags |= FTRACE_FL_DIRECT;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return false;
			rec->flags--;

			/*
			 * Only the internal direct_ops should have the
			 * DIRECT flag set. Thus, if it is removing a
			 * function, then that function should no longer
			 * be direct.
			 */
			if (ops->flags & FTRACE_OPS_FL_DIRECT)
				rec->flags &= ~FTRACE_FL_DIRECT;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * TRAMP needs to be set only if the rec count is
			 * decremented to one and the ops that is left has a
			 * trampoline, as TRAMP can only be enabled when a
			 * single ops is attached to the record.
			 */
			if (ftrace_rec_count(rec) == 1 &&
			    ftrace_find_tramp_ops_any_other(rec, ops))
				rec->flags |= FTRACE_FL_TRAMP;
			else
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;

		/* Must match FTRACE_UPDATE_CALLS in ftrace_modify_all_code() */
		update |= ftrace_test_record(rec, true) != FTRACE_UPDATE_IGNORE;

		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return update;
	} while_for_each_ftrace_rec();

	return update;
}

1801 static bool ftrace_hash_rec_disable(struct ftrace_ops *ops,
1802 				    int filter_hash)
1803 {
1804 	return __ftrace_hash_rec_update(ops, filter_hash, 0);
1805 }
1806 
1807 static bool ftrace_hash_rec_enable(struct ftrace_ops *ops,
1808 				   int filter_hash)
1809 {
1810 	return __ftrace_hash_rec_update(ops, filter_hash, 1);
1811 }
1812 
1813 static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
1814 					  int filter_hash, int inc)
1815 {
1816 	struct ftrace_ops *op;
1817 
1818 	__ftrace_hash_rec_update(ops, filter_hash, inc);
1819 
1820 	if (ops->func_hash != &global_ops.local_hash)
1821 		return;
1822 
1823 	/*
1824 	 * If the ops shares the global_ops hash, then we need to update
1825 	 * all ops that are enabled and use this hash.
1826 	 */
1827 	do_for_each_ftrace_op(op, ftrace_ops_list) {
1828 		/* Already done */
1829 		if (op == ops)
1830 			continue;
1831 		if (op->func_hash == &global_ops.local_hash)
1832 			__ftrace_hash_rec_update(op, filter_hash, inc);
1833 	} while_for_each_ftrace_op(op);
1834 }
1835 
1836 static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
1837 					   int filter_hash)
1838 {
1839 	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
1840 }
1841 
1842 static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
1843 					  int filter_hash)
1844 {
1845 	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
1846 }
1847 
1848 /*
1849  * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1850  * or no-needed to update, -EBUSY if it detects a conflict of the flag
1851  * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1852  * Note that old_hash and new_hash has below meanings
1853  *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1854  *  - If the hash is EMPTY_HASH, it hits nothing
1855  *  - Anything else hits the recs which match the hash entries.
1856  */
1857 static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1858 					 struct ftrace_hash *old_hash,
1859 					 struct ftrace_hash *new_hash)
1860 {
1861 	struct ftrace_page *pg;
1862 	struct dyn_ftrace *rec, *end = NULL;
1863 	int in_old, in_new;
1864 
1865 	/* Only update if the ops has been registered */
1866 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1867 		return 0;
1868 
1869 	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1870 		return 0;
1871 
1872 	/*
1873 	 * Since the IPMODIFY is a very address sensitive action, we do not
1874 	 * allow ftrace_ops to set all functions to new hash.
1875 	 */
1876 	if (!new_hash || !old_hash)
1877 		return -EINVAL;
1878 
1879 	/* Update rec->flags */
1880 	do_for_each_ftrace_rec(pg, rec) {
1881 
1882 		if (rec->flags & FTRACE_FL_DISABLED)
1883 			continue;
1884 
1885 		/* We need to update only differences of filter_hash */
1886 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1887 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1888 		if (in_old == in_new)
1889 			continue;
1890 
1891 		if (in_new) {
1892 			/* New entries must ensure no others are using it */
1893 			if (rec->flags & FTRACE_FL_IPMODIFY)
1894 				goto rollback;
1895 			rec->flags |= FTRACE_FL_IPMODIFY;
1896 		} else /* Removed entry */
1897 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1898 	} while_for_each_ftrace_rec();
1899 
1900 	return 0;
1901 
1902 rollback:
1903 	end = rec;
1904 
1905 	/* Roll back what we did above */
1906 	do_for_each_ftrace_rec(pg, rec) {
1907 
1908 		if (rec->flags & FTRACE_FL_DISABLED)
1909 			continue;
1910 
1911 		if (rec == end)
1912 			goto err_out;
1913 
1914 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1915 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1916 		if (in_old == in_new)
1917 			continue;
1918 
1919 		if (in_new)
1920 			rec->flags &= ~FTRACE_FL_IPMODIFY;
1921 		else
1922 			rec->flags |= FTRACE_FL_IPMODIFY;
1923 	} while_for_each_ftrace_rec();
1924 
1925 err_out:
1926 	return -EBUSY;
1927 }
1928 
1929 static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1930 {
1931 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1932 
1933 	if (ftrace_hash_empty(hash))
1934 		hash = NULL;
1935 
1936 	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1937 }
1938 
1939 /* Disabling always succeeds */
1940 static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1941 {
1942 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
1943 
1944 	if (ftrace_hash_empty(hash))
1945 		hash = NULL;
1946 
1947 	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1948 }
1949 
1950 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1951 				       struct ftrace_hash *new_hash)
1952 {
1953 	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1954 
1955 	if (ftrace_hash_empty(old_hash))
1956 		old_hash = NULL;
1957 
1958 	if (ftrace_hash_empty(new_hash))
1959 		new_hash = NULL;
1960 
1961 	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1962 }
1963 
1964 static void print_ip_ins(const char *fmt, const unsigned char *p)
1965 {
1966 	char ins[MCOUNT_INSN_SIZE];
1967 	int i;
1968 
1969 	if (copy_from_kernel_nofault(ins, p, MCOUNT_INSN_SIZE)) {
1970 		printk(KERN_CONT "%s[FAULT] %px\n", fmt, p);
1971 		return;
1972 	}
1973 
1974 	printk(KERN_CONT "%s", fmt);
1975 
1976 	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1977 		printk(KERN_CONT "%s%02x", i ? ":" : "", ins[i]);
1978 }
1979 
1980 enum ftrace_bug_type ftrace_bug_type;
1981 const void *ftrace_expected;
1982 
1983 static void print_bug_type(void)
1984 {
1985 	switch (ftrace_bug_type) {
1986 	case FTRACE_BUG_UNKNOWN:
1987 		break;
1988 	case FTRACE_BUG_INIT:
1989 		pr_info("Initializing ftrace call sites\n");
1990 		break;
1991 	case FTRACE_BUG_NOP:
1992 		pr_info("Setting ftrace call site to NOP\n");
1993 		break;
1994 	case FTRACE_BUG_CALL:
1995 		pr_info("Setting ftrace call site to call ftrace function\n");
1996 		break;
1997 	case FTRACE_BUG_UPDATE:
1998 		pr_info("Updating ftrace call site to call a different ftrace function\n");
1999 		break;
2000 	}
2001 }
2002 
2003 /**
2004  * ftrace_bug - report and shutdown function tracer
2005  * @failed: The failed type (EFAULT, EINVAL, EPERM)
2006  * @rec: The record that failed
2007  *
2008  * The arch code that enables or disables the function tracing
2009  * can call ftrace_bug() when it has detected a problem in
2010  * modifying the code. @failed should be one of:
2011  * EFAULT - if the problem happens on reading the @ip address
2012  * EINVAL - if what is read at @ip is not what was expected
2013  * EPERM - if the problem happens on writing to the @ip address
2014  */
2015 void ftrace_bug(int failed, struct dyn_ftrace *rec)
2016 {
2017 	unsigned long ip = rec ? rec->ip : 0;
2018 
2019 	pr_info("------------[ ftrace bug ]------------\n");
2020 
2021 	switch (failed) {
2022 	case -EFAULT:
2023 		pr_info("ftrace faulted on modifying ");
2024 		print_ip_sym(KERN_INFO, ip);
2025 		break;
2026 	case -EINVAL:
2027 		pr_info("ftrace failed to modify ");
2028 		print_ip_sym(KERN_INFO, ip);
2029 		print_ip_ins(" actual:   ", (unsigned char *)ip);
2030 		pr_cont("\n");
2031 		if (ftrace_expected) {
2032 			print_ip_ins(" expected: ", ftrace_expected);
2033 			pr_cont("\n");
2034 		}
2035 		break;
2036 	case -EPERM:
2037 		pr_info("ftrace faulted on writing ");
2038 		print_ip_sym(KERN_INFO, ip);
2039 		break;
2040 	default:
2041 		pr_info("ftrace faulted on unknown error ");
2042 		print_ip_sym(KERN_INFO, ip);
2043 	}
2044 	print_bug_type();
2045 	if (rec) {
2046 		struct ftrace_ops *ops = NULL;
2047 
2048 		pr_info("ftrace record flags: %lx\n", rec->flags);
2049 		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2050 			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
2051 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
2052 			ops = ftrace_find_tramp_ops_any(rec);
2053 			if (ops) {
2054 				do {
2055 					pr_cont("\ttramp: %pS (%pS)",
2056 						(void *)ops->trampoline,
2057 						(void *)ops->func);
2058 					ops = ftrace_find_tramp_ops_next(rec, ops);
2059 				} while (ops);
2060 			} else
2061 				pr_cont("\ttramp: ERROR!");
2062 
2063 		}
2064 		ip = ftrace_get_addr_curr(rec);
2065 		pr_cont("\n expected tramp: %lx\n", ip);
2066 	}
2067 
2068 	FTRACE_WARN_ON_ONCE(1);
2069 }
2070 
2071 static int ftrace_check_record(struct dyn_ftrace *rec, bool enable, bool update)
2072 {
2073 	unsigned long flag = 0UL;
2074 
2075 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2076 
2077 	if (rec->flags & FTRACE_FL_DISABLED)
2078 		return FTRACE_UPDATE_IGNORE;
2079 
2080 	/*
2081 	 * If we are updating calls:
2082 	 *
2083 	 *   If the record has a ref count, then we need to enable it
2084 	 *   because someone is using it.
2085 	 *
2086 	 *   Otherwise we make sure it's disabled.
2087 	 *
2088 	 * If we are disabling calls, then disable all records that
2089 	 * are enabled.
2090 	 */
2091 	if (enable && ftrace_rec_count(rec))
2092 		flag = FTRACE_FL_ENABLED;
2093 
2094 	/*
2095 	 * If enabling and the REGS flag does not match the REGS_EN, or
2096 	 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2097 	 * this record. Set flags to fail the compare against ENABLED.
2098 	 * Same for direct calls.
2099 	 */
2100 	if (flag) {
2101 		if (!(rec->flags & FTRACE_FL_REGS) !=
2102 		    !(rec->flags & FTRACE_FL_REGS_EN))
2103 			flag |= FTRACE_FL_REGS;
2104 
2105 		if (!(rec->flags & FTRACE_FL_TRAMP) !=
2106 		    !(rec->flags & FTRACE_FL_TRAMP_EN))
2107 			flag |= FTRACE_FL_TRAMP;
2108 
2109 		/*
2110 		 * Direct calls are special, as count matters.
2111 		 * We must flag the record when the DIRECT and
2112 		 * DIRECT_EN states do not match, but only if the
2113 		 * count is 1. That's because, if the count is
2114 		 * anything other than one, we do not want the
2115 		 * direct call enabled (it will be done via the
2116 		 * direct helper). But if DIRECT_EN is set and
2117 		 * the count is not one, we need to clear it.
2118 		 */
2119 		if (ftrace_rec_count(rec) == 1) {
2120 			if (!(rec->flags & FTRACE_FL_DIRECT) !=
2121 			    !(rec->flags & FTRACE_FL_DIRECT_EN))
2122 				flag |= FTRACE_FL_DIRECT;
2123 		} else if (rec->flags & FTRACE_FL_DIRECT_EN) {
2124 			flag |= FTRACE_FL_DIRECT;
2125 		}
2126 	}
2127 
2128 	/* If the state of this record hasn't changed, then do nothing */
2129 	if ((rec->flags & FTRACE_FL_ENABLED) == flag)
2130 		return FTRACE_UPDATE_IGNORE;
2131 
2132 	if (flag) {
2133 		/* Save off if rec is being enabled (for return value) */
2134 		flag ^= rec->flags & FTRACE_FL_ENABLED;
2135 
2136 		if (update) {
2137 			rec->flags |= FTRACE_FL_ENABLED;
2138 			if (flag & FTRACE_FL_REGS) {
2139 				if (rec->flags & FTRACE_FL_REGS)
2140 					rec->flags |= FTRACE_FL_REGS_EN;
2141 				else
2142 					rec->flags &= ~FTRACE_FL_REGS_EN;
2143 			}
2144 			if (flag & FTRACE_FL_TRAMP) {
2145 				if (rec->flags & FTRACE_FL_TRAMP)
2146 					rec->flags |= FTRACE_FL_TRAMP_EN;
2147 				else
2148 					rec->flags &= ~FTRACE_FL_TRAMP_EN;
2149 			}
2150 
2151 			if (flag & FTRACE_FL_DIRECT) {
2152 				/*
2153 				 * If there's only one user (direct_ops helper)
2154 				 * then we can call the direct function
2155 				 * directly (no ftrace trampoline).
2156 				 */
2157 				if (ftrace_rec_count(rec) == 1) {
2158 					if (rec->flags & FTRACE_FL_DIRECT)
2159 						rec->flags |= FTRACE_FL_DIRECT_EN;
2160 					else
2161 						rec->flags &= ~FTRACE_FL_DIRECT_EN;
2162 				} else {
2163 					/*
2164 					 * Can only call directly if there's
2165 					 * only one callback to the function.
2166 					 */
2167 					rec->flags &= ~FTRACE_FL_DIRECT_EN;
2168 				}
2169 			}
2170 		}
2171 
2172 		/*
2173 		 * If this record is being updated from a nop, then
2174 		 *   return UPDATE_MAKE_CALL.
2175 		 * Otherwise,
2176 		 *   return UPDATE_MODIFY_CALL to tell the caller to convert
2177 		 *   from the save-regs to a non-save-regs function, or
2178 		 *   vice versa, or from a trampoline call.
2179 		 */
2180 		if (flag & FTRACE_FL_ENABLED) {
2181 			ftrace_bug_type = FTRACE_BUG_CALL;
2182 			return FTRACE_UPDATE_MAKE_CALL;
2183 		}
2184 
2185 		ftrace_bug_type = FTRACE_BUG_UPDATE;
2186 		return FTRACE_UPDATE_MODIFY_CALL;
2187 	}
2188 
2189 	if (update) {
2190 		/* If there's no more users, clear all flags */
2191 		if (!ftrace_rec_count(rec))
2192 			rec->flags = 0;
2193 		else
2194 			/*
2195 			 * Just disable the record, but keep the ops TRAMP
2196 			 * and REGS states. The _EN flags must be disabled though.
2197 			 */
2198 			rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2199 					FTRACE_FL_REGS_EN | FTRACE_FL_DIRECT_EN);
2200 	}
2201 
2202 	ftrace_bug_type = FTRACE_BUG_NOP;
2203 	return FTRACE_UPDATE_MAKE_NOP;
2204 }
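/*
 * Worked example for ftrace_check_record() above (added for
 * illustration): a record with one ops attached that is currently a
 * nop has no FTRACE_FL_ENABLED bit set.  Updating with enable == true
 * computes flag = FTRACE_FL_ENABLED, the state compare fails, and the
 * XOR "flag ^= rec->flags & FTRACE_FL_ENABLED" leaves ENABLED set, so
 * FTRACE_UPDATE_MAKE_CALL is returned.  If the record was instead
 * already enabled and only its REGS state changed, flag starts as
 * ENABLED | REGS, the XOR clears ENABLED, and
 * FTRACE_UPDATE_MODIFY_CALL is returned.
 */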
2205 
2206 /**
2207  * ftrace_update_record - set a record that now is tracing or not
2208  * @rec: the record to update
2209  * @enable: set to true if the record is tracing, false to force disable
2210  *
2211  * The records that represent all functions that can be traced need
2212  * to be updated when tracing has been enabled.
2213  */
2214 int ftrace_update_record(struct dyn_ftrace *rec, bool enable)
2215 {
2216 	return ftrace_check_record(rec, enable, true);
2217 }
2218 
2219 /**
2220  * ftrace_test_record - check if the record has been enabled or not
2221  * @rec: the record to test
2222  * @enable: set to true to check if enabled, false if it is disabled
2223  *
2224  * The arch code may need to test if a record is already set to
2225  * tracing to determine how to modify the function code that it
2226  * represents.
2227  */
2228 int ftrace_test_record(struct dyn_ftrace *rec, bool enable)
2229 {
2230 	return ftrace_check_record(rec, enable, false);
2231 }
2232 
2233 static struct ftrace_ops *
2234 ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2235 {
2236 	struct ftrace_ops *op;
2237 	unsigned long ip = rec->ip;
2238 
2239 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2240 
2241 		if (!op->trampoline)
2242 			continue;
2243 
2244 		if (hash_contains_ip(ip, op->func_hash))
2245 			return op;
2246 	} while_for_each_ftrace_op(op);
2247 
2248 	return NULL;
2249 }
2250 
2251 static struct ftrace_ops *
2252 ftrace_find_tramp_ops_any_other(struct dyn_ftrace *rec, struct ftrace_ops *op_exclude)
2253 {
2254 	struct ftrace_ops *op;
2255 	unsigned long ip = rec->ip;
2256 
2257 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2258 
2259 		if (op == op_exclude || !op->trampoline)
2260 			continue;
2261 
2262 		if (hash_contains_ip(ip, op->func_hash))
2263 			return op;
2264 	} while_for_each_ftrace_op(op);
2265 
2266 	return NULL;
2267 }
2268 
2269 static struct ftrace_ops *
2270 ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
2271 			   struct ftrace_ops *op)
2272 {
2273 	unsigned long ip = rec->ip;
2274 
2275 	while_for_each_ftrace_op(op) {
2276 
2277 		if (!op->trampoline)
2278 			continue;
2279 
2280 		if (hash_contains_ip(ip, op->func_hash))
2281 			return op;
2282 	}
2283 
2284 	return NULL;
2285 }
2286 
2287 static struct ftrace_ops *
2288 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2289 {
2290 	struct ftrace_ops *op;
2291 	unsigned long ip = rec->ip;
2292 
2293 	/*
2294 	 * Need to check removed ops first.
2295 	 * If they are being removed, and this rec has a tramp,
2296 	 * and this rec is in the ops list, then it would be the
2297 	 * one with the tramp.
2298 	 */
2299 	if (removed_ops) {
2300 		if (hash_contains_ip(ip, &removed_ops->old_hash))
2301 			return removed_ops;
2302 	}
2303 
2304 	/*
2305 	 * Need to find the current trampoline for a rec.
2306 	 * Now, a trampoline is only attached to a rec if there
2307 	 * was a single 'ops' attached to it. But this can be called
2308 	 * when we are adding another op to the rec or removing the
2309 	 * current one. Thus, if the op is being added, we can
2310 	 * ignore it because it hasn't attached itself to the rec
2311 	 * yet.
2312 	 *
2313 	 * If an ops is being modified (hooking to different functions)
2314 	 * then we don't care about the new functions that are being
2315 	 * added, just the old ones (that are probably being removed).
2316 	 *
2317 	 * If we are adding an ops to a function that is already using
2318 	 * a trampoline, that trampoline needs to be removed (trampolines
2319 	 * are only for single ops attached), so an ops that is not being
2320 	 * modified also needs to be checked.
2321 	 */
2322 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2323 
2324 		if (!op->trampoline)
2325 			continue;
2326 
2327 		/*
2328 		 * If the ops is being added, it hasn't gotten to
2329 		 * the point to be removed from this tree yet.
2330 		 */
2331 		if (op->flags & FTRACE_OPS_FL_ADDING)
2332 			continue;
2333 
2334 
2335 		/*
2336 		 * If the ops is being modified and is in the old
2337 		 * hash, then it is probably being removed from this
2338 		 * function.
2339 		 */
2340 		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2341 		    hash_contains_ip(ip, &op->old_hash))
2342 			return op;
2343 		/*
2344 		 * If the ops is not being added or modified, and it's
2345 		 * in its normal filter hash, then this must be the one
2346 		 * we want!
2347 		 */
2348 		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2349 		    hash_contains_ip(ip, op->func_hash))
2350 			return op;
2351 
2352 	} while_for_each_ftrace_op(op);
2353 
2354 	return NULL;
2355 }
2356 
2357 static struct ftrace_ops *
2358 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2359 {
2360 	struct ftrace_ops *op;
2361 	unsigned long ip = rec->ip;
2362 
2363 	do_for_each_ftrace_op(op, ftrace_ops_list) {
2364 		/* pass rec in as regs to have non-NULL val */
2365 		if (hash_contains_ip(ip, op->func_hash))
2366 			return op;
2367 	} while_for_each_ftrace_op(op);
2368 
2369 	return NULL;
2370 }
2371 
2372 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
2373 /* Protected by rcu_tasks for reading, and direct_mutex for writing */
2374 static struct ftrace_hash *direct_functions = EMPTY_HASH;
2375 static DEFINE_MUTEX(direct_mutex);
2376 int ftrace_direct_func_count;
2377 
2378 /*
2379  * Search the direct_functions hash to see if the given instruction pointer
2380  * has a direct caller attached to it.
2381  */
2382 unsigned long ftrace_find_rec_direct(unsigned long ip)
2383 {
2384 	struct ftrace_func_entry *entry;
2385 
2386 	entry = __ftrace_lookup_ip(direct_functions, ip);
2387 	if (!entry)
2388 		return 0;
2389 
2390 	return entry->direct;
2391 }
2392 
2393 static struct ftrace_func_entry*
2394 ftrace_add_rec_direct(unsigned long ip, unsigned long addr,
2395 		      struct ftrace_hash **free_hash)
2396 {
2397 	struct ftrace_func_entry *entry;
2398 
2399 	if (ftrace_hash_empty(direct_functions) ||
2400 	    direct_functions->count > 2 * (1 << direct_functions->size_bits)) {
2401 		struct ftrace_hash *new_hash;
2402 		int size = ftrace_hash_empty(direct_functions) ? 0 :
2403 			direct_functions->count + 1;
2404 
2405 		if (size < 32)
2406 			size = 32;
2407 
2408 		new_hash = dup_hash(direct_functions, size);
2409 		if (!new_hash)
2410 			return NULL;
2411 
2412 		*free_hash = direct_functions;
2413 		direct_functions = new_hash;
2414 	}
2415 
2416 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2417 	if (!entry)
2418 		return NULL;
2419 
2420 	entry->ip = ip;
2421 	entry->direct = addr;
2422 	__add_hash_entry(direct_functions, entry);
2423 	return entry;
2424 }
2425 
2426 static void call_direct_funcs(unsigned long ip, unsigned long pip,
2427 			      struct ftrace_ops *ops, struct ftrace_regs *fregs)
2428 {
2429 	struct pt_regs *regs = ftrace_get_regs(fregs);
2430 	unsigned long addr;
2431 
2432 	addr = ftrace_find_rec_direct(ip);
2433 	if (!addr)
2434 		return;
2435 
2436 	arch_ftrace_set_direct_caller(regs, addr);
2437 }
2438 
2439 struct ftrace_ops direct_ops = {
2440 	.func		= call_direct_funcs,
2441 	.flags		= FTRACE_OPS_FL_IPMODIFY
2442 			  | FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS
2443 			  | FTRACE_OPS_FL_PERMANENT,
2444 	/*
2445 	 * By declaring the main trampoline as this trampoline
2446 	 * it will never have one allocated for it. Allocated
2447 	 * trampolines should not call direct functions.
2448 	 * The direct_ops should only be called by the builtin
2449 	 * ftrace_regs_caller trampoline.
2450 	 */
2451 	.trampoline	= FTRACE_REGS_ADDR,
2452 };
2453 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
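/*
 * Usage sketch (illustrative; my_tramp is a hypothetical trampoline
 * symbol, typically written in arch assembly as in
 * samples/ftrace/ftrace-direct.c): a custom trampoline is attached to
 * a single call site through the direct API, which fills
 * direct_functions and hooks direct_ops above to that location:
 *
 *	ret = register_ftrace_direct((unsigned long)wake_up_process,
 *				     (unsigned long)my_tramp);
 *
 * unregister_ftrace_direct() with the same pair undoes the attachment.
 */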
2454 
2455 /**
2456  * ftrace_get_addr_new - Get the call address to set to
2457  * @rec:  The ftrace record descriptor
2458  *
2459  * If the record has FTRACE_FL_REGS set, it wants to convert to a
2460  * callback that saves all regs. If FTRACE_FL_REGS is not set, then
2461  * it wants to convert to the normal callback.
2462  *
2463  * Returns the address of the trampoline to set to
2464  */
2465 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2466 {
2467 	struct ftrace_ops *ops;
2468 	unsigned long addr;
2469 
2470 	if ((rec->flags & FTRACE_FL_DIRECT) &&
2471 	    (ftrace_rec_count(rec) == 1)) {
2472 		addr = ftrace_find_rec_direct(rec->ip);
2473 		if (addr)
2474 			return addr;
2475 		WARN_ON_ONCE(1);
2476 	}
2477 
2478 	/* Trampolines take precedence over regs */
2479 	if (rec->flags & FTRACE_FL_TRAMP) {
2480 		ops = ftrace_find_tramp_ops_new(rec);
2481 		if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
2482 			pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2483 				(void *)rec->ip, (void *)rec->ip, rec->flags);
2484 			/* Ftrace is shutting down, return anything */
2485 			return (unsigned long)FTRACE_ADDR;
2486 		}
2487 		return ops->trampoline;
2488 	}
2489 
2490 	if (rec->flags & FTRACE_FL_REGS)
2491 		return (unsigned long)FTRACE_REGS_ADDR;
2492 	else
2493 		return (unsigned long)FTRACE_ADDR;
2494 }
2495 
2496 /**
2497  * ftrace_get_addr_curr - Get the call address that is already there
2498  * @rec:  The ftrace record descriptor
2499  *
2500  * The FTRACE_FL_REGS_EN is set when the record already points to
2501  * a function that saves all the regs. Basically the '_EN' version
2502  * represents the current state of the function.
2503  *
2504  * Returns the address of the trampoline that is currently being called
2505  */
2506 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2507 {
2508 	struct ftrace_ops *ops;
2509 	unsigned long addr;
2510 
2511 	/* Direct calls take precedence over trampolines */
2512 	if (rec->flags & FTRACE_FL_DIRECT_EN) {
2513 		addr = ftrace_find_rec_direct(rec->ip);
2514 		if (addr)
2515 			return addr;
2516 		WARN_ON_ONCE(1);
2517 	}
2518 
2519 	/* Trampolines take precedence over regs */
2520 	if (rec->flags & FTRACE_FL_TRAMP_EN) {
2521 		ops = ftrace_find_tramp_ops_curr(rec);
2522 		if (FTRACE_WARN_ON(!ops)) {
2523 			pr_warn("Bad trampoline accounting at: %p (%pS)\n",
2524 				(void *)rec->ip, (void *)rec->ip);
2525 			/* Ftrace is shutting down, return anything */
2526 			return (unsigned long)FTRACE_ADDR;
2527 		}
2528 		return ops->trampoline;
2529 	}
2530 
2531 	if (rec->flags & FTRACE_FL_REGS_EN)
2532 		return (unsigned long)FTRACE_REGS_ADDR;
2533 	else
2534 		return (unsigned long)FTRACE_ADDR;
2535 }
2536 
2537 static int
2538 __ftrace_replace_code(struct dyn_ftrace *rec, bool enable)
2539 {
2540 	unsigned long ftrace_old_addr;
2541 	unsigned long ftrace_addr;
2542 	int ret;
2543 
2544 	ftrace_addr = ftrace_get_addr_new(rec);
2545 
2546 	/* This needs to be done before we call ftrace_update_record */
2547 	ftrace_old_addr = ftrace_get_addr_curr(rec);
2548 
2549 	ret = ftrace_update_record(rec, enable);
2550 
2551 	ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2552 
2553 	switch (ret) {
2554 	case FTRACE_UPDATE_IGNORE:
2555 		return 0;
2556 
2557 	case FTRACE_UPDATE_MAKE_CALL:
2558 		ftrace_bug_type = FTRACE_BUG_CALL;
2559 		return ftrace_make_call(rec, ftrace_addr);
2560 
2561 	case FTRACE_UPDATE_MAKE_NOP:
2562 		ftrace_bug_type = FTRACE_BUG_NOP;
2563 		return ftrace_make_nop(NULL, rec, ftrace_old_addr);
2564 
2565 	case FTRACE_UPDATE_MODIFY_CALL:
2566 		ftrace_bug_type = FTRACE_BUG_UPDATE;
2567 		return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
2568 	}
2569 
2570 	return -1; /* unknown ftrace bug */
2571 }
2572 
2573 void __weak ftrace_replace_code(int mod_flags)
2574 {
2575 	struct dyn_ftrace *rec;
2576 	struct ftrace_page *pg;
2577 	bool enable = mod_flags & FTRACE_MODIFY_ENABLE_FL;
2578 	int schedulable = mod_flags & FTRACE_MODIFY_MAY_SLEEP_FL;
2579 	int failed;
2580 
2581 	if (unlikely(ftrace_disabled))
2582 		return;
2583 
2584 	do_for_each_ftrace_rec(pg, rec) {
2585 
2586 		if (rec->flags & FTRACE_FL_DISABLED)
2587 			continue;
2588 
2589 		failed = __ftrace_replace_code(rec, enable);
2590 		if (failed) {
2591 			ftrace_bug(failed, rec);
2592 			/* Stop processing */
2593 			return;
2594 		}
2595 		if (schedulable)
2596 			cond_resched();
2597 	} while_for_each_ftrace_rec();
2598 }
2599 
2600 struct ftrace_rec_iter {
2601 	struct ftrace_page	*pg;
2602 	int			index;
2603 };
2604 
2605 /**
2606  * ftrace_rec_iter_start - start up iterating over traced functions
2607  *
2608  * Returns an iterator handle that is used to iterate over all
2609  * the records that represent address locations where functions
2610  * are traced.
2611  *
2612  * May return NULL if no records are available.
2613  */
2614 struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2615 {
2616 	/*
2617 	 * We only use a single iterator.
2618 	 * Protected by the ftrace_lock mutex.
2619 	 */
2620 	static struct ftrace_rec_iter ftrace_rec_iter;
2621 	struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2622 
2623 	iter->pg = ftrace_pages_start;
2624 	iter->index = 0;
2625 
2626 	/* Could have empty pages */
2627 	while (iter->pg && !iter->pg->index)
2628 		iter->pg = iter->pg->next;
2629 
2630 	if (!iter->pg)
2631 		return NULL;
2632 
2633 	return iter;
2634 }
2635 
2636 /**
2637  * ftrace_rec_iter_next - get the next record to process.
2638  * @iter: The handle to the iterator.
2639  *
2640  * Returns the next iterator after the given iterator @iter.
2641  */
2642 struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2643 {
2644 	iter->index++;
2645 
2646 	if (iter->index >= iter->pg->index) {
2647 		iter->pg = iter->pg->next;
2648 		iter->index = 0;
2649 
2650 		/* Could have empty pages */
2651 		while (iter->pg && !iter->pg->index)
2652 			iter->pg = iter->pg->next;
2653 	}
2654 
2655 	if (!iter->pg)
2656 		return NULL;
2657 
2658 	return iter;
2659 }
2660 
2661 /**
2662  * ftrace_rec_iter_record - get the record at the iterator location
2663  * @iter: The current iterator location
2664  *
2665  * Returns the record that the current @iter is at.
2666  */
2667 struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2668 {
2669 	return &iter->pg->records[iter->index];
2670 }
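/*
 * Typical use of the iterator above by arch code when patching call
 * sites (a minimal sketch; <linux/ftrace.h> also provides the
 * for_ftrace_rec_iter() helper that expands to this loop):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... inspect or patch the call site at rec->ip ...
 *	}
 */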
2671 
2672 static int
2673 ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
2674 {
2675 	int ret;
2676 
2677 	if (unlikely(ftrace_disabled))
2678 		return 0;
2679 
2680 	ret = ftrace_init_nop(mod, rec);
2681 	if (ret) {
2682 		ftrace_bug_type = FTRACE_BUG_INIT;
2683 		ftrace_bug(ret, rec);
2684 		return 0;
2685 	}
2686 	return 1;
2687 }
2688 
2689 /*
2690  * archs can override this function if they must do something
2691  * before the code modification is performed.
2692  */
2693 int __weak ftrace_arch_code_modify_prepare(void)
2694 {
2695 	return 0;
2696 }
2697 
2698 /*
2699  * archs can override this function if they must do something
2700  * after the modifying code is performed.
2701  */
2702 int __weak ftrace_arch_code_modify_post_process(void)
2703 {
2704 	return 0;
2705 }
2706 
2707 void ftrace_modify_all_code(int command)
2708 {
2709 	int update = command & FTRACE_UPDATE_TRACE_FUNC;
2710 	int mod_flags = 0;
2711 	int err = 0;
2712 
2713 	if (command & FTRACE_MAY_SLEEP)
2714 		mod_flags = FTRACE_MODIFY_MAY_SLEEP_FL;
2715 
2716 	/*
2717 	 * If the ftrace_caller calls a ftrace_ops func directly,
2718 	 * we need to make sure that it only traces functions it
2719 	 * expects to trace. When doing the switch of functions,
2720 	 * we need to update to the ftrace_ops_list_func first
2721 	 * before the transition between old and new calls is made,
2722 	 * as the ftrace_ops_list_func will check the ops hashes
2723 	 * to make sure the ops trace only the functions they are
2724 	 * meant to trace.
2725 	 */
2726 	if (update) {
2727 		err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2728 		if (FTRACE_WARN_ON(err))
2729 			return;
2730 	}
2731 
2732 	if (command & FTRACE_UPDATE_CALLS)
2733 		ftrace_replace_code(mod_flags | FTRACE_MODIFY_ENABLE_FL);
2734 	else if (command & FTRACE_DISABLE_CALLS)
2735 		ftrace_replace_code(mod_flags);
2736 
2737 	if (update && ftrace_trace_function != ftrace_ops_list_func) {
2738 		function_trace_op = set_function_trace_op;
2739 		smp_wmb();
2740 		/* If irqs are disabled, we are in stop machine */
2741 		if (!irqs_disabled())
2742 			smp_call_function(ftrace_sync_ipi, NULL, 1);
2743 		err = ftrace_update_ftrace_func(ftrace_trace_function);
2744 		if (FTRACE_WARN_ON(err))
2745 			return;
2746 	}
2747 
2748 	if (command & FTRACE_START_FUNC_RET)
2749 		err = ftrace_enable_ftrace_graph_caller();
2750 	else if (command & FTRACE_STOP_FUNC_RET)
2751 		err = ftrace_disable_ftrace_graph_caller();
2752 	FTRACE_WARN_ON(err);
2753 }
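/*
 * Example command composition (illustrative): a full enable with the
 * function graph tracer active would pass something like
 *
 *	ftrace_modify_all_code(FTRACE_UPDATE_CALLS |
 *			       FTRACE_UPDATE_TRACE_FUNC |
 *			       FTRACE_START_FUNC_RET);
 *
 * exercising, in order, the list-func switch, the call site updates,
 * the trace function switch and the graph caller enable above.
 */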
2754 
2755 static int __ftrace_modify_code(void *data)
2756 {
2757 	int *command = data;
2758 
2759 	ftrace_modify_all_code(*command);
2760 
2761 	return 0;
2762 }
2763 
2764 /**
2765  * ftrace_run_stop_machine - go back to the stop machine method
2766  * @command: The command to tell ftrace what to do
2767  *
2768  * If an arch needs to fall back to the stop machine method,
2769  * it can call this function.
2770  */
2771 void ftrace_run_stop_machine(int command)
2772 {
2773 	stop_machine(__ftrace_modify_code, &command, NULL);
2774 }
2775 
2776 /**
2777  * arch_ftrace_update_code - modify the code to trace or not trace
2778  * @command: The command that needs to be done
2779  *
2780  * Archs can override this function if they do not need to
2781  * run stop_machine() to modify code.
2782  */
2783 void __weak arch_ftrace_update_code(int command)
2784 {
2785 	ftrace_run_stop_machine(command);
2786 }
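/*
 * Sketch of an arch override (illustrative): an architecture that can
 * patch kernel text safely while other CPUs run may skip the
 * stop_machine() fallback entirely:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		ftrace_modify_all_code(command);
 *	}
 *
 * x86 does essentially this, relying on its text poking machinery
 * instead of stopping the machine.
 */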
2787 
2788 static void ftrace_run_update_code(int command)
2789 {
2790 	int ret;
2791 
2792 	ret = ftrace_arch_code_modify_prepare();
2793 	FTRACE_WARN_ON(ret);
2794 	if (ret)
2795 		return;
2796 
2797 	/*
2798 	 * By default we use stop_machine() to modify the code.
2799 	 * But archs can do whatever they want as long as it
2800 	 * is safe. The stop_machine() is the safest, but also
2801 	 * produces the most overhead.
2802 	 */
2803 	arch_ftrace_update_code(command);
2804 
2805 	ret = ftrace_arch_code_modify_post_process();
2806 	FTRACE_WARN_ON(ret);
2807 }
2808 
2809 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2810 				   struct ftrace_ops_hash *old_hash)
2811 {
2812 	ops->flags |= FTRACE_OPS_FL_MODIFYING;
2813 	ops->old_hash.filter_hash = old_hash->filter_hash;
2814 	ops->old_hash.notrace_hash = old_hash->notrace_hash;
2815 	ftrace_run_update_code(command);
2816 	ops->old_hash.filter_hash = NULL;
2817 	ops->old_hash.notrace_hash = NULL;
2818 	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2819 }
2820 
2821 static ftrace_func_t saved_ftrace_func;
2822 static int ftrace_start_up;
2823 
2824 void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2825 {
2826 }
2827 
2828 /* List of trace_ops that have allocated trampolines */
2829 static LIST_HEAD(ftrace_ops_trampoline_list);
2830 
2831 static void ftrace_add_trampoline_to_kallsyms(struct ftrace_ops *ops)
2832 {
2833 	lockdep_assert_held(&ftrace_lock);
2834 	list_add_rcu(&ops->list, &ftrace_ops_trampoline_list);
2835 }
2836 
2837 static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops)
2838 {
2839 	lockdep_assert_held(&ftrace_lock);
2840 	list_del_rcu(&ops->list);
2841 	synchronize_rcu();
2842 }
2843 
2844 /*
2845  * "__builtin__ftrace" is used as a module name in /proc/kallsyms for symbols
2846  * for pages allocated for ftrace purposes, even though "__builtin__ftrace" is
2847  * not a module.
2848  */
2849 #define FTRACE_TRAMPOLINE_MOD "__builtin__ftrace"
2850 #define FTRACE_TRAMPOLINE_SYM "ftrace_trampoline"
2851 
2852 static void ftrace_trampoline_free(struct ftrace_ops *ops)
2853 {
2854 	if (ops && (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP) &&
2855 	    ops->trampoline) {
2856 		/*
2857 		 * Record the text poke event before the ksymbol unregister
2858 		 * event.
2859 		 */
2860 		perf_event_text_poke((void *)ops->trampoline,
2861 				     (void *)ops->trampoline,
2862 				     ops->trampoline_size, NULL, 0);
2863 		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
2864 				   ops->trampoline, ops->trampoline_size,
2865 				   true, FTRACE_TRAMPOLINE_SYM);
2866 		/* Remove from kallsyms after the perf events */
2867 		ftrace_remove_trampoline_from_kallsyms(ops);
2868 	}
2869 
2870 	arch_ftrace_trampoline_free(ops);
2871 }
2872 
2873 static void ftrace_startup_enable(int command)
2874 {
2875 	if (saved_ftrace_func != ftrace_trace_function) {
2876 		saved_ftrace_func = ftrace_trace_function;
2877 		command |= FTRACE_UPDATE_TRACE_FUNC;
2878 	}
2879 
2880 	if (!command || !ftrace_enabled)
2881 		return;
2882 
2883 	ftrace_run_update_code(command);
2884 }
2885 
2886 static void ftrace_startup_all(int command)
2887 {
2888 	update_all_ops = true;
2889 	ftrace_startup_enable(command);
2890 	update_all_ops = false;
2891 }
2892 
2893 int ftrace_startup(struct ftrace_ops *ops, int command)
2894 {
2895 	int ret;
2896 
2897 	if (unlikely(ftrace_disabled))
2898 		return -ENODEV;
2899 
2900 	ret = __register_ftrace_function(ops);
2901 	if (ret)
2902 		return ret;
2903 
2904 	ftrace_start_up++;
2905 
2906 	/*
2907 	 * Note that ftrace probes use this to start up
2908 	 * and modify functions it will probe. But we still
2909 	 * set the ADDING flag for modification, as probes
2910 	 * do not have trampolines. If they add them in the
2911 	 * future, then the probes will need to distinguish
2912 	 * between adding and updating probes.
2913 	 */
2914 	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
2915 
2916 	ret = ftrace_hash_ipmodify_enable(ops);
2917 	if (ret < 0) {
2918 		/* Rollback registration process */
2919 		__unregister_ftrace_function(ops);
2920 		ftrace_start_up--;
2921 		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2922 		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2923 			ftrace_trampoline_free(ops);
2924 		return ret;
2925 	}
2926 
2927 	if (ftrace_hash_rec_enable(ops, 1))
2928 		command |= FTRACE_UPDATE_CALLS;
2929 
2930 	ftrace_startup_enable(command);
2931 
2932 	ops->flags &= ~FTRACE_OPS_FL_ADDING;
2933 
2934 	return 0;
2935 }
2936 
2937 int ftrace_shutdown(struct ftrace_ops *ops, int command)
2938 {
2939 	int ret;
2940 
2941 	if (unlikely(ftrace_disabled))
2942 		return -ENODEV;
2943 
2944 	ret = __unregister_ftrace_function(ops);
2945 	if (ret)
2946 		return ret;
2947 
2948 	ftrace_start_up--;
2949 	/*
2950 	 * Just warn in case of imbalance; no need to kill ftrace, it's not
2951 	 * critical, but the ftrace_call callers may never be nopped again after
2952 	 * further ftrace uses.
2953 	 */
2954 	WARN_ON_ONCE(ftrace_start_up < 0);
2955 
2956 	/* Disabling ipmodify never fails */
2957 	ftrace_hash_ipmodify_disable(ops);
2958 
2959 	if (ftrace_hash_rec_disable(ops, 1))
2960 		command |= FTRACE_UPDATE_CALLS;
2961 
2962 	ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2963 
2964 	if (saved_ftrace_func != ftrace_trace_function) {
2965 		saved_ftrace_func = ftrace_trace_function;
2966 		command |= FTRACE_UPDATE_TRACE_FUNC;
2967 	}
2968 
2969 	if (!command || !ftrace_enabled) {
2970 		/*
2971 		 * If these are dynamic or per_cpu ops, they still
2972 		 * need their data freed. Since function tracing is
2973 		 * not currently active, we can just free them
2974 		 * without synchronizing all CPUs.
2975 		 */
2976 		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
2977 			goto free_ops;
2978 
2979 		return 0;
2980 	}
2981 
2982 	/*
2983 	 * If the ops uses a trampoline, then it needs to be
2984 	 * tested first on update.
2985 	 */
2986 	ops->flags |= FTRACE_OPS_FL_REMOVING;
2987 	removed_ops = ops;
2988 
2989 	/* The trampoline logic checks the old hashes */
2990 	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2991 	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2992 
2993 	ftrace_run_update_code(command);
2994 
2995 	/*
2996 	 * If there's no more ops registered with ftrace, run a
2997 	 * sanity check to make sure all rec flags are cleared.
2998 	 */
2999 	if (rcu_dereference_protected(ftrace_ops_list,
3000 			lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
3001 		struct ftrace_page *pg;
3002 		struct dyn_ftrace *rec;
3003 
3004 		do_for_each_ftrace_rec(pg, rec) {
3005 			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
3006 				pr_warn("  %pS flags:%lx\n",
3007 					(void *)rec->ip, rec->flags);
3008 		} while_for_each_ftrace_rec();
3009 	}
3010 
3011 	ops->old_hash.filter_hash = NULL;
3012 	ops->old_hash.notrace_hash = NULL;
3013 
3014 	removed_ops = NULL;
3015 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
3016 
3017 	/*
3018 	 * Dynamic ops may be freed; we must make sure that all
3019 	 * callers are done before leaving this function.
3020 	 * The same goes for freeing the per_cpu data of the per_cpu
3021 	 * ops.
3022 	 */
3023 	if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
3024 		/*
3025 		 * We need to do a hard force of sched synchronization.
3026 		 * This is because we use preempt_disable() to do RCU, but
3027 		 * the function tracers can be called where RCU is not watching
3028 		 * (like before user_exit()). We cannot rely on the RCU
3029 		 * infrastructure to do the synchronization, thus we must do it
3030 		 * ourselves.
3031 		 */
3032 		synchronize_rcu_tasks_rude();
3033 
3034 		/*
3035 		 * When the kernel is preemptible, tasks can be preempted
3036 		 * while on a ftrace trampoline. Just scheduling a task on
3037 		 * a CPU is not good enough to flush them. Calling
3038 		 * synchronize_rcu_tasks() will wait for those tasks to
3039 		 * execute and either schedule voluntarily or enter user space.
3040 		 */
3041 		if (IS_ENABLED(CONFIG_PREEMPTION))
3042 			synchronize_rcu_tasks();
3043 
3044  free_ops:
3045 		ftrace_trampoline_free(ops);
3046 	}
3047 
3048 	return 0;
3049 }
3050 
3051 static void ftrace_startup_sysctl(void)
3052 {
3053 	int command;
3054 
3055 	if (unlikely(ftrace_disabled))
3056 		return;
3057 
3058 	/* Force update next time */
3059 	saved_ftrace_func = NULL;
3060 	/* ftrace_start_up is true if we want ftrace running */
3061 	if (ftrace_start_up) {
3062 		command = FTRACE_UPDATE_CALLS;
3063 		if (ftrace_graph_active)
3064 			command |= FTRACE_START_FUNC_RET;
3065 		ftrace_startup_enable(command);
3066 	}
3067 }
3068 
3069 static void ftrace_shutdown_sysctl(void)
3070 {
3071 	int command;
3072 
3073 	if (unlikely(ftrace_disabled))
3074 		return;
3075 
3076 	/* ftrace_start_up is true if ftrace is running */
3077 	if (ftrace_start_up) {
3078 		command = FTRACE_DISABLE_CALLS;
3079 		if (ftrace_graph_active)
3080 			command |= FTRACE_STOP_FUNC_RET;
3081 		ftrace_run_update_code(command);
3082 	}
3083 }
3084 
3085 static u64		ftrace_update_time;
3086 unsigned long		ftrace_update_tot_cnt;
3087 unsigned long		ftrace_number_of_pages;
3088 unsigned long		ftrace_number_of_groups;
3089 
3090 static inline int ops_traces_mod(struct ftrace_ops *ops)
3091 {
3092 	/*
3093 	 * An empty filter_hash defaults to tracing the module.
3094 	 * But the notrace hash requires a test of individual module functions.
3095 	 */
3096 	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
3097 		ftrace_hash_empty(ops->func_hash->notrace_hash);
3098 }
3099 
3100 /*
3101  * Check if the current ops references the record.
3102  *
3103  * If the ops traces all functions, then it was already accounted for.
3104  * If the ops does not trace the current record function, skip it.
3105  * If the ops ignores the function via notrace filter, skip it.
3106  */
3107 static inline bool
3108 ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3109 {
3110 	/* If ops isn't enabled, ignore it */
3111 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
3112 		return false;
3113 
3114 	/* If ops traces all then it includes this function */
3115 	if (ops_traces_mod(ops))
3116 		return true;
3117 
3118 	/* The function must be in the filter */
3119 	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
3120 	    !__ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
3121 		return false;
3122 
3123 	/* If in notrace hash, we ignore it too */
3124 	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
3125 		return false;
3126 
3127 	return true;
3128 }
3129 
3130 static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3131 {
3132 	bool init_nop = ftrace_need_init_nop();
3133 	struct ftrace_page *pg;
3134 	struct dyn_ftrace *p;
3135 	u64 start, stop;
3136 	unsigned long update_cnt = 0;
3137 	unsigned long rec_flags = 0;
3138 	int i;
3139 
3140 	start = ftrace_now(raw_smp_processor_id());
3141 
3142 	/*
3143 	 * When a module is loaded, this function is called to convert
3144 	 * the calls to mcount in its text to nops, and also to create
3145 	 * an entry in the ftrace data. Now, if ftrace is activated
3146 	 * after this call, but before the module sets its text to
3147 	 * read-only, the modification of enabling ftrace can fail if
3148 	 * the read-only is done while ftrace is converting the calls.
3149 	 * To prevent this, the module's records are set as disabled
3150 	 * and will be enabled after the call to set the module's text
3151 	 * to read-only.
3152 	 */
3153 	if (mod)
3154 		rec_flags |= FTRACE_FL_DISABLED;
3155 
3156 	for (pg = new_pgs; pg; pg = pg->next) {
3157 
3158 		for (i = 0; i < pg->index; i++) {
3159 
3160 			/* If something went wrong, bail without enabling anything */
3161 			if (unlikely(ftrace_disabled))
3162 				return -1;
3163 
3164 			p = &pg->records[i];
3165 			p->flags = rec_flags;
3166 
3167 			/*
3168 			 * Do the initial record conversion from mcount jump
3169 			 * to the NOP instructions.
3170 			 */
3171 			if (init_nop && !ftrace_nop_initialize(mod, p))
3172 				break;
3173 
3174 			update_cnt++;
3175 		}
3176 	}
3177 
3178 	stop = ftrace_now(raw_smp_processor_id());
3179 	ftrace_update_time = stop - start;
3180 	ftrace_update_tot_cnt += update_cnt;
3181 
3182 	return 0;
3183 }
3184 
3185 static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3186 {
3187 	int order;
3188 	int pages;
3189 	int cnt;
3190 
3191 	if (WARN_ON(!count))
3192 		return -EINVAL;
3193 
3194 	/* We want to fill as much as possible, with no empty pages */
3195 	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
3196 	order = fls(pages) - 1;
3197 
3198  again:
3199 	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3200 
3201 	if (!pg->records) {
3202 		/* if we can't allocate this size, try something smaller */
3203 		if (!order)
3204 			return -ENOMEM;
3205 		order >>= 1;
3206 		goto again;
3207 	}
3208 
3209 	ftrace_number_of_pages += 1 << order;
3210 	ftrace_number_of_groups++;
3211 
3212 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
3213 	pg->order = order;
3214 
3215 	if (cnt > count)
3216 		cnt = count;
3217 
3218 	return cnt;
3219 }
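/*
 * Worked example (figures assumed for illustration): with 4K pages and
 * a 16-byte struct dyn_ftrace (no arch data), ENTRIES_PER_PAGE is 256.
 * A request for count = 1000 gives pages = 4 and order = fls(4) - 1 = 2,
 * i.e. one 16K group that can hold 1024 records, with cnt clamped to
 * the requested 1000.
 */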
3220 
3221 static struct ftrace_page *
3222 ftrace_allocate_pages(unsigned long num_to_init)
3223 {
3224 	struct ftrace_page *start_pg;
3225 	struct ftrace_page *pg;
3226 	int cnt;
3227 
3228 	if (!num_to_init)
3229 		return NULL;
3230 
3231 	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
3232 	if (!pg)
3233 		return NULL;
3234 
3235 	/*
3236 	 * Try to allocate as much as possible in one contiguous
3237 	 * location that fills in all of the space. We want to
3238 	 * waste as little space as possible.
3239 	 */
3240 	for (;;) {
3241 		cnt = ftrace_allocate_records(pg, num_to_init);
3242 		if (cnt < 0)
3243 			goto free_pages;
3244 
3245 		num_to_init -= cnt;
3246 		if (!num_to_init)
3247 			break;
3248 
3249 		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3250 		if (!pg->next)
3251 			goto free_pages;
3252 
3253 		pg = pg->next;
3254 	}
3255 
3256 	return start_pg;
3257 
3258  free_pages:
3259 	pg = start_pg;
3260 	while (pg) {
3261 		if (pg->records) {
3262 			free_pages((unsigned long)pg->records, pg->order);
3263 			ftrace_number_of_pages -= 1 << pg->order;
3264 		}
3265 		start_pg = pg->next;
3266 		kfree(pg);
3267 		pg = start_pg;
3268 		ftrace_number_of_groups--;
3269 	}
3270 	pr_info("ftrace: FAILED to allocate memory for functions\n");
3271 	return NULL;
3272 }
3273 
3274 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3275 
3276 struct ftrace_iterator {
3277 	loff_t				pos;
3278 	loff_t				func_pos;
3279 	loff_t				mod_pos;
3280 	struct ftrace_page		*pg;
3281 	struct dyn_ftrace		*func;
3282 	struct ftrace_func_probe	*probe;
3283 	struct ftrace_func_entry	*probe_entry;
3284 	struct trace_parser		parser;
3285 	struct ftrace_hash		*hash;
3286 	struct ftrace_ops		*ops;
3287 	struct trace_array		*tr;
3288 	struct list_head		*mod_list;
3289 	int				pidx;
3290 	int				idx;
3291 	unsigned			flags;
3292 };
3293 
3294 static void *
3295 t_probe_next(struct seq_file *m, loff_t *pos)
3296 {
3297 	struct ftrace_iterator *iter = m->private;
3298 	struct trace_array *tr = iter->ops->private;
3299 	struct list_head *func_probes;
3300 	struct ftrace_hash *hash;
3301 	struct list_head *next;
3302 	struct hlist_node *hnd = NULL;
3303 	struct hlist_head *hhd;
3304 	int size;
3305 
3306 	(*pos)++;
3307 	iter->pos = *pos;
3308 
3309 	if (!tr)
3310 		return NULL;
3311 
3312 	func_probes = &tr->func_probes;
3313 	if (list_empty(func_probes))
3314 		return NULL;
3315 
3316 	if (!iter->probe) {
3317 		next = func_probes->next;
3318 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3319 	}
3320 
3321 	if (iter->probe_entry)
3322 		hnd = &iter->probe_entry->hlist;
3323 
3324 	hash = iter->probe->ops.func_hash->filter_hash;
3325 
3326 	/*
3327 	 * A probe being registered may temporarily have an empty hash
3328 	 * and it's at the end of the func_probes list.
3329 	 */
3330 	if (!hash || hash == EMPTY_HASH)
3331 		return NULL;
3332 
3333 	size = 1 << hash->size_bits;
3334 
3335  retry:
3336 	if (iter->pidx >= size) {
3337 		if (iter->probe->list.next == func_probes)
3338 			return NULL;
3339 		next = iter->probe->list.next;
3340 		iter->probe = list_entry(next, struct ftrace_func_probe, list);
3341 		hash = iter->probe->ops.func_hash->filter_hash;
3342 		size = 1 << hash->size_bits;
3343 		iter->pidx = 0;
3344 	}
3345 
3346 	hhd = &hash->buckets[iter->pidx];
3347 
3348 	if (hlist_empty(hhd)) {
3349 		iter->pidx++;
3350 		hnd = NULL;
3351 		goto retry;
3352 	}
3353 
3354 	if (!hnd)
3355 		hnd = hhd->first;
3356 	else {
3357 		hnd = hnd->next;
3358 		if (!hnd) {
3359 			iter->pidx++;
3360 			goto retry;
3361 		}
3362 	}
3363 
3364 	if (WARN_ON_ONCE(!hnd))
3365 		return NULL;
3366 
3367 	iter->probe_entry = hlist_entry(hnd, struct ftrace_func_entry, hlist);
3368 
3369 	return iter;
3370 }
3371 
3372 static void *t_probe_start(struct seq_file *m, loff_t *pos)
3373 {
3374 	struct ftrace_iterator *iter = m->private;
3375 	void *p = NULL;
3376 	loff_t l;
3377 
3378 	if (!(iter->flags & FTRACE_ITER_DO_PROBES))
3379 		return NULL;
3380 
3381 	if (iter->mod_pos > *pos)
3382 		return NULL;
3383 
3384 	iter->probe = NULL;
3385 	iter->probe_entry = NULL;
3386 	iter->pidx = 0;
3387 	for (l = 0; l <= (*pos - iter->mod_pos); ) {
3388 		p = t_probe_next(m, &l);
3389 		if (!p)
3390 			break;
3391 	}
3392 	if (!p)
3393 		return NULL;
3394 
3395 	/* Only set this if we have an item */
3396 	iter->flags |= FTRACE_ITER_PROBE;
3397 
3398 	return iter;
3399 }
3400 
3401 static int
3402 t_probe_show(struct seq_file *m, struct ftrace_iterator *iter)
3403 {
3404 	struct ftrace_func_entry *probe_entry;
3405 	struct ftrace_probe_ops *probe_ops;
3406 	struct ftrace_func_probe *probe;
3407 
3408 	probe = iter->probe;
3409 	probe_entry = iter->probe_entry;
3410 
3411 	if (WARN_ON_ONCE(!probe || !probe_entry))
3412 		return -EIO;
3413 
3414 	probe_ops = probe->probe_ops;
3415 
3416 	if (probe_ops->print)
3417 		return probe_ops->print(m, probe_entry->ip, probe_ops, probe->data);
3418 
3419 	seq_printf(m, "%ps:%ps\n", (void *)probe_entry->ip,
3420 		   (void *)probe_ops->func);
3421 
3422 	return 0;
3423 }
3424 
3425 static void *
3426 t_mod_next(struct seq_file *m, loff_t *pos)
3427 {
3428 	struct ftrace_iterator *iter = m->private;
3429 	struct trace_array *tr = iter->tr;
3430 
3431 	(*pos)++;
3432 	iter->pos = *pos;
3433 
3434 	iter->mod_list = iter->mod_list->next;
3435 
3436 	if (iter->mod_list == &tr->mod_trace ||
3437 	    iter->mod_list == &tr->mod_notrace) {
3438 		iter->flags &= ~FTRACE_ITER_MOD;
3439 		return NULL;
3440 	}
3441 
3442 	iter->mod_pos = *pos;
3443 
3444 	return iter;
3445 }
3446 
3447 static void *t_mod_start(struct seq_file *m, loff_t *pos)
3448 {
3449 	struct ftrace_iterator *iter = m->private;
3450 	void *p = NULL;
3451 	loff_t l;
3452 
3453 	if (iter->func_pos > *pos)
3454 		return NULL;
3455 
3456 	iter->mod_pos = iter->func_pos;
3457 
3458 	/* probes are only available if tr is set */
3459 	if (!iter->tr)
3460 		return NULL;
3461 
3462 	for (l = 0; l <= (*pos - iter->func_pos); ) {
3463 		p = t_mod_next(m, &l);
3464 		if (!p)
3465 			break;
3466 	}
3467 	if (!p) {
3468 		iter->flags &= ~FTRACE_ITER_MOD;
3469 		return t_probe_start(m, pos);
3470 	}
3471 
3472 	/* Only set this if we have an item */
3473 	iter->flags |= FTRACE_ITER_MOD;
3474 
3475 	return iter;
3476 }
3477 
3478 static int
3479 t_mod_show(struct seq_file *m, struct ftrace_iterator *iter)
3480 {
3481 	struct ftrace_mod_load *ftrace_mod;
3482 	struct trace_array *tr = iter->tr;
3483 
3484 	if (WARN_ON_ONCE(!iter->mod_list) ||
3485 			 iter->mod_list == &tr->mod_trace ||
3486 			 iter->mod_list == &tr->mod_notrace)
3487 		return -EIO;
3488 
3489 	ftrace_mod = list_entry(iter->mod_list, struct ftrace_mod_load, list);
3490 
3491 	if (ftrace_mod->func)
3492 		seq_printf(m, "%s", ftrace_mod->func);
3493 	else
3494 		seq_putc(m, '*');
3495 
3496 	seq_printf(m, ":mod:%s\n", ftrace_mod->module);
3497 
3498 	return 0;
3499 }
3500 
3501 static void *
3502 t_func_next(struct seq_file *m, loff_t *pos)
3503 {
3504 	struct ftrace_iterator *iter = m->private;
3505 	struct dyn_ftrace *rec = NULL;
3506 
3507 	(*pos)++;
3508 
3509  retry:
3510 	if (iter->idx >= iter->pg->index) {
3511 		if (iter->pg->next) {
3512 			iter->pg = iter->pg->next;
3513 			iter->idx = 0;
3514 			goto retry;
3515 		}
3516 	} else {
3517 		rec = &iter->pg->records[iter->idx++];
3518 		if (((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3519 		     !ftrace_lookup_ip(iter->hash, rec->ip)) ||
3520 
3521 		    ((iter->flags & FTRACE_ITER_ENABLED) &&
3522 		     !(rec->flags & FTRACE_FL_ENABLED))) {
3523 
3524 			rec = NULL;
3525 			goto retry;
3526 		}
3527 	}
3528 
3529 	if (!rec)
3530 		return NULL;
3531 
3532 	iter->pos = iter->func_pos = *pos;
3533 	iter->func = rec;
3534 
3535 	return iter;
3536 }
3537 
3538 static void *
3539 t_next(struct seq_file *m, void *v, loff_t *pos)
3540 {
3541 	struct ftrace_iterator *iter = m->private;
3542 	loff_t l = *pos; /* t_probe_start() must use original pos */
3543 	void *ret;
3544 
3545 	if (unlikely(ftrace_disabled))
3546 		return NULL;
3547 
3548 	if (iter->flags & FTRACE_ITER_PROBE)
3549 		return t_probe_next(m, pos);
3550 
3551 	if (iter->flags & FTRACE_ITER_MOD)
3552 		return t_mod_next(m, pos);
3553 
3554 	if (iter->flags & FTRACE_ITER_PRINTALL) {
3555 		/* next must increment pos, and t_probe_start does not */
3556 		(*pos)++;
3557 		return t_mod_start(m, &l);
3558 	}
3559 
3560 	ret = t_func_next(m, pos);
3561 
3562 	if (!ret)
3563 		return t_mod_start(m, &l);
3564 
3565 	return ret;
3566 }
3567 
3568 static void reset_iter_read(struct ftrace_iterator *iter)
3569 {
3570 	iter->pos = 0;
3571 	iter->func_pos = 0;
3572 	iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_PROBE | FTRACE_ITER_MOD);
3573 }
3574 
3575 static void *t_start(struct seq_file *m, loff_t *pos)
3576 {
3577 	struct ftrace_iterator *iter = m->private;
3578 	void *p = NULL;
3579 	loff_t l;
3580 
3581 	mutex_lock(&ftrace_lock);
3582 
3583 	if (unlikely(ftrace_disabled))
3584 		return NULL;
3585 
3586 	/*
3587 	 * If an lseek was done, then reset and start from the beginning.
3588 	 */
3589 	if (*pos < iter->pos)
3590 		reset_iter_read(iter);
3591 
3592 	/*
3593 	 * For set_ftrace_filter reading, if we have the filter
3594 	 * off, we can short cut and just print out that all
3595 	 * functions are enabled.
3596 	 */
3597 	if ((iter->flags & (FTRACE_ITER_FILTER | FTRACE_ITER_NOTRACE)) &&
3598 	    ftrace_hash_empty(iter->hash)) {
3599 		iter->func_pos = 1; /* Account for the message */
3600 		if (*pos > 0)
3601 			return t_mod_start(m, pos);
3602 		iter->flags |= FTRACE_ITER_PRINTALL;
3603 		/* reset in case of seek/pread */
3604 		iter->flags &= ~FTRACE_ITER_PROBE;
3605 		return iter;
3606 	}
3607 
3608 	if (iter->flags & FTRACE_ITER_MOD)
3609 		return t_mod_start(m, pos);
3610 
3611 	/*
3612 	 * Unfortunately, we need to restart at ftrace_pages_start
3613 	 * every time we let go of the ftrace_lock. This is because
3614 	 * those pointers can change without the lock.
3615 	 */
3616 	iter->pg = ftrace_pages_start;
3617 	iter->idx = 0;
3618 	for (l = 0; l <= *pos; ) {
3619 		p = t_func_next(m, &l);
3620 		if (!p)
3621 			break;
3622 	}
3623 
3624 	if (!p)
3625 		return t_mod_start(m, pos);
3626 
3627 	return iter;
3628 }
3629 
3630 static void t_stop(struct seq_file *m, void *p)
3631 {
3632 	mutex_unlock(&ftrace_lock);
3633 }
3634 
3635 void * __weak
3636 arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3637 {
3638 	return NULL;
3639 }
3640 
3641 static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3642 				struct dyn_ftrace *rec)
3643 {
3644 	void *ptr;
3645 
3646 	ptr = arch_ftrace_trampoline_func(ops, rec);
3647 	if (ptr)
3648 		seq_printf(m, " ->%pS", ptr);
3649 }
3650 
3651 static int t_show(struct seq_file *m, void *v)
3652 {
3653 	struct ftrace_iterator *iter = m->private;
3654 	struct dyn_ftrace *rec;
3655 
3656 	if (iter->flags & FTRACE_ITER_PROBE)
3657 		return t_probe_show(m, iter);
3658 
3659 	if (iter->flags & FTRACE_ITER_MOD)
3660 		return t_mod_show(m, iter);
3661 
3662 	if (iter->flags & FTRACE_ITER_PRINTALL) {
3663 		if (iter->flags & FTRACE_ITER_NOTRACE)
3664 			seq_puts(m, "#### no functions disabled ####\n");
3665 		else
3666 			seq_puts(m, "#### all functions enabled ####\n");
3667 		return 0;
3668 	}
3669 
3670 	rec = iter->func;
3671 
3672 	if (!rec)
3673 		return 0;
3674 
3675 	seq_printf(m, "%ps", (void *)rec->ip);
3676 	if (iter->flags & FTRACE_ITER_ENABLED) {
3677 		struct ftrace_ops *ops;
3678 
3679 		seq_printf(m, " (%ld)%s%s%s",
3680 			   ftrace_rec_count(rec),
3681 			   rec->flags & FTRACE_FL_REGS ? " R" : "  ",
3682 			   rec->flags & FTRACE_FL_IPMODIFY ? " I" : "  ",
3683 			   rec->flags & FTRACE_FL_DIRECT ? " D" : "  ");
3684 		if (rec->flags & FTRACE_FL_TRAMP_EN) {
3685 			ops = ftrace_find_tramp_ops_any(rec);
3686 			if (ops) {
3687 				do {
3688 					seq_printf(m, "\ttramp: %pS (%pS)",
3689 						   (void *)ops->trampoline,
3690 						   (void *)ops->func);
3691 					add_trampoline_func(m, ops, rec);
3692 					ops = ftrace_find_tramp_ops_next(rec, ops);
3693 				} while (ops);
3694 			} else
3695 				seq_puts(m, "\ttramp: ERROR!");
3696 		} else {
3697 			add_trampoline_func(m, NULL, rec);
3698 		}
3699 		if (rec->flags & FTRACE_FL_DIRECT) {
3700 			unsigned long direct;
3701 
3702 			direct = ftrace_find_rec_direct(rec->ip);
3703 			if (direct)
3704 				seq_printf(m, "\n\tdirect-->%pS", (void *)direct);
3705 		}
3706 	}
3707 
3708 	seq_putc(m, '\n');
3709 
3710 	return 0;
3711 }
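/*
 * Example of the resulting enabled_functions output (symbols and
 * addresses made up; format per t_show() above):
 *
 *	wake_up_process (1) R  I  D 	tramp: ftrace_regs_caller+0x0/0x58 (call_direct_funcs+0x0/0x30)
 *		direct-->my_tramp+0x0/0x40
 *
 * i.e. the symbol, the ref count, the R/I/D flag columns, then any
 * trampoline and direct-call annotations.
 */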
3712 
3713 static const struct seq_operations show_ftrace_seq_ops = {
3714 	.start = t_start,
3715 	.next = t_next,
3716 	.stop = t_stop,
3717 	.show = t_show,
3718 };
3719 
3720 static int
3721 ftrace_avail_open(struct inode *inode, struct file *file)
3722 {
3723 	struct ftrace_iterator *iter;
3724 	int ret;
3725 
3726 	ret = security_locked_down(LOCKDOWN_TRACEFS);
3727 	if (ret)
3728 		return ret;
3729 
3730 	if (unlikely(ftrace_disabled))
3731 		return -ENODEV;
3732 
3733 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3734 	if (!iter)
3735 		return -ENOMEM;
3736 
3737 	iter->pg = ftrace_pages_start;
3738 	iter->ops = &global_ops;
3739 
3740 	return 0;
3741 }
3742 
3743 static int
3744 ftrace_enabled_open(struct inode *inode, struct file *file)
3745 {
3746 	struct ftrace_iterator *iter;
3747 
3748 	/*
3749 	 * This shows us what functions are currently being
3750 	 * traced and by what. Not sure if we want lockdown
3751 	 * to hide such critical information from an admin.
3752 	 * Although perhaps it can show information we don't
3753 	 * want people to see, but if something is tracing
3754 	 * something, we probably want to know about it.
3755 	 */
3756 
3757 	iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3758 	if (!iter)
3759 		return -ENOMEM;
3760 
3761 	iter->pg = ftrace_pages_start;
3762 	iter->flags = FTRACE_ITER_ENABLED;
3763 	iter->ops = &global_ops;
3764 
3765 	return 0;
3766 }
3767 
3768 /**
3769  * ftrace_regex_open - initialize function tracer filter files
3770  * @ops: The ftrace_ops that hold the hash filters
3771  * @flag: The type of filter to process
3772  * @inode: The inode, usually passed in to your open routine
3773  * @file: The file, usually passed in to your open routine
3774  *
3775  * ftrace_regex_open() initializes the filter files for the
3776  * @ops. Depending on @flag it may process the filter hash or
3777  * the notrace hash of @ops. With this called from the open
3778  * routine, you can use ftrace_filter_write() for the write
3779  * routine if @flag has FTRACE_ITER_FILTER set, or
3780  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
3781  * tracing_lseek() should be used as the lseek routine, and
3782  * release must call ftrace_regex_release().
3783  */
3784 int
3785 ftrace_regex_open(struct ftrace_ops *ops, int flag,
3786 		  struct inode *inode, struct file *file)
3787 {
3788 	struct ftrace_iterator *iter;
3789 	struct ftrace_hash *hash;
3790 	struct list_head *mod_head;
3791 	struct trace_array *tr = ops->private;
3792 	int ret = -ENOMEM;
3793 
3794 	ftrace_ops_init(ops);
3795 
3796 	if (unlikely(ftrace_disabled))
3797 		return -ENODEV;
3798 
3799 	if (tracing_check_open_get_tr(tr))
3800 		return -ENODEV;
3801 
3802 	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3803 	if (!iter)
3804 		goto out;
3805 
3806 	if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
3807 		goto out;
3808 
3809 	iter->ops = ops;
3810 	iter->flags = flag;
3811 	iter->tr = tr;
3812 
3813 	mutex_lock(&ops->func_hash->regex_lock);
3814 
3815 	if (flag & FTRACE_ITER_NOTRACE) {
3816 		hash = ops->func_hash->notrace_hash;
3817 		mod_head = tr ? &tr->mod_notrace : NULL;
3818 	} else {
3819 		hash = ops->func_hash->filter_hash;
3820 		mod_head = tr ? &tr->mod_trace : NULL;
3821 	}
3822 
3823 	iter->mod_list = mod_head;
3824 
3825 	if (file->f_mode & FMODE_WRITE) {
3826 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3827 
3828 		if (file->f_flags & O_TRUNC) {
3829 			iter->hash = alloc_ftrace_hash(size_bits);
3830 			clear_ftrace_mod_list(mod_head);
3831 		} else {
3832 			iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3833 		}
3834 
3835 		if (!iter->hash) {
3836 			trace_parser_put(&iter->parser);
3837 			goto out_unlock;
3838 		}
3839 	} else
3840 		iter->hash = hash;
3841 
3842 	ret = 0;
3843 
3844 	if (file->f_mode & FMODE_READ) {
3845 		iter->pg = ftrace_pages_start;
3846 
3847 		ret = seq_open(file, &show_ftrace_seq_ops);
3848 		if (!ret) {
3849 			struct seq_file *m = file->private_data;
3850 			m->private = iter;
3851 		} else {
3852 			/* Failed */
3853 			free_ftrace_hash(iter->hash);
3854 			trace_parser_put(&iter->parser);
3855 		}
3856 	} else
3857 		file->private_data = iter;
3858 
3859  out_unlock:
3860 	mutex_unlock(&ops->func_hash->regex_lock);
3861 
3862  out:
3863 	if (ret) {
3864 		kfree(iter);
3865 		if (tr)
3866 			trace_array_put(tr);
3867 	}
3868 
3869 	return ret;
3870 }
3871 
3872 static int
3873 ftrace_filter_open(struct inode *inode, struct file *file)
3874 {
3875 	struct ftrace_ops *ops = inode->i_private;
3876 
3877 	/* Checks for tracefs lockdown */
3878 	return ftrace_regex_open(ops,
3879 			FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
3880 			inode, file);
3881 }
3882 
3883 static int
3884 ftrace_notrace_open(struct inode *inode, struct file *file)
3885 {
3886 	struct ftrace_ops *ops = inode->i_private;
3887 
3888 	/* Checks for tracefs lockdown */
3889 	return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
3890 				 inode, file);
3891 }
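/*
 * Sketch of how the open routines above get wired up (the real
 * file_operations for set_ftrace_filter appear later in this file;
 * this just restates the pattern the ftrace_regex_open() kernel-doc
 * describes, with a hypothetical name):
 *
 *	static const struct file_operations example_filter_fops = {
 *		.open    = ftrace_filter_open,
 *		.read    = seq_read,
 *		.write   = ftrace_filter_write,
 *		.llseek  = tracing_lseek,
 *		.release = ftrace_regex_release,
 *	};
 */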
3892 
3893 /* Type to hold a basic glob (from filter_parse_regex) for quick matching */
3894 struct ftrace_glob {
3895 	char *search;
3896 	unsigned len;
3897 	int type;
3898 };
3899 
3900 /*
3901  * If symbols in an architecture don't correspond exactly to the user-visible
3902  * name of what they represent, it is possible to define this function to
3903  * perform the necessary adjustments.
3904  */
3905 char * __weak arch_ftrace_match_adjust(char *str, const char *search)
3906 {
3907 	return str;
3908 }
3909 
3910 static int ftrace_match(char *str, struct ftrace_glob *g)
3911 {
3912 	int matched = 0;
3913 	int slen;
3914 
3915 	str = arch_ftrace_match_adjust(str, g->search);
3916 
3917 	switch (g->type) {
3918 	case MATCH_FULL:
3919 		if (strcmp(str, g->search) == 0)
3920 			matched = 1;
3921 		break;
3922 	case MATCH_FRONT_ONLY:
3923 		if (strncmp(str, g->search, g->len) == 0)
3924 			matched = 1;
3925 		break;
3926 	case MATCH_MIDDLE_ONLY:
3927 		if (strstr(str, g->search))
3928 			matched = 1;
3929 		break;
3930 	case MATCH_END_ONLY:
3931 		slen = strlen(str);
3932 		if (slen >= g->len &&
3933 		    memcmp(str + slen - g->len, g->search, g->len) == 0)
3934 			matched = 1;
3935 		break;
3936 	case MATCH_GLOB:
3937 		if (glob_match(g->search, str))
3938 			matched = 1;
3939 		break;
3940 	}
3941 
3942 	return matched;
3943 }
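/*
 * Examples of the glob types handled above, as filter_parse_regex()
 * produces them from user input:
 *
 *	"schedule"	MATCH_FULL		exact string compare
 *	"sched*"	MATCH_FRONT_ONLY	prefix compare
 *	"*timer"	MATCH_END_ONLY		suffix compare
 *	"*lock*"	MATCH_MIDDLE_ONLY	substring search
 *	"sched*idle"	MATCH_GLOB		full glob_match()
 */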
3944 
3945 static int
3946 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
3947 {
3948 	struct ftrace_func_entry *entry;
3949 	int ret = 0;
3950 
3951 	entry = ftrace_lookup_ip(hash, rec->ip);
3952 	if (clear_filter) {
3953 		/* Do nothing if it doesn't exist */
3954 		if (!entry)
3955 			return 0;
3956 
3957 		free_hash_entry(hash, entry);
3958 	} else {
3959 		/* Do nothing if it exists */
3960 		if (entry)
3961 			return 0;
3962 
3963 		ret = add_hash_entry(hash, rec->ip);
3964 	}
3965 	return ret;
3966 }
3967 
3968 static int
3969 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
3970 		 int clear_filter)
3971 {
3972 	long index = simple_strtoul(func_g->search, NULL, 0);
3973 	struct ftrace_page *pg;
3974 	struct dyn_ftrace *rec;
3975 
3976 	/* The index starts at 1 */
3977 	if (--index < 0)
3978 		return 0;
3979 
3980 	do_for_each_ftrace_rec(pg, rec) {
3981 		if (pg->index <= index) {
3982 			index -= pg->index;
3983 			/* this is a double loop, break goes to the next page */
3984 			break;
3985 		}
3986 		rec = &pg->records[index];
3987 		enter_record(hash, rec, clear_filter);
3988 		return 1;
3989 	} while_for_each_ftrace_rec();
3990 	return 0;
3991 }
3992 
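/*
 * Usage note (editor's addition): MATCH_INDEX selects a function by its
 * 1-based position in available_filter_functions, e.g.:
 *
 *	# echo 3 > /sys/kernel/tracing/set_ftrace_filter
 *
 * picks the third record, which is what the page walk above resolves.
 */
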
3993 static int
3994 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3995 		struct ftrace_glob *mod_g, int exclude_mod)
3996 {
3997 	char str[KSYM_SYMBOL_LEN];
3998 	char *modname;
3999 
4000 	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
4001 
4002 	if (mod_g) {
4003 		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
4004 
4005 		/* blank module name to match all modules */
4006 		if (!mod_g->len) {
4007 			/* blank module globbing: modname xor exclude_mod */
4008 			if (!exclude_mod != !modname)
4009 				goto func_match;
4010 			return 0;
4011 		}
4012 
4013 		/*
4014 		 * exclude_mod is set to trace everything but the given
4015 		 * module. If it is set and the module matches, then
4016 		 * return 0. If it is not set and the module doesn't match,
4017 		 * also return 0. Otherwise, check the function to see if
4018 		 * that matches.
4019 		 */
4020 		if (!mod_matches == !exclude_mod)
4021 			return 0;
4022 func_match:
4023 		/* blank search means to match all funcs in the mod */
4024 		if (!func_g->len)
4025 			return 1;
4026 	}
4027 
4028 	return ftrace_match(str, func_g);
4029 }
4030 
4031 static int
4032 match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
4033 {
4034 	struct ftrace_page *pg;
4035 	struct dyn_ftrace *rec;
4036 	struct ftrace_glob func_g = { .type = MATCH_FULL };
4037 	struct ftrace_glob mod_g = { .type = MATCH_FULL };
4038 	struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
4039 	int exclude_mod = 0;
4040 	int found = 0;
4041 	int ret;
4042 	int clear_filter = 0;
4043 
4044 	if (func) {
4045 		func_g.type = filter_parse_regex(func, len, &func_g.search,
4046 						 &clear_filter);
4047 		func_g.len = strlen(func_g.search);
4048 	}
4049 
4050 	if (mod) {
4051 		mod_g.type = filter_parse_regex(mod, strlen(mod),
4052 				&mod_g.search, &exclude_mod);
4053 		mod_g.len = strlen(mod_g.search);
4054 	}
4055 
4056 	mutex_lock(&ftrace_lock);
4057 
4058 	if (unlikely(ftrace_disabled))
4059 		goto out_unlock;
4060 
4061 	if (func_g.type == MATCH_INDEX) {
4062 		found = add_rec_by_index(hash, &func_g, clear_filter);
4063 		goto out_unlock;
4064 	}
4065 
4066 	do_for_each_ftrace_rec(pg, rec) {
4067 
4068 		if (rec->flags & FTRACE_FL_DISABLED)
4069 			continue;
4070 
4071 		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
4072 			ret = enter_record(hash, rec, clear_filter);
4073 			if (ret < 0) {
4074 				found = ret;
4075 				goto out_unlock;
4076 			}
4077 			found = 1;
4078 		}
4079 	} while_for_each_ftrace_rec();
4080  out_unlock:
4081 	mutex_unlock(&ftrace_lock);
4082 
4083 	return found;
4084 }
4085 
4086 static int
4087 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
4088 {
4089 	return match_records(hash, buff, len, NULL);
4090 }
4091 
4092 static void ftrace_ops_update_code(struct ftrace_ops *ops,
4093 				   struct ftrace_ops_hash *old_hash)
4094 {
4095 	struct ftrace_ops *op;
4096 
4097 	if (!ftrace_enabled)
4098 		return;
4099 
4100 	if (ops->flags & FTRACE_OPS_FL_ENABLED) {
4101 		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
4102 		return;
4103 	}
4104 
4105 	/*
4106 	 * If this is the shared global_ops filter, then we need to
4107 	 * check if there is another ops that shares it and is enabled.
4108 	 * If so, we still need to run the modify code.
4109 	 */
4110 	if (ops->func_hash != &global_ops.local_hash)
4111 		return;
4112 
4113 	do_for_each_ftrace_op(op, ftrace_ops_list) {
4114 		if (op->func_hash == &global_ops.local_hash &&
4115 		    op->flags & FTRACE_OPS_FL_ENABLED) {
4116 			ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4117 			/* Only need to do this once */
4118 			return;
4119 		}
4120 	} while_for_each_ftrace_op(op);
4121 }
4122 
4123 static int ftrace_hash_move_and_update_ops(struct ftrace_ops *ops,
4124 					   struct ftrace_hash **orig_hash,
4125 					   struct ftrace_hash *hash,
4126 					   int enable)
4127 {
4128 	struct ftrace_ops_hash old_hash_ops;
4129 	struct ftrace_hash *old_hash;
4130 	int ret;
4131 
4132 	old_hash = *orig_hash;
4133 	old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4134 	old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
4135 	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
4136 	if (!ret) {
4137 		ftrace_ops_update_code(ops, &old_hash_ops);
4138 		free_ftrace_hash_rcu(old_hash);
4139 	}
4140 	return ret;
4141 }
4142 
4143 static bool module_exists(const char *module)
4144 {
4145 	/* All modules have the symbol __this_module */
4146 	static const char this_mod[] = "__this_module";
4147 	char modname[MAX_PARAM_PREFIX_LEN + sizeof(this_mod) + 2];
4148 	unsigned long val;
4149 	int n;
4150 
4151 	n = snprintf(modname, sizeof(modname), "%s:%s", module, this_mod);
4152 
4153 	if (n > sizeof(modname) - 1)
4154 		return false;
4155 
4156 	val = module_kallsyms_lookup_name(modname);
4157 	return val != 0;
4158 }
4159 
4160 static int cache_mod(struct trace_array *tr,
4161 		     const char *func, char *module, int enable)
4162 {
4163 	struct ftrace_mod_load *ftrace_mod, *n;
4164 	struct list_head *head = enable ? &tr->mod_trace : &tr->mod_notrace;
4165 	int ret;
4166 
4167 	mutex_lock(&ftrace_lock);
4168 
4169 	/* We do not cache inverse filters */
4170 	if (func[0] == '!') {
4171 		func++;
4172 		ret = -EINVAL;
4173 
4174 		/* Look to remove this hash */
4175 		list_for_each_entry_safe(ftrace_mod, n, head, list) {
4176 			if (strcmp(ftrace_mod->module, module) != 0)
4177 				continue;
4178 
4179 			/* no func matches all */
4180 			if (strcmp(func, "*") == 0 ||
4181 			    (ftrace_mod->func &&
4182 			     strcmp(ftrace_mod->func, func) == 0)) {
4183 				ret = 0;
4184 				free_ftrace_mod(ftrace_mod);
4185 				continue;
4186 			}
4187 		}
4188 		goto out;
4189 	}
4190 
4191 	ret = -EINVAL;
4192 	/* We only care about modules that have not been loaded yet */
4193 	if (module_exists(module))
4194 		goto out;
4195 
4196 	/* Save this string off, and execute it when the module is loaded */
4197 	ret = ftrace_add_mod(tr, func, module, enable);
4198  out:
4199 	mutex_unlock(&ftrace_lock);
4200 
4201 	return ret;
4202 }
4203 
4204 static int
4205 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4206 		 int reset, int enable);
4207 
4208 #ifdef CONFIG_MODULES
4209 static void process_mod_list(struct list_head *head, struct ftrace_ops *ops,
4210 			     char *mod, bool enable)
4211 {
4212 	struct ftrace_mod_load *ftrace_mod, *n;
4213 	struct ftrace_hash **orig_hash, *new_hash;
4214 	LIST_HEAD(process_mods);
4215 	char *func;
4216 
4217 	mutex_lock(&ops->func_hash->regex_lock);
4218 
4219 	if (enable)
4220 		orig_hash = &ops->func_hash->filter_hash;
4221 	else
4222 		orig_hash = &ops->func_hash->notrace_hash;
4223 
4224 	new_hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS,
4225 					      *orig_hash);
4226 	if (!new_hash)
4227 		goto out; /* warn? */
4228 
4229 	mutex_lock(&ftrace_lock);
4230 
4231 	list_for_each_entry_safe(ftrace_mod, n, head, list) {
4232 
4233 		if (strcmp(ftrace_mod->module, mod) != 0)
4234 			continue;
4235 
4236 		if (ftrace_mod->func)
4237 			func = kstrdup(ftrace_mod->func, GFP_KERNEL);
4238 		else
4239 			func = kstrdup("*", GFP_KERNEL);
4240 
4241 		if (!func) /* warn? */
4242 			continue;
4243 
4244 		list_move(&ftrace_mod->list, &process_mods);
4245 
4246 		/* Use the newly allocated func, as it may be "*" */
4247 		kfree(ftrace_mod->func);
4248 		ftrace_mod->func = func;
4249 	}
4250 
4251 	mutex_unlock(&ftrace_lock);
4252 
4253 	list_for_each_entry_safe(ftrace_mod, n, &process_mods, list) {
4254 
4255 		func = ftrace_mod->func;
4256 
4257 		/* Grabs ftrace_lock, which is why we have this extra step */
4258 		match_records(new_hash, func, strlen(func), mod);
4259 		free_ftrace_mod(ftrace_mod);
4260 	}
4261 
4262 	if (enable && list_empty(head))
4263 		new_hash->flags &= ~FTRACE_HASH_FL_MOD;
4264 
4265 	mutex_lock(&ftrace_lock);
4266 
4267 	ftrace_hash_move_and_update_ops(ops, orig_hash,
4268 					      new_hash, enable);
4269 	mutex_unlock(&ftrace_lock);
4270 
4271  out:
4272 	mutex_unlock(&ops->func_hash->regex_lock);
4273 
4274 	free_ftrace_hash(new_hash);
4275 }
4276 
4277 static void process_cached_mods(const char *mod_name)
4278 {
4279 	struct trace_array *tr;
4280 	char *mod;
4281 
4282 	mod = kstrdup(mod_name, GFP_KERNEL);
4283 	if (!mod)
4284 		return;
4285 
4286 	mutex_lock(&trace_types_lock);
4287 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
4288 		if (!list_empty(&tr->mod_trace))
4289 			process_mod_list(&tr->mod_trace, tr->ops, mod, true);
4290 		if (!list_empty(&tr->mod_notrace))
4291 			process_mod_list(&tr->mod_notrace, tr->ops, mod, false);
4292 	}
4293 	mutex_unlock(&trace_types_lock);
4294 
4295 	kfree(mod);
4296 }
4297 #endif
4298 
4299 /*
4300  * We register the module command as a template to show others how
4301  * to register a command as well.
4302  */
4303 
4304 static int
4305 ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash,
4306 		    char *func_orig, char *cmd, char *module, int enable)
4307 {
4308 	char *func;
4309 	int ret;
4310 
4311 	/* match_records() modifies func, and we need the original */
4312 	func = kstrdup(func_orig, GFP_KERNEL);
4313 	if (!func)
4314 		return -ENOMEM;
4315 
4316 	/*
4317 	 * cmd == 'mod' because we only registered this func
4318 	 * for the 'mod' ftrace_func_command.
4319 	 * But if you register one func with multiple commands,
4320 	 * you can tell which command was used by the cmd
4321 	 * parameter.
4322 	 */
4323 	ret = match_records(hash, func, strlen(func), module);
4324 	kfree(func);
4325 
4326 	if (!ret)
4327 		return cache_mod(tr, func_orig, module, enable);
4328 	if (ret < 0)
4329 		return ret;
4330 	return 0;
4331 }
4332 
4333 static struct ftrace_func_command ftrace_mod_cmd = {
4334 	.name			= "mod",
4335 	.func			= ftrace_mod_callback,
4336 };
4337 
4338 static int __init ftrace_mod_cmd_init(void)
4339 {
4340 	return register_ftrace_command(&ftrace_mod_cmd);
4341 }
4342 core_initcall(ftrace_mod_cmd_init);
4343 
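/*
 * Editor's sketch of following the template above to add a new command.
 * The "mycmd" name and my_cmd_callback() are hypothetical; a real command
 * would parse @param and update @hash accordingly.
 */
#if 0	/* illustrative only */
static int
my_cmd_callback(struct trace_array *tr, struct ftrace_hash *hash,
		char *func, char *cmd, char *param, int enable)
{
	/* cmd == "mycmd"; param is the text after the second ':' */
	return match_records(hash, func, strlen(func), NULL);
}

static struct ftrace_func_command my_cmd = {
	.name			= "mycmd",
	.func			= my_cmd_callback,
};

static int __init my_cmd_init(void)
{
	/* Triggered by: echo '<func>:mycmd:<param>' > set_ftrace_filter */
	return register_ftrace_command(&my_cmd);
}
core_initcall(my_cmd_init);
#endif
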
4344 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
4345 				      struct ftrace_ops *op, struct ftrace_regs *fregs)
4346 {
4347 	struct ftrace_probe_ops *probe_ops;
4348 	struct ftrace_func_probe *probe;
4349 
4350 	probe = container_of(op, struct ftrace_func_probe, ops);
4351 	probe_ops = probe->probe_ops;
4352 
4353 	/*
4354 	 * Disable preemption for these calls to prevent an RCU grace
4355 	 * period. This syncs the hash iteration and freeing of items
4356 	 * on the hash. rcu_read_lock is too dangerous here.
4357 	 */
4358 	preempt_disable_notrace();
4359 	probe_ops->func(ip, parent_ip, probe->tr, probe_ops, probe->data);
4360 	preempt_enable_notrace();
4361 }
4362 
4363 struct ftrace_func_map {
4364 	struct ftrace_func_entry	entry;
4365 	void				*data;
4366 };
4367 
4368 struct ftrace_func_mapper {
4369 	struct ftrace_hash		hash;
4370 };
4371 
4372 /**
4373  * allocate_ftrace_func_mapper - allocate a new ftrace_func_mapper
4374  *
4375  * Returns a ftrace_func_mapper descriptor that can be used to map ips to data.
4376  */
4377 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void)
4378 {
4379 	struct ftrace_hash *hash;
4380 
4381 	/*
4382 	 * The mapper is simply a ftrace_hash, but since the entries
4383 	 * in the hash are not ftrace_func_entry type, we define it
4384 	 * as a separate structure.
4385 	 */
4386 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4387 	return (struct ftrace_func_mapper *)hash;
4388 }
4389 
4390 /**
4391  * ftrace_func_mapper_find_ip - Find some data mapped to an ip
4392  * @mapper: The mapper that has the ip maps
4393  * @ip: the instruction pointer to find the data for
4394  *
4395  * Returns the data mapped to @ip if found, otherwise NULL. The return
4396  * is actually the address of the mapper data pointer. The address is
4397  * returned for use cases where the data is no bigger than a long, and
4398  * the user can use the data pointer as its data instead of having to
4399  * allocate more memory for the reference.
4400  */
4401 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper,
4402 				  unsigned long ip)
4403 {
4404 	struct ftrace_func_entry *entry;
4405 	struct ftrace_func_map *map;
4406 
4407 	entry = ftrace_lookup_ip(&mapper->hash, ip);
4408 	if (!entry)
4409 		return NULL;
4410 
4411 	map = (struct ftrace_func_map *)entry;
4412 	return &map->data;
4413 }
4414 
4415 /**
4416  * ftrace_func_mapper_add_ip - Map some data to an ip
4417  * @mapper: The mapper that has the ip maps
4418  * @ip: The instruction pointer address to map @data to
4419  * @data: The data to map to @ip
4420  *
4421  * Returns 0 on success otherwise an error.
4422  */
4423 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper,
4424 			      unsigned long ip, void *data)
4425 {
4426 	struct ftrace_func_entry *entry;
4427 	struct ftrace_func_map *map;
4428 
4429 	entry = ftrace_lookup_ip(&mapper->hash, ip);
4430 	if (entry)
4431 		return -EBUSY;
4432 
4433 	map = kmalloc(sizeof(*map), GFP_KERNEL);
4434 	if (!map)
4435 		return -ENOMEM;
4436 
4437 	map->entry.ip = ip;
4438 	map->data = data;
4439 
4440 	__add_hash_entry(&mapper->hash, &map->entry);
4441 
4442 	return 0;
4443 }
4444 
4445 /**
4446  * ftrace_func_mapper_remove_ip - Remove an ip from the mapping
4447  * @mapper: The mapper that has the ip maps
4448  * @ip: The instruction pointer address to remove the data from
4449  *
4450  * Returns the data if it is found, otherwise NULL.
4451  * Note, if the data pointer is used as the data itself (see
4452  * ftrace_func_mapper_find_ip()), then the return value may be meaningless
4453  * if the data pointer was set to zero.
4454  */
4455 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper,
4456 				   unsigned long ip)
4457 {
4458 	struct ftrace_func_entry *entry;
4459 	struct ftrace_func_map *map;
4460 	void *data;
4461 
4462 	entry = ftrace_lookup_ip(&mapper->hash, ip);
4463 	if (!entry)
4464 		return NULL;
4465 
4466 	map = (struct ftrace_func_map *)entry;
4467 	data = map->data;
4468 
4469 	remove_hash_entry(&mapper->hash, entry);
4470 	kfree(entry);
4471 
4472 	return data;
4473 }
4474 
4475 /**
4476  * free_ftrace_func_mapper - free a mapping of ips and data
4477  * @mapper: The mapper that has the ip maps
4478  * @free_func: A function to be called on each data item.
4479  *
4480  * This is used to free the function mapper. The @free_func is optional
4481  * and can be used if the data needs to be freed as well.
4482  */
4483 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper,
4484 			     ftrace_mapper_func free_func)
4485 {
4486 	struct ftrace_func_entry *entry;
4487 	struct ftrace_func_map *map;
4488 	struct hlist_head *hhd;
4489 	int size, i;
4490 
4491 	if (!mapper)
4492 		return;
4493 
4494 	if (free_func && mapper->hash.count) {
4495 		size = 1 << mapper->hash.size_bits;
4496 		for (i = 0; i < size; i++) {
4497 			hhd = &mapper->hash.buckets[i];
4498 			hlist_for_each_entry(entry, hhd, hlist) {
4499 				map = (struct ftrace_func_map *)entry;
4500 				free_func(map);
4501 			}
4502 		}
4503 	}
4504 	free_ftrace_hash(&mapper->hash);
4505 }
4506 
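/*
 * Editor's sketch tying the mapper API together; names prefixed "my_" are
 * hypothetical and error handling is abbreviated. It uses the data pointer
 * slot itself as a counter, as described in ftrace_func_mapper_find_ip().
 */
#if 0	/* illustrative only */
static int my_mapper_example(unsigned long ip)
{
	struct ftrace_func_mapper *mapper;
	void **data;

	mapper = allocate_ftrace_func_mapper();
	if (!mapper)
		return -ENOMEM;

	/* Map an initial count of 1 to @ip */
	if (!ftrace_func_mapper_add_ip(mapper, ip, (void *)1L)) {
		data = ftrace_func_mapper_find_ip(mapper, ip);
		if (data)
			(*(long *)data)++;	/* count is now 2 */
		ftrace_func_mapper_remove_ip(mapper, ip);
	}

	free_ftrace_func_mapper(mapper, NULL);
	return 0;
}
#endif
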
4507 static void release_probe(struct ftrace_func_probe *probe)
4508 {
4509 	struct ftrace_probe_ops *probe_ops;
4510 
4511 	mutex_lock(&ftrace_lock);
4512 
4513 	WARN_ON(probe->ref <= 0);
4514 
4515 	/* Subtract the ref that was used to protect this instance */
4516 	probe->ref--;
4517 
4518 	if (!probe->ref) {
4519 		probe_ops = probe->probe_ops;
4520 		/*
4521 		 * Sending zero as ip tells probe_ops to free
4522 		 * the probe->data itself
4523 		 */
4524 		if (probe_ops->free)
4525 			probe_ops->free(probe_ops, probe->tr, 0, probe->data);
4526 		list_del(&probe->list);
4527 		kfree(probe);
4528 	}
4529 	mutex_unlock(&ftrace_lock);
4530 }
4531 
4532 static void acquire_probe_locked(struct ftrace_func_probe *probe)
4533 {
4534 	/*
4535 	 * Add one ref to keep it from being freed when releasing the
4536 	 * ftrace_lock mutex.
4537 	 */
4538 	probe->ref++;
4539 }
4540 
4541 int
4542 register_ftrace_function_probe(char *glob, struct trace_array *tr,
4543 			       struct ftrace_probe_ops *probe_ops,
4544 			       void *data)
4545 {
4546 	struct ftrace_func_entry *entry;
4547 	struct ftrace_func_probe *probe;
4548 	struct ftrace_hash **orig_hash;
4549 	struct ftrace_hash *old_hash;
4550 	struct ftrace_hash *hash;
4551 	int count = 0;
4552 	int size;
4553 	int ret;
4554 	int i;
4555 
4556 	if (WARN_ON(!tr))
4557 		return -EINVAL;
4558 
4559 	/* We do not support '!' for function probes */
4560 	if (WARN_ON(glob[0] == '!'))
4561 		return -EINVAL;
4562 
4563 
4564 	mutex_lock(&ftrace_lock);
4565 	/* Check if the probe_ops is already registered */
4566 	list_for_each_entry(probe, &tr->func_probes, list) {
4567 		if (probe->probe_ops == probe_ops)
4568 			break;
4569 	}
4570 	if (&probe->list == &tr->func_probes) {
4571 		probe = kzalloc(sizeof(*probe), GFP_KERNEL);
4572 		if (!probe) {
4573 			mutex_unlock(&ftrace_lock);
4574 			return -ENOMEM;
4575 		}
4576 		probe->probe_ops = probe_ops;
4577 		probe->ops.func = function_trace_probe_call;
4578 		probe->tr = tr;
4579 		ftrace_ops_init(&probe->ops);
4580 		list_add(&probe->list, &tr->func_probes);
4581 	}
4582 
4583 	acquire_probe_locked(probe);
4584 
4585 	mutex_unlock(&ftrace_lock);
4586 
4587 	/*
4588 	 * Note, there's a small window here where the func_hash->filter_hash
4589 	 * may be NULL or empty. Be careful when reading the loop.
4590 	 */
4591 	mutex_lock(&probe->ops.func_hash->regex_lock);
4592 
4593 	orig_hash = &probe->ops.func_hash->filter_hash;
4594 	old_hash = *orig_hash;
4595 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4596 
4597 	if (!hash) {
4598 		ret = -ENOMEM;
4599 		goto out;
4600 	}
4601 
4602 	ret = ftrace_match_records(hash, glob, strlen(glob));
4603 
4604 	/* Nothing found? */
4605 	if (!ret)
4606 		ret = -EINVAL;
4607 
4608 	if (ret < 0)
4609 		goto out;
4610 
4611 	size = 1 << hash->size_bits;
4612 	for (i = 0; i < size; i++) {
4613 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4614 			if (ftrace_lookup_ip(old_hash, entry->ip))
4615 				continue;
4616 			/*
4617 			 * The caller might want to do something special
4618 			 * for each function we find. We call the callback
4619 			 * to give the caller an opportunity to do so.
4620 			 */
4621 			if (probe_ops->init) {
4622 				ret = probe_ops->init(probe_ops, tr,
4623 						      entry->ip, data,
4624 						      &probe->data);
4625 				if (ret < 0) {
4626 					if (probe_ops->free && count)
4627 						probe_ops->free(probe_ops, tr,
4628 								0, probe->data);
4629 					probe->data = NULL;
4630 					goto out;
4631 				}
4632 			}
4633 			count++;
4634 		}
4635 	}
4636 
4637 	mutex_lock(&ftrace_lock);
4638 
4639 	if (!count) {
4640 		/* Nothing was added? */
4641 		ret = -EINVAL;
4642 		goto out_unlock;
4643 	}
4644 
4645 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4646 					      hash, 1);
4647 	if (ret < 0)
4648 		goto err_unlock;
4649 
4650 	/* One ref for each new function traced */
4651 	probe->ref += count;
4652 
4653 	if (!(probe->ops.flags & FTRACE_OPS_FL_ENABLED))
4654 		ret = ftrace_startup(&probe->ops, 0);
4655 
4656  out_unlock:
4657 	mutex_unlock(&ftrace_lock);
4658 
4659 	if (!ret)
4660 		ret = count;
4661  out:
4662 	mutex_unlock(&probe->ops.func_hash->regex_lock);
4663 	free_ftrace_hash(hash);
4664 
4665 	release_probe(probe);
4666 
4667 	return ret;
4668 
4669  err_unlock:
4670 	if (!probe_ops->free || !count)
4671 		goto out_unlock;
4672 
4673 	/* Failed to do the move, need to call the free functions */
4674 	for (i = 0; i < size; i++) {
4675 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
4676 			if (ftrace_lookup_ip(old_hash, entry->ip))
4677 				continue;
4678 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4679 		}
4680 	}
4681 	goto out_unlock;
4682 }
4683 
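/*
 * Editor's sketch of a minimal register_ftrace_function_probe() caller,
 * modeled on how the function trigger code uses it. Names prefixed "my_"
 * are hypothetical; @tr must be a live trace instance.
 */
#if 0	/* illustrative only */
static void
my_probe_func(unsigned long ip, unsigned long parent_ip,
	      struct trace_array *tr, struct ftrace_probe_ops *ops,
	      void *data)
{
	/* Runs from the traced functions selected by the glob below */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func	= my_probe_func,
};

static int my_probe_attach(struct trace_array *tr)
{
	/* Returns the number of functions attached, or a -errno value */
	return register_ftrace_function_probe("vfs_*", tr, &my_probe_ops, NULL);
}
#endif
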
4684 int
4685 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr,
4686 				      struct ftrace_probe_ops *probe_ops)
4687 {
4688 	struct ftrace_ops_hash old_hash_ops;
4689 	struct ftrace_func_entry *entry;
4690 	struct ftrace_func_probe *probe;
4691 	struct ftrace_glob func_g;
4692 	struct ftrace_hash **orig_hash;
4693 	struct ftrace_hash *old_hash;
4694 	struct ftrace_hash *hash = NULL;
4695 	struct hlist_node *tmp;
4696 	struct hlist_head hhd;
4697 	char str[KSYM_SYMBOL_LEN];
4698 	int count = 0;
4699 	int i, ret = -ENODEV;
4700 	int size;
4701 
4702 	if (!glob || !strlen(glob) || !strcmp(glob, "*"))
4703 		func_g.search = NULL;
4704 	else {
4705 		int not;
4706 
4707 		func_g.type = filter_parse_regex(glob, strlen(glob),
4708 						 &func_g.search, &not);
4709 		func_g.len = strlen(func_g.search);
4710 
4711 		/* we do not support '!' for function probes */
4712 		if (WARN_ON(not))
4713 			return -EINVAL;
4714 	}
4715 
4716 	mutex_lock(&ftrace_lock);
4717 	/* Check if the probe_ops is already registered */
4718 	list_for_each_entry(probe, &tr->func_probes, list) {
4719 		if (probe->probe_ops == probe_ops)
4720 			break;
4721 	}
4722 	if (&probe->list == &tr->func_probes)
4723 		goto err_unlock_ftrace;
4724 
4725 	ret = -EINVAL;
4726 	if (!(probe->ops.flags & FTRACE_OPS_FL_INITIALIZED))
4727 		goto err_unlock_ftrace;
4728 
4729 	acquire_probe_locked(probe);
4730 
4731 	mutex_unlock(&ftrace_lock);
4732 
4733 	mutex_lock(&probe->ops.func_hash->regex_lock);
4734 
4735 	orig_hash = &probe->ops.func_hash->filter_hash;
4736 	old_hash = *orig_hash;
4737 
4738 	if (ftrace_hash_empty(old_hash))
4739 		goto out_unlock;
4740 
4741 	old_hash_ops.filter_hash = old_hash;
4742 	/* Probes only have filters */
4743 	old_hash_ops.notrace_hash = NULL;
4744 
4745 	ret = -ENOMEM;
4746 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
4747 	if (!hash)
4748 		goto out_unlock;
4749 
4750 	INIT_HLIST_HEAD(&hhd);
4751 
4752 	size = 1 << hash->size_bits;
4753 	for (i = 0; i < size; i++) {
4754 		hlist_for_each_entry_safe(entry, tmp, &hash->buckets[i], hlist) {
4755 
4756 			if (func_g.search) {
4757 				kallsyms_lookup(entry->ip, NULL, NULL,
4758 						NULL, str);
4759 				if (!ftrace_match(str, &func_g))
4760 					continue;
4761 			}
4762 			count++;
4763 			remove_hash_entry(hash, entry);
4764 			hlist_add_head(&entry->hlist, &hhd);
4765 		}
4766 	}
4767 
4768 	/* Nothing found? */
4769 	if (!count) {
4770 		ret = -EINVAL;
4771 		goto out_unlock;
4772 	}
4773 
4774 	mutex_lock(&ftrace_lock);
4775 
4776 	WARN_ON(probe->ref < count);
4777 
4778 	probe->ref -= count;
4779 
4780 	if (ftrace_hash_empty(hash))
4781 		ftrace_shutdown(&probe->ops, 0);
4782 
4783 	ret = ftrace_hash_move_and_update_ops(&probe->ops, orig_hash,
4784 					      hash, 1);
4785 
4786 	/* still need to update the function call sites */
4787 	if (ftrace_enabled && !ftrace_hash_empty(hash))
4788 		ftrace_run_modify_code(&probe->ops, FTRACE_UPDATE_CALLS,
4789 				       &old_hash_ops);
4790 	synchronize_rcu();
4791 
4792 	hlist_for_each_entry_safe(entry, tmp, &hhd, hlist) {
4793 		hlist_del(&entry->hlist);
4794 		if (probe_ops->free)
4795 			probe_ops->free(probe_ops, tr, entry->ip, probe->data);
4796 		kfree(entry);
4797 	}
4798 	mutex_unlock(&ftrace_lock);
4799 
4800  out_unlock:
4801 	mutex_unlock(&probe->ops.func_hash->regex_lock);
4802 	free_ftrace_hash(hash);
4803 
4804 	release_probe(probe);
4805 
4806 	return ret;
4807 
4808  err_unlock_ftrace:
4809 	mutex_unlock(&ftrace_lock);
4810 	return ret;
4811 }
4812 
4813 void clear_ftrace_function_probes(struct trace_array *tr)
4814 {
4815 	struct ftrace_func_probe *probe, *n;
4816 
4817 	list_for_each_entry_safe(probe, n, &tr->func_probes, list)
4818 		unregister_ftrace_function_probe_func(NULL, tr, probe->probe_ops);
4819 }
4820 
4821 static LIST_HEAD(ftrace_commands);
4822 static DEFINE_MUTEX(ftrace_cmd_mutex);
4823 
4824 /*
4825  * Currently we only register ftrace commands from __init, so mark this
4826  * __init too.
4827  */
4828 __init int register_ftrace_command(struct ftrace_func_command *cmd)
4829 {
4830 	struct ftrace_func_command *p;
4831 	int ret = 0;
4832 
4833 	mutex_lock(&ftrace_cmd_mutex);
4834 	list_for_each_entry(p, &ftrace_commands, list) {
4835 		if (strcmp(cmd->name, p->name) == 0) {
4836 			ret = -EBUSY;
4837 			goto out_unlock;
4838 		}
4839 	}
4840 	list_add(&cmd->list, &ftrace_commands);
4841  out_unlock:
4842 	mutex_unlock(&ftrace_cmd_mutex);
4843 
4844 	return ret;
4845 }
4846 
4847 /*
4848  * Currently we only unregister ftrace commands from __init, so mark
4849  * this __init too.
4850  */
4851 __init int unregister_ftrace_command(struct ftrace_func_command *cmd)
4852 {
4853 	struct ftrace_func_command *p, *n;
4854 	int ret = -ENODEV;
4855 
4856 	mutex_lock(&ftrace_cmd_mutex);
4857 	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4858 		if (strcmp(cmd->name, p->name) == 0) {
4859 			ret = 0;
4860 			list_del_init(&p->list);
4861 			goto out_unlock;
4862 		}
4863 	}
4864  out_unlock:
4865 	mutex_unlock(&ftrace_cmd_mutex);
4866 
4867 	return ret;
4868 }
4869 
4870 static int ftrace_process_regex(struct ftrace_iterator *iter,
4871 				char *buff, int len, int enable)
4872 {
4873 	struct ftrace_hash *hash = iter->hash;
4874 	struct trace_array *tr = iter->ops->private;
4875 	char *func, *command, *next = buff;
4876 	struct ftrace_func_command *p;
4877 	int ret = -EINVAL;
4878 
4879 	func = strsep(&next, ":");
4880 
4881 	if (!next) {
4882 		ret = ftrace_match_records(hash, func, len);
4883 		if (!ret)
4884 			ret = -EINVAL;
4885 		if (ret < 0)
4886 			return ret;
4887 		return 0;
4888 	}
4889 
4890 	/* command found */
4891 
4892 	command = strsep(&next, ":");
4893 
4894 	mutex_lock(&ftrace_cmd_mutex);
4895 	list_for_each_entry(p, &ftrace_commands, list) {
4896 		if (strcmp(p->name, command) == 0) {
4897 			ret = p->func(tr, hash, func, command, next, enable);
4898 			goto out_unlock;
4899 		}
4900 	}
4901  out_unlock:
4902 	mutex_unlock(&ftrace_cmd_mutex);
4903 
4904 	return ret;
4905 }
4906 
4907 static ssize_t
4908 ftrace_regex_write(struct file *file, const char __user *ubuf,
4909 		   size_t cnt, loff_t *ppos, int enable)
4910 {
4911 	struct ftrace_iterator *iter;
4912 	struct trace_parser *parser;
4913 	ssize_t ret, read;
4914 
4915 	if (!cnt)
4916 		return 0;
4917 
4918 	if (file->f_mode & FMODE_READ) {
4919 		struct seq_file *m = file->private_data;
4920 		iter = m->private;
4921 	} else
4922 		iter = file->private_data;
4923 
4924 	if (unlikely(ftrace_disabled))
4925 		return -ENODEV;
4926 
4927 	/* iter->hash is a local copy, so we don't need regex_lock */
4928 
4929 	parser = &iter->parser;
4930 	read = trace_get_user(parser, ubuf, cnt, ppos);
4931 
4932 	if (read >= 0 && trace_parser_loaded(parser) &&
4933 	    !trace_parser_cont(parser)) {
4934 		ret = ftrace_process_regex(iter, parser->buffer,
4935 					   parser->idx, enable);
4936 		trace_parser_clear(parser);
4937 		if (ret < 0)
4938 			goto out;
4939 	}
4940 
4941 	ret = read;
4942  out:
4943 	return ret;
4944 }
4945 
4946 ssize_t
4947 ftrace_filter_write(struct file *file, const char __user *ubuf,
4948 		    size_t cnt, loff_t *ppos)
4949 {
4950 	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4951 }
4952 
4953 ssize_t
4954 ftrace_notrace_write(struct file *file, const char __user *ubuf,
4955 		     size_t cnt, loff_t *ppos)
4956 {
4957 	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4958 }
4959 
4960 static int
4961 ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4962 {
4963 	struct ftrace_func_entry *entry;
4964 
4965 	if (!ftrace_location(ip))
4966 		return -EINVAL;
4967 
4968 	if (remove) {
4969 		entry = ftrace_lookup_ip(hash, ip);
4970 		if (!entry)
4971 			return -ENOENT;
4972 		free_hash_entry(hash, entry);
4973 		return 0;
4974 	}
4975 
4976 	return add_hash_entry(hash, ip);
4977 }
4978 
4979 static int
4980 ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4981 		unsigned long ip, int remove, int reset, int enable)
4982 {
4983 	struct ftrace_hash **orig_hash;
4984 	struct ftrace_hash *hash;
4985 	int ret;
4986 
4987 	if (unlikely(ftrace_disabled))
4988 		return -ENODEV;
4989 
4990 	mutex_lock(&ops->func_hash->regex_lock);
4991 
4992 	if (enable)
4993 		orig_hash = &ops->func_hash->filter_hash;
4994 	else
4995 		orig_hash = &ops->func_hash->notrace_hash;
4996 
4997 	if (reset)
4998 		hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4999 	else
5000 		hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
5001 
5002 	if (!hash) {
5003 		ret = -ENOMEM;
5004 		goto out_regex_unlock;
5005 	}
5006 
5007 	if (buf && !ftrace_match_records(hash, buf, len)) {
5008 		ret = -EINVAL;
5009 		goto out_regex_unlock;
5010 	}
5011 	if (ip) {
5012 		ret = ftrace_match_addr(hash, ip, remove);
5013 		if (ret < 0)
5014 			goto out_regex_unlock;
5015 	}
5016 
5017 	mutex_lock(&ftrace_lock);
5018 	ret = ftrace_hash_move_and_update_ops(ops, orig_hash, hash, enable);
5019 	mutex_unlock(&ftrace_lock);
5020 
5021  out_regex_unlock:
5022 	mutex_unlock(&ops->func_hash->regex_lock);
5023 
5024 	free_ftrace_hash(hash);
5025 	return ret;
5026 }
5027 
5028 static int
5029 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
5030 		int reset, int enable)
5031 {
5032 	return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
5033 }
5034 
5035 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
5036 
5037 struct ftrace_direct_func {
5038 	struct list_head	next;
5039 	unsigned long		addr;
5040 	int			count;
5041 };
5042 
5043 static LIST_HEAD(ftrace_direct_funcs);
5044 
5045 /**
5046  * ftrace_find_direct_func - test whether an address is a registered direct caller
5047  * @addr: The address of a registered direct caller
5048  *
5049  * This searches to see if a ftrace direct caller has been registered
5050  * at a specific address, and if so, it returns a descriptor for it.
5051  *
5052  * This can be used by architecture code to see if an address is
5053  * a direct caller (trampoline) attached to a fentry/mcount location.
5054  * This is useful for the function_graph tracer, as it may need to
5055  * do adjustments if it traced a location that also has a direct
5056  * trampoline attached to it.
5057  */
5058 struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
5059 {
5060 	struct ftrace_direct_func *entry;
5061 	bool found = false;
5062 
5063 	/* May be called by fgraph trampoline (protected by rcu tasks) */
5064 	list_for_each_entry_rcu(entry, &ftrace_direct_funcs, next) {
5065 		if (entry->addr == addr) {
5066 			found = true;
5067 			break;
5068 		}
5069 	}
5070 	if (found)
5071 		return entry;
5072 
5073 	return NULL;
5074 }
5075 
5076 static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
5077 {
5078 	struct ftrace_direct_func *direct;
5079 
5080 	direct = kmalloc(sizeof(*direct), GFP_KERNEL);
5081 	if (!direct)
5082 		return NULL;
5083 	direct->addr = addr;
5084 	direct->count = 0;
5085 	list_add_rcu(&direct->next, &ftrace_direct_funcs);
5086 	ftrace_direct_func_count++;
5087 	return direct;
5088 }
5089 
5090 /**
5091  * register_ftrace_direct - Call a custom trampoline directly
5092  * @ip: The address of the nop at the beginning of a function
5093  * @addr: The address of the trampoline to call at @ip
5094  *
5095  * This is used to connect a direct call from the nop location (@ip)
5096  * at the start of ftrace traced functions. The location that it calls
5097  * (@addr) must be able to handle a direct call, and save the parameters
5098  * of the function being traced, and restore them (or inject new ones
5099  * if needed), before returning.
5100  *
5101  * Returns:
5102  *  0 on success
5103  *  -EBUSY - Another direct function is already attached (there can be only one)
5104  *  -ENODEV - @ip does not point to a ftrace nop location (or not supported)
5105  *  -ENOMEM - There was an allocation failure.
5106  */
5107 int register_ftrace_direct(unsigned long ip, unsigned long addr)
5108 {
5109 	struct ftrace_direct_func *direct;
5110 	struct ftrace_func_entry *entry;
5111 	struct ftrace_hash *free_hash = NULL;
5112 	struct dyn_ftrace *rec;
5113 	int ret = -EBUSY;
5114 
5115 	mutex_lock(&direct_mutex);
5116 
5117 	/* See if there's a direct function at @ip already */
5118 	if (ftrace_find_rec_direct(ip))
5119 		goto out_unlock;
5120 
5121 	ret = -ENODEV;
5122 	rec = lookup_rec(ip, ip);
5123 	if (!rec)
5124 		goto out_unlock;
5125 
5126 	/*
5127 	 * Check if the rec says it has a direct call but we didn't
5128 	 * find one earlier?
5129 	 */
5130 	if (WARN_ON(rec->flags & FTRACE_FL_DIRECT))
5131 		goto out_unlock;
5132 
5133 	/* Make sure the ip points to the exact record */
5134 	if (ip != rec->ip) {
5135 		ip = rec->ip;
5136 		/* Need to check this ip for a direct. */
5137 		if (ftrace_find_rec_direct(ip))
5138 			goto out_unlock;
5139 	}
5140 
5141 	ret = -ENOMEM;
5142 	direct = ftrace_find_direct_func(addr);
5143 	if (!direct) {
5144 		direct = ftrace_alloc_direct_func(addr);
5145 		if (!direct)
5146 			goto out_unlock;
5147 	}
5148 
5149 	entry = ftrace_add_rec_direct(ip, addr, &free_hash);
5150 	if (!entry)
5151 		goto out_unlock;
5152 
5153 	ret = ftrace_set_filter_ip(&direct_ops, ip, 0, 0);
5154 	if (ret)
5155 		remove_hash_entry(direct_functions, entry);
5156 
5157 	if (!ret && !(direct_ops.flags & FTRACE_OPS_FL_ENABLED)) {
5158 		ret = register_ftrace_function(&direct_ops);
5159 		if (ret)
5160 			ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5161 	}
5162 
5163 	if (ret) {
5164 		kfree(entry);
5165 		if (!direct->count) {
5166 			list_del_rcu(&direct->next);
5167 			synchronize_rcu_tasks();
5168 			kfree(direct);
5169 			if (free_hash)
5170 				free_ftrace_hash(free_hash);
5171 			free_hash = NULL;
5172 			ftrace_direct_func_count--;
5173 		}
5174 	} else {
5175 		direct->count++;
5176 	}
5177  out_unlock:
5178 	mutex_unlock(&direct_mutex);
5179 
5180 	if (free_hash) {
5181 		synchronize_rcu_tasks();
5182 		free_ftrace_hash(free_hash);
5183 	}
5184 
5185 	return ret;
5186 }
5187 EXPORT_SYMBOL_GPL(register_ftrace_direct);
5188 
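/*
 * Editor's sketch modeled on samples/ftrace/ftrace-direct.c. The trampoline
 * body is architecture-specific assembly (save args, call a handler,
 * restore, return) and is elided; my_tramp and my_ip are hypothetical.
 */
#if 0	/* illustrative only */
extern void my_tramp(void);	/* defined in arch-specific assembly */

static unsigned long my_ip;	/* a resolved fentry/mcount site address */

static int __init my_direct_init(void)
{
	return register_ftrace_direct(my_ip, (unsigned long)my_tramp);
}

static void __exit my_direct_exit(void)
{
	unregister_ftrace_direct(my_ip, (unsigned long)my_tramp);
}
#endif
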
5189 static struct ftrace_func_entry *find_direct_entry(unsigned long *ip,
5190 						   struct dyn_ftrace **recp)
5191 {
5192 	struct ftrace_func_entry *entry;
5193 	struct dyn_ftrace *rec;
5194 
5195 	rec = lookup_rec(*ip, *ip);
5196 	if (!rec)
5197 		return NULL;
5198 
5199 	entry = __ftrace_lookup_ip(direct_functions, rec->ip);
5200 	if (!entry) {
5201 		WARN_ON(rec->flags & FTRACE_FL_DIRECT);
5202 		return NULL;
5203 	}
5204 
5205 	WARN_ON(!(rec->flags & FTRACE_FL_DIRECT));
5206 
5207 	/* Passed in ip just needs to be on the call site */
5208 	*ip = rec->ip;
5209 
5210 	if (recp)
5211 		*recp = rec;
5212 
5213 	return entry;
5214 }
5215 
5216 int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
5217 {
5218 	struct ftrace_direct_func *direct;
5219 	struct ftrace_func_entry *entry;
5220 	int ret = -ENODEV;
5221 
5222 	mutex_lock(&direct_mutex);
5223 
5224 	entry = find_direct_entry(&ip, NULL);
5225 	if (!entry)
5226 		goto out_unlock;
5227 
5228 	if (direct_functions->count == 1)
5229 		unregister_ftrace_function(&direct_ops);
5230 
5231 	ret = ftrace_set_filter_ip(&direct_ops, ip, 1, 0);
5232 
5233 	WARN_ON(ret);
5234 
5235 	remove_hash_entry(direct_functions, entry);
5236 
5237 	direct = ftrace_find_direct_func(addr);
5238 	if (!WARN_ON(!direct)) {
5239 		/* This is the good path (see the ! before WARN) */
5240 		direct->count--;
5241 		WARN_ON(direct->count < 0);
5242 		if (!direct->count) {
5243 			list_del_rcu(&direct->next);
5244 			synchronize_rcu_tasks();
5245 			kfree(direct);
5246 			kfree(entry);
5247 			ftrace_direct_func_count--;
5248 		}
5249 	}
5250  out_unlock:
5251 	mutex_unlock(&direct_mutex);
5252 
5253 	return ret;
5254 }
5255 EXPORT_SYMBOL_GPL(unregister_ftrace_direct);
5256 
5257 static struct ftrace_ops stub_ops = {
5258 	.func		= ftrace_stub,
5259 };
5260 
5261 /**
5262  * ftrace_modify_direct_caller - modify ftrace nop directly
5263  * @entry: The ftrace hash entry of the direct helper for @rec
5264  * @rec: The record representing the function site to patch
5265  * @old_addr: The location that the site at @rec->ip currently calls
5266  * @new_addr: The location that the site at @rec->ip should call
5267  *
5268  * An architecture may override this function to optimize the
5269  * changing of the direct callback on an ftrace nop location.
5270  * This is called with the ftrace_lock mutex held, and no other
5271  * ftrace callbacks are on the associated record (@rec). Thus,
5272  * it is safe to modify the ftrace record, where it should be
5273  * currently calling @old_addr directly, to call @new_addr.
5274  *
5275  * Safety checks should be made to make sure that the code at
5276  * @rec->ip is currently calling @old_addr. And this must
5277  * also update entry->direct to @new_addr.
5278  */
5279 int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
5280 				       struct dyn_ftrace *rec,
5281 				       unsigned long old_addr,
5282 				       unsigned long new_addr)
5283 {
5284 	unsigned long ip = rec->ip;
5285 	int ret;
5286 
5287 	/*
5288 	 * The ftrace_lock was used to determine if the record
5289 	 * had more than one registered user attached to it. If it did,
5290 	 * we needed to prevent that from changing to do the quick
5291 	 * switch. But if it did not (only a direct caller was attached),
5292 	 * then this function is called. This function can deal with
5293 	 * other callers attaching to the rec we care about, and since
5294 	 * it uses standard ftrace calls that take the ftrace_lock
5295 	 * mutex, we need to release the lock here.
5296 	 */
5297 	mutex_unlock(&ftrace_lock);
5298 
5299 	/*
5300 	 * By setting a stub function at the same address, we force
5301 	 * the code to call the iterator and the direct_ops helper.
5302 	 * This means that @ip does not call the direct call, and
5303 	 * we can simply modify it.
5304 	 */
5305 	ret = ftrace_set_filter_ip(&stub_ops, ip, 0, 0);
5306 	if (ret)
5307 		goto out_lock;
5308 
5309 	ret = register_ftrace_function(&stub_ops);
5310 	if (ret) {
5311 		ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5312 		goto out_lock;
5313 	}
5314 
5315 	entry->direct = new_addr;
5316 
5317 	/*
5318 	 * By removing the stub, we put back the direct call, calling
5319 	 * the @new_addr.
5320 	 */
5321 	unregister_ftrace_function(&stub_ops);
5322 	ftrace_set_filter_ip(&stub_ops, ip, 1, 0);
5323 
5324  out_lock:
5325 	mutex_lock(&ftrace_lock);
5326 
5327 	return ret;
5328 }
5329 
5330 /**
5331  * modify_ftrace_direct - Modify an existing direct call to call something else
5332  * @ip: The instruction pointer to modify
5333  * @old_addr: The address that the current @ip calls directly
5334  * @new_addr: The address that the @ip should call
5335  *
5336  * This modifies a ftrace direct caller at an instruction pointer without
5337  * having to disable it first. The direct call will switch over to the
5338  * @new_addr without missing anything.
5339  *
5340  * Returns: zero on success. Non zero on error, which includes:
5341  *  -ENODEV : the @ip given has no direct caller attached
5342  *  -EINVAL : the @old_addr does not match the current direct caller
5343  */
5344 int modify_ftrace_direct(unsigned long ip,
5345 			 unsigned long old_addr, unsigned long new_addr)
5346 {
5347 	struct ftrace_direct_func *direct, *new_direct = NULL;
5348 	struct ftrace_func_entry *entry;
5349 	struct dyn_ftrace *rec;
5350 	int ret = -ENODEV;
5351 
5352 	mutex_lock(&direct_mutex);
5353 
5354 	mutex_lock(&ftrace_lock);
5355 	entry = find_direct_entry(&ip, &rec);
5356 	if (!entry)
5357 		goto out_unlock;
5358 
5359 	ret = -EINVAL;
5360 	if (entry->direct != old_addr)
5361 		goto out_unlock;
5362 
5363 	direct = ftrace_find_direct_func(old_addr);
5364 	if (WARN_ON(!direct))
5365 		goto out_unlock;
5366 	if (direct->count > 1) {
5367 		ret = -ENOMEM;
5368 		new_direct = ftrace_alloc_direct_func(new_addr);
5369 		if (!new_direct)
5370 			goto out_unlock;
5371 		direct->count--;
5372 		new_direct->count++;
5373 	} else {
5374 		direct->addr = new_addr;
5375 	}
5376 
5377 	/*
5378 	 * If there's no other ftrace callback on the rec->ip location,
5379 	 * then it can be changed directly by the architecture.
5380 	 * If there is another caller, then we just need to change the
5381 	 * direct caller helper to point to @new_addr.
5382 	 */
5383 	if (ftrace_rec_count(rec) == 1) {
5384 		ret = ftrace_modify_direct_caller(entry, rec, old_addr, new_addr);
5385 	} else {
5386 		entry->direct = new_addr;
5387 		ret = 0;
5388 	}
5389 
5390 	if (unlikely(ret && new_direct)) {
5391 		direct->count++;
5392 		list_del_rcu(&new_direct->next);
5393 		synchronize_rcu_tasks();
5394 		kfree(new_direct);
5395 		ftrace_direct_func_count--;
5396 	}
5397 
5398  out_unlock:
5399 	mutex_unlock(&ftrace_lock);
5400 	mutex_unlock(&direct_mutex);
5401 	return ret;
5402 }
5403 EXPORT_SYMBOL_GPL(modify_ftrace_direct);
5404 
5405 #define MULTI_FLAGS (FTRACE_OPS_FL_IPMODIFY | FTRACE_OPS_FL_DIRECT | \
5406 		     FTRACE_OPS_FL_SAVE_REGS)
5407 
5408 static int check_direct_multi(struct ftrace_ops *ops)
5409 {
5410 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5411 		return -EINVAL;
5412 	if ((ops->flags & MULTI_FLAGS) != MULTI_FLAGS)
5413 		return -EINVAL;
5414 	return 0;
5415 }
5416 
5417 static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long addr)
5418 {
5419 	struct ftrace_func_entry *entry, *del;
5420 	int size, i;
5421 
5422 	size = 1 << hash->size_bits;
5423 	for (i = 0; i < size; i++) {
5424 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5425 			del = __ftrace_lookup_ip(direct_functions, entry->ip);
5426 			if (del && del->direct == addr) {
5427 				remove_hash_entry(direct_functions, del);
5428 				kfree(del);
5429 			}
5430 		}
5431 	}
5432 }
5433 
5434 /**
5435  * register_ftrace_direct_multi - Call a custom trampoline directly
5436  * for multiple functions registered in @ops
5437  * @ops: The address of the struct ftrace_ops object
5438  * @addr: The address of the trampoline to call at @ops functions
5439  *
5440  * This is used to connect direct calls to @addr from the nop locations
5441  * of the functions registered in @ops (via the ftrace_set_filter_ip()
5442  * function).
5443  *
5444  * The location that it calls (@addr) must be able to handle a direct call,
5445  * and save the parameters of the function being traced, and restore them
5446  * (or inject new ones if needed), before returning.
5447  *
5448  * Returns:
5449  *  0 on success
5450  *  -EINVAL  - The @ops object was already registered with this call or
5451  *             when there are no functions in @ops object.
5452  *  -EBUSY   - Another direct function is already attached (there can be only one)
5453  *  -ENODEV  - @ip does not point to a ftrace nop location (or not supported)
5454  *  -ENOMEM  - There was an allocation failure.
5455  */
5456 int register_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5457 {
5458 	struct ftrace_hash *hash, *free_hash = NULL;
5459 	struct ftrace_func_entry *entry, *new;
5460 	int err = -EBUSY, size, i;
5461 
5462 	if (ops->func || ops->trampoline)
5463 		return -EINVAL;
5464 	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED))
5465 		return -EINVAL;
5466 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
5467 		return -EINVAL;
5468 
5469 	hash = ops->func_hash->filter_hash;
5470 	if (ftrace_hash_empty(hash))
5471 		return -EINVAL;
5472 
5473 	mutex_lock(&direct_mutex);
5474 
5475 	/* Make sure requested entries are not already registered.. */
5476 	size = 1 << hash->size_bits;
5477 	for (i = 0; i < size; i++) {
5478 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5479 			if (ftrace_find_rec_direct(entry->ip))
5480 				goto out_unlock;
5481 		}
5482 	}
5483 
5484 	/* ... and insert them to direct_functions hash. */
5485 	err = -ENOMEM;
5486 	for (i = 0; i < size; i++) {
5487 		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
5488 			new = ftrace_add_rec_direct(entry->ip, addr, &free_hash);
5489 			if (!new)
5490 				goto out_remove;
5491 			entry->direct = addr;
5492 		}
5493 	}
5494 
5495 	ops->func = call_direct_funcs;
5496 	ops->flags = MULTI_FLAGS;
5497 	ops->trampoline = FTRACE_REGS_ADDR;
5498 
5499 	err = register_ftrace_function(ops);
5500 
5501  out_remove:
5502 	if (err)
5503 		remove_direct_functions_hash(hash, addr);
5504 
5505  out_unlock:
5506 	mutex_unlock(&direct_mutex);
5507 
5508 	if (free_hash) {
5509 		synchronize_rcu_tasks();
5510 		free_ftrace_hash(free_hash);
5511 	}
5512 	return err;
5513 }
5514 EXPORT_SYMBOL_GPL(register_ftrace_direct_multi);
5515 
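/*
 * Editor's sketch of the 'multi' flow: the filter ips are installed on the
 * ops first (ftrace_set_filter_ip() also initializes the ops), then one
 * trampoline is attached to all of them. Names are hypothetical; see
 * samples/ftrace for the real sample modules.
 */
#if 0	/* illustrative only */
static struct ftrace_ops my_multi_ops;

static int my_multi_attach(unsigned long ip1, unsigned long ip2,
			   unsigned long tramp)
{
	int ret;

	ret = ftrace_set_filter_ip(&my_multi_ops, ip1, 0, 0);
	if (!ret)
		ret = ftrace_set_filter_ip(&my_multi_ops, ip2, 0, 0);
	if (!ret)
		ret = register_ftrace_direct_multi(&my_multi_ops, tramp);
	return ret;
}
#endif
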
5516 /**
5517  * unregister_ftrace_direct_multi - Remove calls to a custom trampoline
5518  * previously registered by register_ftrace_direct_multi() for the @ops object.
5519  * @ops: The address of the struct ftrace_ops object
5520  *
5521  * This is used to remove direct calls to @addr from the nop locations
5522  * of the functions registered in @ops (via the ftrace_set_filter_ip()
5523  * function).
5524  *
5525  * Returns:
5526  *  0 on success
5527  *  -EINVAL - The @ops object was not properly registered.
5528  */
5529 int unregister_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5530 {
5531 	struct ftrace_hash *hash = ops->func_hash->filter_hash;
5532 	int err;
5533 
5534 	if (check_direct_multi(ops))
5535 		return -EINVAL;
5536 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5537 		return -EINVAL;
5538 
5539 	mutex_lock(&direct_mutex);
5540 	err = unregister_ftrace_function(ops);
5541 	remove_direct_functions_hash(hash, addr);
5542 	mutex_unlock(&direct_mutex);
5543 	return err;
5544 }
5545 EXPORT_SYMBOL_GPL(unregister_ftrace_direct_multi);
5546 
5547 /**
5548  * modify_ftrace_direct_multi - Modify an existing direct 'multi' call
5549  * to call something else
5550  * @ops: The address of the struct ftrace_ops object
5551  * @addr: The address of the new trampoline to call at @ops functions
5552  *
5553  * This is used to unregister the currently registered direct caller and
5554  * register a new one, @addr, on the functions registered in the @ops object.
5555  *
5556  * Note there's a window between the ftrace_shutdown and ftrace_startup calls
5557  * where no callbacks will be called.
5558  *
5559  * Returns: zero on success. Non zero on error, which includes:
5560  *  -EINVAL - The @ops object was not properly registered.
5561  */
5562 int modify_ftrace_direct_multi(struct ftrace_ops *ops, unsigned long addr)
5563 {
5564 	struct ftrace_hash *hash;
5565 	struct ftrace_func_entry *entry, *iter;
5566 	static struct ftrace_ops tmp_ops = {
5567 		.func		= ftrace_stub,
5568 		.flags		= FTRACE_OPS_FL_STUB,
5569 	};
5570 	int i, size;
5571 	int err;
5572 
5573 	if (check_direct_multi(ops))
5574 		return -EINVAL;
5575 	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
5576 		return -EINVAL;
5577 
5578 	mutex_lock(&direct_mutex);
5579 
5580 	/* Enable the tmp_ops to have the same functions as the direct ops */
5581 	ftrace_ops_init(&tmp_ops);
5582 	tmp_ops.func_hash = ops->func_hash;
5583 
5584 	err = register_ftrace_function(&tmp_ops);
5585 	if (err)
5586 		goto out_direct;
5587 
5588 	/*
5589 	 * Now the ftrace_ops_list_func() is called to do the direct callers.
5590 	 * We can safely change the direct functions attached to each entry.
5591 	 */
5592 	mutex_lock(&ftrace_lock);
5593 
5594 	hash = ops->func_hash->filter_hash;
5595 	size = 1 << hash->size_bits;
5596 	for (i = 0; i < size; i++) {
5597 		hlist_for_each_entry(iter, &hash->buckets[i], hlist) {
5598 			entry = __ftrace_lookup_ip(direct_functions, iter->ip);
5599 			if (!entry)
5600 				continue;
5601 			entry->direct = addr;
5602 		}
5603 	}
5604 
5605 	mutex_unlock(&ftrace_lock);
5606 
5607 	/* Removing the tmp_ops will add the updated direct callers to the functions */
5608 	unregister_ftrace_function(&tmp_ops);
5609 
5610  out_direct:
5611 	mutex_unlock(&direct_mutex);
5612 	return err;
5613 }
5614 EXPORT_SYMBOL_GPL(modify_ftrace_direct_multi);
5615 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
5616 
5617 /**
5618  * ftrace_set_filter_ip - set a function to filter on in ftrace by address
5619  * @ops - the ops to set the filter with
5620  * @ip - the address to add to or remove from the filter.
5621  * @remove - non zero to remove the ip from the filter
5622  * @reset - non zero to reset all filters before applying this filter.
5623  *
5624  * Filters denote which functions should be enabled when tracing is enabled.
5625  * If @ip is NULL, this fails to update the filter.
5626  */
5627 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
5628 			 int remove, int reset)
5629 {
5630 	ftrace_ops_init(ops);
5631 	return ftrace_set_addr(ops, ip, remove, reset, 1);
5632 }
5633 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
5634 
5635 /**
5636  * ftrace_ops_set_global_filter - setup ops to use global filters
5637  * @ops - the ops which will use the global filters
5638  *
5639  * ftrace users who need global function trace filtering should call this.
5640  * It can set the global filter only if ops were not initialized before.
5641  */
5642 void ftrace_ops_set_global_filter(struct ftrace_ops *ops)
5643 {
5644 	if (ops->flags & FTRACE_OPS_FL_INITIALIZED)
5645 		return;
5646 
5647 	ftrace_ops_init(ops);
5648 	ops->func_hash = &global_ops.local_hash;
5649 }
5650 EXPORT_SYMBOL_GPL(ftrace_ops_set_global_filter);
5651 
5652 static int
5653 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
5654 		 int reset, int enable)
5655 {
5656 	return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
5657 }
5658 
5659 /**
5660  * ftrace_set_filter - set a function to filter on in ftrace
5661  * @ops - the ops to set the filter with
5662  * @buf - the string that holds the function filter text.
5663  * @len - the length of the string.
5664  * @reset - non zero to reset all filters before applying this filter.
5665  *
5666  * Filters denote which functions should be enabled when tracing is enabled.
5667  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5668  */
5669 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
5670 		       int len, int reset)
5671 {
5672 	ftrace_ops_init(ops);
5673 	return ftrace_set_regex(ops, buf, len, reset, 1);
5674 }
5675 EXPORT_SYMBOL_GPL(ftrace_set_filter);
5676 
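/*
 * Editor's sketch of typical filter usage: restrict a callback to a single
 * function before registering the ops. my_ops/my_callback are hypothetical,
 * and recursion-protection flags are omitted for brevity.
 */
#if 0	/* illustrative only */
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	/* Called on entry to every function left in the filter */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
};

static int __init my_tracer_init(void)
{
	static char func[] = "vfs_read";
	int ret;

	/* reset=1: discard any previous filter before adding func */
	ret = ftrace_set_filter(&my_ops, func, strlen(func), 1);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}
#endif
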
5677 /**
5678  * ftrace_set_notrace - set a function to not trace in ftrace
5679  * @ops - the ops to set the notrace filter with
5680  * @buf - the string that holds the function notrace text.
5681  * @len - the length of the string.
5682  * @reset - non zero to reset all filters before applying this filter.
5683  *
5684  * Notrace Filters denote which functions should not be enabled when tracing
5685  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5686  * for tracing.
5687  */
5688 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
5689 			int len, int reset)
5690 {
5691 	ftrace_ops_init(ops);
5692 	return ftrace_set_regex(ops, buf, len, reset, 0);
5693 }
5694 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
5695 /**
5696  * ftrace_set_global_filter - set a function to filter on with global tracers
5697  * @buf - the string that holds the function filter text.
5698  * @len - the length of the string.
5699  * @reset - non zero to reset all filters before applying this filter.
5700  *
5701  * Filters denote which functions should be enabled when tracing is enabled.
5702  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
5703  */
5704 void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
5705 {
5706 	ftrace_set_regex(&global_ops, buf, len, reset, 1);
5707 }
5708 EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
5709 
5710 /**
5711  * ftrace_set_global_notrace - set a function to not trace with global tracers
5712  * @buf - the string that holds the function notrace text.
5713  * @len - the length of the string.
5714  * @reset - non zero to reset all filters before applying this filter.
5715  *
5716  * Notrace Filters denote which functions should not be enabled when tracing
5717  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
5718  * for tracing.
5719  */
5720 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
5721 {
5722 	ftrace_set_regex(&global_ops, buf, len, reset, 0);
5723 }
5724 EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
5725 
5726 /*
5727  * command line interface to allow users to set filters on boot up.
5728  */
5729 #define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
5730 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5731 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
5732 
5733 /* Used by function selftest to not test if filter is set */
5734 /* Used by the function selftest to avoid testing when a filter is set */
5735 
5736 static int __init set_ftrace_notrace(char *str)
5737 {
5738 	ftrace_filter_param = true;
5739 	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
5740 	return 1;
5741 }
5742 __setup("ftrace_notrace=", set_ftrace_notrace);
5743 
5744 static int __init set_ftrace_filter(char *str)
5745 {
5746 	ftrace_filter_param = true;
5747 	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
5748 	return 1;
5749 }
5750 __setup("ftrace_filter=", set_ftrace_filter);
5751 
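/*
 * Usage note (editor's addition): both parameters take a comma-separated
 * list of globs on the kernel command line, applied before any tracer can
 * run, e.g.:
 *
 *	ftrace_filter=vfs_read,kmem_cache_* ftrace_notrace=*spin_lock*
 *
 * ftrace_set_early_filter() below splits the saved buffers on ','.
 */
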
5752 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5753 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
5754 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
5755 static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
5756 
5757 static int __init set_graph_function(char *str)
5758 {
5759 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
5760 	return 1;
5761 }
5762 __setup("ftrace_graph_filter=", set_graph_function);
5763 
5764 static int __init set_graph_notrace_function(char *str)
5765 {
5766 	strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
5767 	return 1;
5768 }
5769 __setup("ftrace_graph_notrace=", set_graph_notrace_function);
5770 
5771 static int __init set_graph_max_depth_function(char *str)
5772 {
5773 	if (!str)
5774 		return 0;
5775 	fgraph_max_depth = simple_strtoul(str, NULL, 0);
5776 	return 1;
5777 }
5778 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
5779 
5780 static void __init set_ftrace_early_graph(char *buf, int enable)
5781 {
5782 	int ret;
5783 	char *func;
5784 	struct ftrace_hash *hash;
5785 
5786 	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
5787 	if (MEM_FAIL(!hash, "Failed to allocate hash\n"))
5788 		return;
5789 
5790 	while (buf) {
5791 		func = strsep(&buf, ",");
5792 		/* we allow only one expression at a time */
5793 		ret = ftrace_graph_set_hash(hash, func);
5794 		if (ret)
5795 			printk(KERN_DEBUG "ftrace: function %s not "
5796 					  "traceable\n", func);
5797 	}
5798 
5799 	if (enable)
5800 		ftrace_graph_hash = hash;
5801 	else
5802 		ftrace_graph_notrace_hash = hash;
5803 }
5804 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5805 
5806 void __init
5807 ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
5808 {
5809 	char *func;
5810 
5811 	ftrace_ops_init(ops);
5812 
5813 	while (buf) {
5814 		func = strsep(&buf, ",");
5815 		ftrace_set_regex(ops, func, strlen(func), 0, enable);
5816 	}
5817 }
5818 
5819 static void __init set_ftrace_early_filters(void)
5820 {
5821 	if (ftrace_filter_buf[0])
5822 		ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
5823 	if (ftrace_notrace_buf[0])
5824 		ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
5825 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5826 	if (ftrace_graph_buf[0])
5827 		set_ftrace_early_graph(ftrace_graph_buf, 1);
5828 	if (ftrace_graph_notrace_buf[0])
5829 		set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
5830 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
5831 }
5832 
5833 int ftrace_regex_release(struct inode *inode, struct file *file)
5834 {
5835 	struct seq_file *m = (struct seq_file *)file->private_data;
5836 	struct ftrace_iterator *iter;
5837 	struct ftrace_hash **orig_hash;
5838 	struct trace_parser *parser;
5839 	int filter_hash;
5840 
5841 	if (file->f_mode & FMODE_READ) {
5842 		iter = m->private;
5843 		seq_release(inode, file);
5844 	} else
5845 		iter = file->private_data;
5846 
5847 	parser = &iter->parser;
5848 	if (trace_parser_loaded(parser)) {
5849 		int enable = !(iter->flags & FTRACE_ITER_NOTRACE);
5850 
5851 		ftrace_process_regex(iter, parser->buffer,
5852 				     parser->idx, enable);
5853 	}
5854 
5855 	trace_parser_put(parser);
5856 
5857 	mutex_lock(&iter->ops->func_hash->regex_lock);
5858 
5859 	if (file->f_mode & FMODE_WRITE) {
5860 		filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
5861 
5862 		if (filter_hash) {
5863 			orig_hash = &iter->ops->func_hash->filter_hash;
5864 			if (iter->tr && !list_empty(&iter->tr->mod_trace))
5865 				iter->hash->flags |= FTRACE_HASH_FL_MOD;
5866 		} else
5867 			orig_hash = &iter->ops->func_hash->notrace_hash;
5868 
5869 		mutex_lock(&ftrace_lock);
5870 		ftrace_hash_move_and_update_ops(iter->ops, orig_hash,
5871 						      iter->hash, filter_hash);
5872 		mutex_unlock(&ftrace_lock);
5873 	} else {
5874 		/* For read only, the hash is the ops hash itself; clear it so it is not freed below */
5875 		iter->hash = NULL;
5876 	}
5877 
5878 	mutex_unlock(&iter->ops->func_hash->regex_lock);
5879 	free_ftrace_hash(iter->hash);
5880 	if (iter->tr)
5881 		trace_array_put(iter->tr);
5882 	kfree(iter);
5883 
5884 	return 0;
5885 }
5886 
5887 static const struct file_operations ftrace_avail_fops = {
5888 	.open = ftrace_avail_open,
5889 	.read = seq_read,
5890 	.llseek = seq_lseek,
5891 	.release = seq_release_private,
5892 };
5893 
5894 static const struct file_operations ftrace_enabled_fops = {
5895 	.open = ftrace_enabled_open,
5896 	.read = seq_read,
5897 	.llseek = seq_lseek,
5898 	.release = seq_release_private,
5899 };
5900 
5901 static const struct file_operations ftrace_filter_fops = {
5902 	.open = ftrace_filter_open,
5903 	.read = seq_read,
5904 	.write = ftrace_filter_write,
5905 	.llseek = tracing_lseek,
5906 	.release = ftrace_regex_release,
5907 };
5908 
5909 static const struct file_operations ftrace_notrace_fops = {
5910 	.open = ftrace_notrace_open,
5911 	.read = seq_read,
5912 	.write = ftrace_notrace_write,
5913 	.llseek = tracing_lseek,
5914 	.release = ftrace_regex_release,
5915 };
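
/*
 * The fops above back the tracefs control files. A userspace program
 * (or "echo" in a shell) sets a filter by simply writing to the file,
 * along the lines of:
 *
 *	int fd = open("/sys/kernel/tracing/set_ftrace_filter",
 *		      O_WRONLY | O_TRUNC);
 *	write(fd, "kfree", 5);
 *	close(fd);
 *
 * Opening with O_TRUNC clears the existing filter first; without it,
 * writes append to the current filter.
 */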
5916 
5917 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
5918 
5919 static DEFINE_MUTEX(graph_lock);
5920 
5921 struct ftrace_hash __rcu *ftrace_graph_hash = EMPTY_HASH;
5922 struct ftrace_hash __rcu *ftrace_graph_notrace_hash = EMPTY_HASH;
5923 
5924 enum graph_filter_type {
5925 	GRAPH_FILTER_NOTRACE	= 0,
5926 	GRAPH_FILTER_FUNCTION,
5927 };
5928 
5929 #define FTRACE_GRAPH_EMPTY	((void *)1)
5930 
5931 struct ftrace_graph_data {
5932 	struct ftrace_hash		*hash;
5933 	struct ftrace_func_entry	*entry;
5934 	int				idx;   /* for hash table iteration */
5935 	enum graph_filter_type		type;
5936 	struct ftrace_hash		*new_hash;
5937 	const struct seq_operations	*seq_ops;
5938 	struct trace_parser		parser;
5939 };
5940 
5941 static void *
5942 __g_next(struct seq_file *m, loff_t *pos)
5943 {
5944 	struct ftrace_graph_data *fgd = m->private;
5945 	struct ftrace_func_entry *entry = fgd->entry;
5946 	struct hlist_head *head;
5947 	int i, idx = fgd->idx;
5948 
5949 	if (*pos >= fgd->hash->count)
5950 		return NULL;
5951 
5952 	if (entry) {
5953 		hlist_for_each_entry_continue(entry, hlist) {
5954 			fgd->entry = entry;
5955 			return entry;
5956 		}
5957 
5958 		idx++;
5959 	}
5960 
5961 	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
5962 		head = &fgd->hash->buckets[i];
5963 		hlist_for_each_entry(entry, head, hlist) {
5964 			fgd->entry = entry;
5965 			fgd->idx = i;
5966 			return entry;
5967 		}
5968 	}
5969 	return NULL;
5970 }
5971 
5972 static void *
5973 g_next(struct seq_file *m, void *v, loff_t *pos)
5974 {
5975 	(*pos)++;
5976 	return __g_next(m, pos);
5977 }
5978 
5979 static void *g_start(struct seq_file *m, loff_t *pos)
5980 {
5981 	struct ftrace_graph_data *fgd = m->private;
5982 
5983 	mutex_lock(&graph_lock);
5984 
5985 	if (fgd->type == GRAPH_FILTER_FUNCTION)
5986 		fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
5987 					lockdep_is_held(&graph_lock));
5988 	else
5989 		fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
5990 					lockdep_is_held(&graph_lock));
5991 
5992 	/* Hash is empty, tell g_show to print that all functions are enabled */
5993 	if (ftrace_hash_empty(fgd->hash) && !*pos)
5994 		return FTRACE_GRAPH_EMPTY;
5995 
5996 	fgd->idx = 0;
5997 	fgd->entry = NULL;
5998 	return __g_next(m, pos);
5999 }
6000 
6001 static void g_stop(struct seq_file *m, void *p)
6002 {
6003 	mutex_unlock(&graph_lock);
6004 }
6005 
6006 static int g_show(struct seq_file *m, void *v)
6007 {
6008 	struct ftrace_func_entry *entry = v;
6009 
6010 	if (!entry)
6011 		return 0;
6012 
6013 	if (entry == FTRACE_GRAPH_EMPTY) {
6014 		struct ftrace_graph_data *fgd = m->private;
6015 
6016 		if (fgd->type == GRAPH_FILTER_FUNCTION)
6017 			seq_puts(m, "#### all functions enabled ####\n");
6018 		else
6019 			seq_puts(m, "#### no functions disabled ####\n");
6020 		return 0;
6021 	}
6022 
6023 	seq_printf(m, "%ps\n", (void *)entry->ip);
6024 
6025 	return 0;
6026 }
6027 
6028 static const struct seq_operations ftrace_graph_seq_ops = {
6029 	.start = g_start,
6030 	.next = g_next,
6031 	.stop = g_stop,
6032 	.show = g_show,
6033 };
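
/*
 * The above follows the usual seq_file contract: g_start() takes
 * graph_lock and returns the first entry (or FTRACE_GRAPH_EMPTY when
 * the hash is empty), g_next()/g_show() walk and print the entries one
 * at a time, and g_stop() drops the lock when the read is finished or
 * the output buffer is full.
 */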
6034 
6035 static int
6036 __ftrace_graph_open(struct inode *inode, struct file *file,
6037 		    struct ftrace_graph_data *fgd)
6038 {
6039 	int ret;
6040 	struct ftrace_hash *new_hash = NULL;
6041 
6042 	ret = security_locked_down(LOCKDOWN_TRACEFS);
6043 	if (ret)
6044 		return ret;
6045 
6046 	if (file->f_mode & FMODE_WRITE) {
6047 		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
6048 
6049 		if (trace_parser_get_init(&fgd->parser, FTRACE_BUFF_MAX))
6050 			return -ENOMEM;
6051 
6052 		if (file->f_flags & O_TRUNC)
6053 			new_hash = alloc_ftrace_hash(size_bits);
6054 		else
6055 			new_hash = alloc_and_copy_ftrace_hash(size_bits,
6056 							      fgd->hash);
6057 		if (!new_hash) {
6058 			ret = -ENOMEM;
6059 			goto out;
6060 		}
6061 	}
6062 
6063 	if (file->f_mode & FMODE_READ) {
6064 		ret = seq_open(file, &ftrace_graph_seq_ops);
6065 		if (!ret) {
6066 			struct seq_file *m = file->private_data;
6067 			m->private = fgd;
6068 		} else {
6069 			/* Failed */
6070 			free_ftrace_hash(new_hash);
6071 			new_hash = NULL;
6072 		}
6073 	} else
6074 		file->private_data = fgd;
6075 
6076 out:
6077 	if (ret < 0 && file->f_mode & FMODE_WRITE)
6078 		trace_parser_put(&fgd->parser);
6079 
6080 	fgd->new_hash = new_hash;
6081 
6082 	/*
6083 	 * All uses of fgd->hash must be taken with the graph_lock
6084 	 * held. The graph_lock is going to be released, so force
6085 	 * fgd->hash to be reinitialized when it is taken again.
6086 	 */
6087 	fgd->hash = NULL;
6088 
6089 	return ret;
6090 }
6091 
6092 static int
6093 ftrace_graph_open(struct inode *inode, struct file *file)
6094 {
6095 	struct ftrace_graph_data *fgd;
6096 	int ret;
6097 
6098 	if (unlikely(ftrace_disabled))
6099 		return -ENODEV;
6100 
6101 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6102 	if (fgd == NULL)
6103 		return -ENOMEM;
6104 
6105 	mutex_lock(&graph_lock);
6106 
6107 	fgd->hash = rcu_dereference_protected(ftrace_graph_hash,
6108 					lockdep_is_held(&graph_lock));
6109 	fgd->type = GRAPH_FILTER_FUNCTION;
6110 	fgd->seq_ops = &ftrace_graph_seq_ops;
6111 
6112 	ret = __ftrace_graph_open(inode, file, fgd);
6113 	if (ret < 0)
6114 		kfree(fgd);
6115 
6116 	mutex_unlock(&graph_lock);
6117 	return ret;
6118 }
6119 
6120 static int
6121 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
6122 {
6123 	struct ftrace_graph_data *fgd;
6124 	int ret;
6125 
6126 	if (unlikely(ftrace_disabled))
6127 		return -ENODEV;
6128 
6129 	fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
6130 	if (fgd == NULL)
6131 		return -ENOMEM;
6132 
6133 	mutex_lock(&graph_lock);
6134 
6135 	fgd->hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6136 					lockdep_is_held(&graph_lock));
6137 	fgd->type = GRAPH_FILTER_NOTRACE;
6138 	fgd->seq_ops = &ftrace_graph_seq_ops;
6139 
6140 	ret = __ftrace_graph_open(inode, file, fgd);
6141 	if (ret < 0)
6142 		kfree(fgd);
6143 
6144 	mutex_unlock(&graph_lock);
6145 	return ret;
6146 }
6147 
6148 static int
6149 ftrace_graph_release(struct inode *inode, struct file *file)
6150 {
6151 	struct ftrace_graph_data *fgd;
6152 	struct ftrace_hash *old_hash, *new_hash;
6153 	struct trace_parser *parser;
6154 	int ret = 0;
6155 
6156 	if (file->f_mode & FMODE_READ) {
6157 		struct seq_file *m = file->private_data;
6158 
6159 		fgd = m->private;
6160 		seq_release(inode, file);
6161 	} else {
6162 		fgd = file->private_data;
6163 	}
6164 
6165 
6166 	if (file->f_mode & FMODE_WRITE) {
6167 
6168 		parser = &fgd->parser;
6169 
6170 		if (trace_parser_loaded(parser)) {
6171 			ret = ftrace_graph_set_hash(fgd->new_hash,
6172 						    parser->buffer);
6173 		}
6174 
6175 		trace_parser_put(parser);
6176 
6177 		new_hash = __ftrace_hash_move(fgd->new_hash);
6178 		if (!new_hash) {
6179 			ret = -ENOMEM;
6180 			goto out;
6181 		}
6182 
6183 		mutex_lock(&graph_lock);
6184 
6185 		if (fgd->type == GRAPH_FILTER_FUNCTION) {
6186 			old_hash = rcu_dereference_protected(ftrace_graph_hash,
6187 					lockdep_is_held(&graph_lock));
6188 			rcu_assign_pointer(ftrace_graph_hash, new_hash);
6189 		} else {
6190 			old_hash = rcu_dereference_protected(ftrace_graph_notrace_hash,
6191 					lockdep_is_held(&graph_lock));
6192 			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
6193 		}
6194 
6195 		mutex_unlock(&graph_lock);
6196 
6197 		/*
6198 		 * We need to do a hard force of sched synchronization.
6199 		 * This is because we use preempt_disable() to do RCU, but
6200 		 * the function tracers can be called where RCU is not watching
6201 		 * (like before user_exit()). We cannot rely on the RCU
6202 		 * infrastructure to do the synchronization, thus we must do it
6203 		 * ourselves.
6204 		 */
6205 		if (old_hash != EMPTY_HASH)
6206 			synchronize_rcu_tasks_rude();
6207 
6208 		free_ftrace_hash(old_hash);
6209 	}
6210 
6211  out:
6212 	free_ftrace_hash(fgd->new_hash);
6213 	kfree(fgd);
6214 
6215 	return ret;
6216 }
6217 
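/*
 * Parse one filter expression from @buffer and update @hash with the
 * matching records: matches are added for a plain pattern, or removed
 * again when the pattern is negated with a leading '!'. Returns
 * -EINVAL if the expression matched nothing.
 */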
6218 static int
6219 ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
6220 {
6221 	struct ftrace_glob func_g;
6222 	struct dyn_ftrace *rec;
6223 	struct ftrace_page *pg;
6224 	struct ftrace_func_entry *entry;
6225 	int fail = 1;
6226 	int not;
6227 
6228 	/* decode regex */
6229 	func_g.type = filter_parse_regex(buffer, strlen(buffer),
6230 					 &func_g.search, &not);
6231 
6232 	func_g.len = strlen(func_g.search);
6233 
6234 	mutex_lock(&ftrace_lock);
6235 
6236 	if (unlikely(ftrace_disabled)) {
6237 		mutex_unlock(&ftrace_lock);
6238 		return -ENODEV;
6239 	}
6240 
6241 	do_for_each_ftrace_rec(pg, rec) {
6242 
6243 		if (rec->flags & FTRACE_FL_DISABLED)
6244 			continue;
6245 
6246 		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
6247 			entry = ftrace_lookup_ip(hash, rec->ip);
6248 
6249 			if (!not) {
6250 				fail = 0;
6251 
6252 				if (entry)
6253 					continue;
6254 				if (add_hash_entry(hash, rec->ip) < 0)
6255 					goto out;
6256 			} else {
6257 				if (entry) {
6258 					free_hash_entry(hash, entry);
6259 					fail = 0;
6260 				}
6261 			}
6262 		}
6263 	} while_for_each_ftrace_rec();
6264 out:
6265 	mutex_unlock(&ftrace_lock);
6266 
6267 	if (fail)
6268 		return -EINVAL;
6269 
6270 	return 0;
6271 }
6272 
6273 static ssize_t
6274 ftrace_graph_write(struct file *file, const char __user *ubuf,
6275 		   size_t cnt, loff_t *ppos)
6276 {
6277 	ssize_t read, ret = 0;
6278 	struct ftrace_graph_data *fgd = file->private_data;
6279 	struct trace_parser *parser;
6280 
6281 	if (!cnt)
6282 		return 0;
6283 
6284 	/* Read mode uses seq functions */
6285 	if (file->f_mode & FMODE_READ) {
6286 		struct seq_file *m = file->private_data;
6287 		fgd = m->private;
6288 	}
6289 
6290 	parser = &fgd->parser;
6291 
6292 	read = trace_get_user(parser, ubuf, cnt, ppos);
6293 
6294 	if (read >= 0 && trace_parser_loaded(parser) &&
6295 	    !trace_parser_cont(parser)) {
6296 
6297 		ret = ftrace_graph_set_hash(fgd->new_hash,
6298 					    parser->buffer);
6299 		trace_parser_clear(parser);
6300 	}
6301 
6302 	if (!ret)
6303 		ret = read;
6304 
6305 	return ret;
6306 }
6307 
6308 static const struct file_operations ftrace_graph_fops = {
6309 	.open		= ftrace_graph_open,
6310 	.read		= seq_read,
6311 	.write		= ftrace_graph_write,
6312 	.llseek		= tracing_lseek,
6313 	.release	= ftrace_graph_release,
6314 };
6315 
6316 static const struct file_operations ftrace_graph_notrace_fops = {
6317 	.open		= ftrace_graph_notrace_open,
6318 	.read		= seq_read,
6319 	.write		= ftrace_graph_write,
6320 	.llseek		= tracing_lseek,
6321 	.release	= ftrace_graph_release,
6322 };
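
/*
 * Like set_ftrace_filter, the set_graph_function and set_graph_notrace
 * files that these fops implement are driven by writing function names
 * to them, e.g.:
 *
 *	echo do_sys_open > /sys/kernel/tracing/set_graph_function
 */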
6323 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6324 
6325 void ftrace_create_filter_files(struct ftrace_ops *ops,
6326 				struct dentry *parent)
6327 {
6328 
6329 	trace_create_file("set_ftrace_filter", TRACE_MODE_WRITE, parent,
6330 			  ops, &ftrace_filter_fops);
6331 
6332 	trace_create_file("set_ftrace_notrace", TRACE_MODE_WRITE, parent,
6333 			  ops, &ftrace_notrace_fops);
6334 }
6335 
6336 /*
6337  * The name "destroy_filter_files" is really a misnomer. Although
6338  * in the future, it may actually delete the files, but this is
6339  * really intended to make sure the ops passed in are disabled
6340  * and that when this function returns, the caller is free to
6341  * free the ops.
6342  *
6343  * The "destroy" name is only to match the "create" name that this
6344  * should be paired with.
6345  */
6346 void ftrace_destroy_filter_files(struct ftrace_ops *ops)
6347 {
6348 	mutex_lock(&ftrace_lock);
6349 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
6350 		ftrace_shutdown(ops, 0);
6351 	ops->flags |= FTRACE_OPS_FL_DELETED;
6352 	ftrace_free_filter(ops);
6353 	mutex_unlock(&ftrace_lock);
6354 }
6355 
6356 static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
6357 {
6358 
6359 	trace_create_file("available_filter_functions", TRACE_MODE_READ,
6360 			d_tracer, NULL, &ftrace_avail_fops);
6361 
6362 	trace_create_file("enabled_functions", TRACE_MODE_READ,
6363 			d_tracer, NULL, &ftrace_enabled_fops);
6364 
6365 	ftrace_create_filter_files(&global_ops, d_tracer);
6366 
6367 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6368 	trace_create_file("set_graph_function", TRACE_MODE_WRITE, d_tracer,
6369 				    NULL,
6370 				    &ftrace_graph_fops);
6371 	trace_create_file("set_graph_notrace", TRACE_MODE_WRITE, d_tracer,
6372 				    NULL,
6373 				    &ftrace_graph_notrace_fops);
6374 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
6375 
6376 	return 0;
6377 }
6378 
6379 static int ftrace_cmp_ips(const void *a, const void *b)
6380 {
6381 	const unsigned long *ipa = a;
6382 	const unsigned long *ipb = b;
6383 
6384 	if (*ipa > *ipb)
6385 		return 1;
6386 	if (*ipa < *ipb)
6387 		return -1;
6388 	return 0;
6389 }
6390 
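/*
 * ftrace_process_locs() takes the [start, end) array of mcount/fentry
 * call site addresses (the __mcount_loc section for the core kernel,
 * mod->ftrace_callsites for a module), sorts it, and converts each
 * address into a struct dyn_ftrace record on the ftrace_pages list.
 */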
6391 static int ftrace_process_locs(struct module *mod,
6392 			       unsigned long *start,
6393 			       unsigned long *end)
6394 {
6395 	struct ftrace_page *start_pg;
6396 	struct ftrace_page *pg;
6397 	struct dyn_ftrace *rec;
6398 	unsigned long count;
6399 	unsigned long *p;
6400 	unsigned long addr;
6401 	unsigned long flags = 0; /* Shut up gcc */
6402 	int ret = -ENOMEM;
6403 
6404 	count = end - start;
6405 
6406 	if (!count)
6407 		return 0;
6408 
6409 	sort(start, count, sizeof(*start),
6410 	     ftrace_cmp_ips, NULL);
6411 
6412 	start_pg = ftrace_allocate_pages(count);
6413 	if (!start_pg)
6414 		return -ENOMEM;
6415 
6416 	mutex_lock(&ftrace_lock);
6417 
6418 	/*
6419 	 * The core kernel and each module need their own pages, as
6420 	 * modules will free them when they are removed.
6421 	 * Force a new page to be allocated for modules.
6422 	 */
6423 	if (!mod) {
6424 		WARN_ON(ftrace_pages || ftrace_pages_start);
6425 		/* First initialization */
6426 		ftrace_pages = ftrace_pages_start = start_pg;
6427 	} else {
6428 		if (!ftrace_pages)
6429 			goto out;
6430 
6431 		if (WARN_ON(ftrace_pages->next)) {
6432 			/* Hmm, we have free pages? */
6433 			while (ftrace_pages->next)
6434 				ftrace_pages = ftrace_pages->next;
6435 		}
6436 
6437 		ftrace_pages->next = start_pg;
6438 	}
6439 
6440 	p = start;
6441 	pg = start_pg;
6442 	while (p < end) {
6443 		unsigned long end_offset;
6444 		addr = ftrace_call_adjust(*p++);
6445 		/*
6446 		 * Some architecture linkers will pad between
6447 		 * the different mcount_loc sections of different
6448 		 * object files to satisfy alignments.
6449 		 * Skip any NULL pointers.
6450 		 */
6451 		if (!addr)
6452 			continue;
6453 
6454 		end_offset = (pg->index+1) * sizeof(pg->records[0]);
6455 		if (end_offset > PAGE_SIZE << pg->order) {
6456 			/* We should have allocated enough */
6457 			if (WARN_ON(!pg->next))
6458 				break;
6459 			pg = pg->next;
6460 		}
6461 
6462 		rec = &pg->records[pg->index++];
6463 		rec->ip = addr;
6464 	}
6465 
6466 	/* We should have used all pages */
6467 	WARN_ON(pg->next);
6468 
6469 	/* Assign the last page to ftrace_pages */
6470 	ftrace_pages = pg;
6471 
6472 	/*
6473 	 * We only need to disable interrupts at startup
6474 	 * because we are modifying code that an interrupt
6475 	 * may execute, and the modification is not atomic.
6476 	 * But for modules, nothing runs the code we modify
6477 	 * until we are finished with it, and there's no
6478 	 * reason to cause large interrupt latencies while we do it.
6479 	 */
6480 	if (!mod)
6481 		local_irq_save(flags);
6482 	ftrace_update_code(mod, start_pg);
6483 	if (!mod)
6484 		local_irq_restore(flags);
6485 	ret = 0;
6486  out:
6487 	mutex_unlock(&ftrace_lock);
6488 
6489 	return ret;
6490 }
6491 
6492 struct ftrace_mod_func {
6493 	struct list_head	list;
6494 	char			*name;
6495 	unsigned long		ip;
6496 	unsigned int		size;
6497 };
6498 
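/*
 * An ftrace_mod_map preserves a small symbol table (name, address and
 * size of each function) for module memory that has been freed, so
 * that kallsyms lookups on stale addresses still resolve. It is freed
 * via call_rcu() from ftrace_release_mod().
 */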
6499 struct ftrace_mod_map {
6500 	struct rcu_head		rcu;
6501 	struct list_head	list;
6502 	struct module		*mod;
6503 	unsigned long		start_addr;
6504 	unsigned long		end_addr;
6505 	struct list_head	funcs;
6506 	unsigned int		num_funcs;
6507 };
6508 
6509 static int ftrace_get_trampoline_kallsym(unsigned int symnum,
6510 					 unsigned long *value, char *type,
6511 					 char *name, char *module_name,
6512 					 int *exported)
6513 {
6514 	struct ftrace_ops *op;
6515 
6516 	list_for_each_entry_rcu(op, &ftrace_ops_trampoline_list, list) {
6517 		if (!op->trampoline || symnum--)
6518 			continue;
6519 		*value = op->trampoline;
6520 		*type = 't';
6521 		strlcpy(name, FTRACE_TRAMPOLINE_SYM, KSYM_NAME_LEN);
6522 		strlcpy(module_name, FTRACE_TRAMPOLINE_MOD, MODULE_NAME_LEN);
6523 		*exported = 0;
6524 		return 0;
6525 	}
6526 
6527 	return -ERANGE;
6528 }
6529 
6530 #ifdef CONFIG_MODULES
6531 
6532 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
6533 
6534 static LIST_HEAD(ftrace_mod_maps);
6535 
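/*
 * Count the registered ftrace_ops whose filters reference this record,
 * updating the record's REGS and TRAMP flags to match what those ops
 * require. Used when a module is loaded to give its new records the
 * correct reference counts.
 */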
6536 static int referenced_filters(struct dyn_ftrace *rec)
6537 {
6538 	struct ftrace_ops *ops;
6539 	int cnt = 0;
6540 
6541 	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
6542 		if (ops_references_rec(ops, rec)) {
6543 			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_DIRECT))
6544 				continue;
6545 			if (WARN_ON_ONCE(ops->flags & FTRACE_OPS_FL_IPMODIFY))
6546 				continue;
6547 			cnt++;
6548 			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
6549 				rec->flags |= FTRACE_FL_REGS;
6550 			if (cnt == 1 && ops->trampoline)
6551 				rec->flags |= FTRACE_FL_TRAMP;
6552 			else
6553 				rec->flags &= ~FTRACE_FL_TRAMP;
6554 		}
6555 	}
6556 
6557 	return cnt;
6558 }
6559 
6560 static void
6561 clear_mod_from_hash(struct ftrace_page *pg, struct ftrace_hash *hash)
6562 {
6563 	struct ftrace_func_entry *entry;
6564 	struct dyn_ftrace *rec;
6565 	int i;
6566 
6567 	if (ftrace_hash_empty(hash))
6568 		return;
6569 
6570 	for (i = 0; i < pg->index; i++) {
6571 		rec = &pg->records[i];
6572 		entry = __ftrace_lookup_ip(hash, rec->ip);
6573 		/*
6574 		 * Do not allow this rec to match again.
6575 		 * Yeah, it may waste some memory, but will be removed
6576 		 * if/when the hash is modified again.
6577 		 */
6578 		if (entry)
6579 			entry->ip = 0;
6580 	}
6581 }
6582 
6583 /* Clear any records from hashes */
6584 static void clear_mod_from_hashes(struct ftrace_page *pg)
6585 {
6586 	struct trace_array *tr;
6587 
6588 	mutex_lock(&trace_types_lock);
6589 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6590 		if (!tr->ops || !tr->ops->func_hash)
6591 			continue;
6592 		mutex_lock(&tr->ops->func_hash->regex_lock);
6593 		clear_mod_from_hash(pg, tr->ops->func_hash->filter_hash);
6594 		clear_mod_from_hash(pg, tr->ops->func_hash->notrace_hash);
6595 		mutex_unlock(&tr->ops->func_hash->regex_lock);
6596 	}
6597 	mutex_unlock(&trace_types_lock);
6598 }
6599 
6600 static void ftrace_free_mod_map(struct rcu_head *rcu)
6601 {
6602 	struct ftrace_mod_map *mod_map = container_of(rcu, struct ftrace_mod_map, rcu);
6603 	struct ftrace_mod_func *mod_func;
6604 	struct ftrace_mod_func *n;
6605 
6606 	/* All the contents of mod_map are now not visible to readers */
6607 	list_for_each_entry_safe(mod_func, n, &mod_map->funcs, list) {
6608 		kfree(mod_func->name);
6609 		list_del(&mod_func->list);
6610 		kfree(mod_func);
6611 	}
6612 
6613 	kfree(mod_map);
6614 }
6615 
6616 void ftrace_release_mod(struct module *mod)
6617 {
6618 	struct ftrace_mod_map *mod_map;
6619 	struct ftrace_mod_map *n;
6620 	struct dyn_ftrace *rec;
6621 	struct ftrace_page **last_pg;
6622 	struct ftrace_page *tmp_page = NULL;
6623 	struct ftrace_page *pg;
6624 
6625 	mutex_lock(&ftrace_lock);
6626 
6627 	if (ftrace_disabled)
6628 		goto out_unlock;
6629 
6630 	list_for_each_entry_safe(mod_map, n, &ftrace_mod_maps, list) {
6631 		if (mod_map->mod == mod) {
6632 			list_del_rcu(&mod_map->list);
6633 			call_rcu(&mod_map->rcu, ftrace_free_mod_map);
6634 			break;
6635 		}
6636 	}
6637 
6638 	/*
6639 	 * Each module has its own ftrace_pages; remove
6640 	 * them from the list.
6641 	 */
6642 	last_pg = &ftrace_pages_start;
6643 	for (pg = ftrace_pages_start; pg; pg = *last_pg) {
6644 		rec = &pg->records[0];
6645 		if (within_module_core(rec->ip, mod) ||
6646 		    within_module_init(rec->ip, mod)) {
6647 			/*
6648 			 * As core pages are first, the first
6649 			 * page should never be a module page.
6650 			 */
6651 			if (WARN_ON(pg == ftrace_pages_start))
6652 				goto out_unlock;
6653 
6654 			/* Check if we are deleting the last page */
6655 			if (pg == ftrace_pages)
6656 				ftrace_pages = next_to_ftrace_page(last_pg);
6657 
6658 			ftrace_update_tot_cnt -= pg->index;
6659 			*last_pg = pg->next;
6660 
6661 			pg->next = tmp_page;
6662 			tmp_page = pg;
6663 		} else
6664 			last_pg = &pg->next;
6665 	}
6666  out_unlock:
6667 	mutex_unlock(&ftrace_lock);
6668 
6669 	for (pg = tmp_page; pg; pg = tmp_page) {
6670 
6671 		/* Needs to be called outside of ftrace_lock */
6672 		clear_mod_from_hashes(pg);
6673 
6674 		if (pg->records) {
6675 			free_pages((unsigned long)pg->records, pg->order);
6676 			ftrace_number_of_pages -= 1 << pg->order;
6677 		}
6678 		tmp_page = pg->next;
6679 		kfree(pg);
6680 		ftrace_number_of_groups--;
6681 	}
6682 }
6683 
6684 void ftrace_module_enable(struct module *mod)
6685 {
6686 	struct dyn_ftrace *rec;
6687 	struct ftrace_page *pg;
6688 
6689 	mutex_lock(&ftrace_lock);
6690 
6691 	if (ftrace_disabled)
6692 		goto out_unlock;
6693 
6694 	/*
6695 	 * If tracing is enabled, go ahead and enable the record.
6696 	 *
6697 	 * The reason not to enable the record immediately is the
6698 	 * inherent check of ftrace_make_nop/ftrace_make_call for
6699 	 * correct previous instructions.  Doing the NOP conversion
6700 	 * first puts the module into the correct state, thus
6701 	 * passing the ftrace_make_call check.
6702 	 *
6703 	 * We also delay this to after the module code already set the
6704 	 * text to read-only, as we now need to set it back to read-write
6705 	 * so that we can modify the text.
6706 	 */
6707 	if (ftrace_start_up)
6708 		ftrace_arch_code_modify_prepare();
6709 
6710 	do_for_each_ftrace_rec(pg, rec) {
6711 		int cnt;
6712 		/*
6713 		 * do_for_each_ftrace_rec() is a double loop.
6714 		 * Module text shares the pg. If a record is
6715 		 * not part of this module, then skip this pg,
6716 		 * which the "break" will do.
6717 		 */
6718 		if (!within_module_core(rec->ip, mod) &&
6719 		    !within_module_init(rec->ip, mod))
6720 			break;
6721 
6722 		cnt = 0;
6723 
6724 		/*
6725 		 * When adding a module, we need to check if tracers are
6726 		 * currently enabled and if they are, and can trace this record,
6727 		 * we need to enable the module functions as well as update the
6728 		 * reference counts for those function records.
6729 		 */
6730 		if (ftrace_start_up)
6731 			cnt += referenced_filters(rec);
6732 
6733 		rec->flags &= ~FTRACE_FL_DISABLED;
6734 		rec->flags += cnt;
6735 
6736 		if (ftrace_start_up && cnt) {
6737 			int failed = __ftrace_replace_code(rec, 1);
6738 			if (failed) {
6739 				ftrace_bug(failed, rec);
6740 				goto out_loop;
6741 			}
6742 		}
6743 
6744 	} while_for_each_ftrace_rec();
6745 
6746  out_loop:
6747 	if (ftrace_start_up)
6748 		ftrace_arch_code_modify_post_process();
6749 
6750  out_unlock:
6751 	mutex_unlock(&ftrace_lock);
6752 
6753 	process_cached_mods(mod->name);
6754 }
6755 
6756 void ftrace_module_init(struct module *mod)
6757 {
6758 	if (ftrace_disabled || !mod->num_ftrace_callsites)
6759 		return;
6760 
6761 	ftrace_process_locs(mod, mod->ftrace_callsites,
6762 			    mod->ftrace_callsites + mod->num_ftrace_callsites);
6763 }
6764 
6765 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6766 				struct dyn_ftrace *rec)
6767 {
6768 	struct ftrace_mod_func *mod_func;
6769 	unsigned long symsize;
6770 	unsigned long offset;
6771 	char str[KSYM_SYMBOL_LEN];
6772 	char *modname;
6773 	const char *ret;
6774 
6775 	ret = kallsyms_lookup(rec->ip, &symsize, &offset, &modname, str);
6776 	if (!ret)
6777 		return;
6778 
6779 	mod_func = kmalloc(sizeof(*mod_func), GFP_KERNEL);
6780 	if (!mod_func)
6781 		return;
6782 
6783 	mod_func->name = kstrdup(str, GFP_KERNEL);
6784 	if (!mod_func->name) {
6785 		kfree(mod_func);
6786 		return;
6787 	}
6788 
6789 	mod_func->ip = rec->ip - offset;
6790 	mod_func->size = symsize;
6791 
6792 	mod_map->num_funcs++;
6793 
6794 	list_add_rcu(&mod_func->list, &mod_map->funcs);
6795 }
6796 
6797 static struct ftrace_mod_map *
6798 allocate_ftrace_mod_map(struct module *mod,
6799 			unsigned long start, unsigned long end)
6800 {
6801 	struct ftrace_mod_map *mod_map;
6802 
6803 	mod_map = kmalloc(sizeof(*mod_map), GFP_KERNEL);
6804 	if (!mod_map)
6805 		return NULL;
6806 
6807 	mod_map->mod = mod;
6808 	mod_map->start_addr = start;
6809 	mod_map->end_addr = end;
6810 	mod_map->num_funcs = 0;
6811 
6812 	INIT_LIST_HEAD_RCU(&mod_map->funcs);
6813 
6814 	list_add_rcu(&mod_map->list, &ftrace_mod_maps);
6815 
6816 	return mod_map;
6817 }
6818 
6819 static const char *
6820 ftrace_func_address_lookup(struct ftrace_mod_map *mod_map,
6821 			   unsigned long addr, unsigned long *size,
6822 			   unsigned long *off, char *sym)
6823 {
6824 	struct ftrace_mod_func *found_func = NULL;
6825 	struct ftrace_mod_func *mod_func;
6826 
6827 	list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6828 		if (addr >= mod_func->ip &&
6829 		    addr < mod_func->ip + mod_func->size) {
6830 			found_func = mod_func;
6831 			break;
6832 		}
6833 	}
6834 
6835 	if (found_func) {
6836 		if (size)
6837 			*size = found_func->size;
6838 		if (off)
6839 			*off = addr - found_func->ip;
6840 		if (sym)
6841 			strlcpy(sym, found_func->name, KSYM_NAME_LEN);
6842 
6843 		return found_func->name;
6844 	}
6845 
6846 	return NULL;
6847 }
6848 
6849 const char *
6850 ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
6851 		   unsigned long *off, char **modname, char *sym)
6852 {
6853 	struct ftrace_mod_map *mod_map;
6854 	const char *ret = NULL;
6855 
6856 	/* mod_map is freed via call_rcu() */
6857 	preempt_disable();
6858 	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6859 		ret = ftrace_func_address_lookup(mod_map, addr, size, off, sym);
6860 		if (ret) {
6861 			if (modname)
6862 				*modname = mod_map->mod->name;
6863 			break;
6864 		}
6865 	}
6866 	preempt_enable();
6867 
6868 	return ret;
6869 }
6870 
6871 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
6872 			   char *type, char *name,
6873 			   char *module_name, int *exported)
6874 {
6875 	struct ftrace_mod_map *mod_map;
6876 	struct ftrace_mod_func *mod_func;
6877 	int ret;
6878 
6879 	preempt_disable();
6880 	list_for_each_entry_rcu(mod_map, &ftrace_mod_maps, list) {
6881 
6882 		if (symnum >= mod_map->num_funcs) {
6883 			symnum -= mod_map->num_funcs;
6884 			continue;
6885 		}
6886 
6887 		list_for_each_entry_rcu(mod_func, &mod_map->funcs, list) {
6888 			if (symnum > 1) {
6889 				symnum--;
6890 				continue;
6891 			}
6892 
6893 			*value = mod_func->ip;
6894 			*type = 'T';
6895 			strlcpy(name, mod_func->name, KSYM_NAME_LEN);
6896 			strlcpy(module_name, mod_map->mod->name, MODULE_NAME_LEN);
6897 			*exported = 1;
6898 			preempt_enable();
6899 			return 0;
6900 		}
6901 		WARN_ON(1);
6902 		break;
6903 	}
6904 	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
6905 					    module_name, exported);
6906 	preempt_enable();
6907 	return ret;
6908 }
6909 
6910 #else
6911 static void save_ftrace_mod_rec(struct ftrace_mod_map *mod_map,
6912 				struct dyn_ftrace *rec) { }
6913 static inline struct ftrace_mod_map *
6914 allocate_ftrace_mod_map(struct module *mod,
6915 			unsigned long start, unsigned long end)
6916 {
6917 	return NULL;
6918 }
6919 int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
6920 			   char *type, char *name, char *module_name,
6921 			   int *exported)
6922 {
6923 	int ret;
6924 
6925 	preempt_disable();
6926 	ret = ftrace_get_trampoline_kallsym(symnum, value, type, name,
6927 					    module_name, exported);
6928 	preempt_enable();
6929 	return ret;
6930 }
6931 #endif /* CONFIG_MODULES */
6932 
6933 struct ftrace_init_func {
6934 	struct list_head list;
6935 	unsigned long ip;
6936 };
6937 
6938 /* Clear any init ips from hashes */
6939 static void
6940 clear_func_from_hash(struct ftrace_init_func *func, struct ftrace_hash *hash)
6941 {
6942 	struct ftrace_func_entry *entry;
6943 
6944 	entry = ftrace_lookup_ip(hash, func->ip);
6945 	/*
6946 	 * Do not allow this rec to match again.
6947 	 * Yeah, it may waste some memory, but will be removed
6948 	 * if/when the hash is modified again.
6949 	 */
6950 	if (entry)
6951 		entry->ip = 0;
6952 }
6953 
6954 static void
6955 clear_func_from_hashes(struct ftrace_init_func *func)
6956 {
6957 	struct trace_array *tr;
6958 
6959 	mutex_lock(&trace_types_lock);
6960 	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6961 		if (!tr->ops || !tr->ops->func_hash)
6962 			continue;
6963 		mutex_lock(&tr->ops->func_hash->regex_lock);
6964 		clear_func_from_hash(func, tr->ops->func_hash->filter_hash);
6965 		clear_func_from_hash(func, tr->ops->func_hash->notrace_hash);
6966 		mutex_unlock(&tr->ops->func_hash->regex_lock);
6967 	}
6968 	mutex_unlock(&trace_types_lock);
6969 }
6970 
6971 static void add_to_clear_hash_list(struct list_head *clear_list,
6972 				   struct dyn_ftrace *rec)
6973 {
6974 	struct ftrace_init_func *func;
6975 
6976 	func = kmalloc(sizeof(*func), GFP_KERNEL);
6977 	if (!func) {
6978 		MEM_FAIL(1, "alloc failure, ftrace filter could be stale\n");
6979 		return;
6980 	}
6981 
6982 	func->ip = rec->ip;
6983 	list_add(&func->list, clear_list);
6984 }
6985 
6986 void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
6987 {
6988 	unsigned long start = (unsigned long)(start_ptr);
6989 	unsigned long end = (unsigned long)(end_ptr);
6990 	struct ftrace_page **last_pg = &ftrace_pages_start;
6991 	struct ftrace_page *pg;
6992 	struct dyn_ftrace *rec;
6993 	struct dyn_ftrace key;
6994 	struct ftrace_mod_map *mod_map = NULL;
6995 	struct ftrace_init_func *func, *func_next;
6996 	struct list_head clear_hash;
6997 
6998 	INIT_LIST_HEAD(&clear_hash);
6999 
7000 	key.ip = start;
7001 	key.flags = end;	/* overload flags, as it is unsigned long */
7002 
7003 	mutex_lock(&ftrace_lock);
7004 
7005 	/*
7006 	 * If we are freeing module init memory, then check if
7007 	 * any tracer is active. If so, we need to save a mapping of
7008 	 * the module functions being freed with the address.
7009 	 */
7010 	if (mod && ftrace_ops_list != &ftrace_list_end)
7011 		mod_map = allocate_ftrace_mod_map(mod, start, end);
7012 
7013 	for (pg = ftrace_pages_start; pg; last_pg = &pg->next, pg = *last_pg) {
7014 		if (end < pg->records[0].ip ||
7015 		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
7016 			continue;
7017  again:
7018 		rec = bsearch(&key, pg->records, pg->index,
7019 			      sizeof(struct dyn_ftrace),
7020 			      ftrace_cmp_recs);
7021 		if (!rec)
7022 			continue;
7023 
7024 		/* rec will be cleared from hashes after ftrace_lock unlock */
7025 		add_to_clear_hash_list(&clear_hash, rec);
7026 
7027 		if (mod_map)
7028 			save_ftrace_mod_rec(mod_map, rec);
7029 
7030 		pg->index--;
7031 		ftrace_update_tot_cnt--;
7032 		if (!pg->index) {
7033 			*last_pg = pg->next;
7034 			if (pg->records) {
7035 				free_pages((unsigned long)pg->records, pg->order);
7036 				ftrace_number_of_pages -= 1 << pg->order;
7037 			}
7038 			ftrace_number_of_groups--;
7039 			kfree(pg);
7040 			pg = container_of(last_pg, struct ftrace_page, next);
7041 			if (!(*last_pg))
7042 				ftrace_pages = pg;
7043 			continue;
7044 		}
7045 		memmove(rec, rec + 1,
7046 			(pg->index - (rec - pg->records)) * sizeof(*rec));
7047 		/* More than one function may be in this block */
7048 		goto again;
7049 	}
7050 	mutex_unlock(&ftrace_lock);
7051 
7052 	list_for_each_entry_safe(func, func_next, &clear_hash, list) {
7053 		clear_func_from_hashes(func);
7054 		kfree(func);
7055 	}
7056 }
7057 
7058 void __init ftrace_free_init_mem(void)
7059 {
7060 	void *start = (void *)(&__init_begin);
7061 	void *end = (void *)(&__init_end);
7062 
7063 	ftrace_free_mem(NULL, start, end);
7064 }
7065 
7066 int __init __weak ftrace_dyn_arch_init(void)
7067 {
7068 	return 0;
7069 }
7070 
7071 void __init ftrace_init(void)
7072 {
7073 	extern unsigned long __start_mcount_loc[];
7074 	extern unsigned long __stop_mcount_loc[];
7075 	unsigned long count, flags;
7076 	int ret;
7077 
7078 	local_irq_save(flags);
7079 	ret = ftrace_dyn_arch_init();
7080 	local_irq_restore(flags);
7081 	if (ret)
7082 		goto failed;
7083 
7084 	count = __stop_mcount_loc - __start_mcount_loc;
7085 	if (!count) {
7086 		pr_info("ftrace: No functions to be traced?\n");
7087 		goto failed;
7088 	}
7089 
7090 	pr_info("ftrace: allocating %ld entries in %ld pages\n",
7091 		count, count / ENTRIES_PER_PAGE + 1);
7092 
7093 	last_ftrace_enabled = ftrace_enabled = 1;
7094 
7095 	ret = ftrace_process_locs(NULL,
7096 				  __start_mcount_loc,
7097 				  __stop_mcount_loc);
7098 
7099 	pr_info("ftrace: allocated %ld pages with %ld groups\n",
7100 		ftrace_number_of_pages, ftrace_number_of_groups);
7101 
7102 	set_ftrace_early_filters();
7103 
7104 	return;
7105  failed:
7106 	ftrace_disabled = 1;
7107 }
7108 
7109 /* Do nothing if arch does not support this */
7110 void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
7111 {
7112 }
7113 
7114 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7115 {
7116 	unsigned long trampoline = ops->trampoline;
7117 
7118 	arch_ftrace_update_trampoline(ops);
7119 	if (ops->trampoline && ops->trampoline != trampoline &&
7120 	    (ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) {
7121 		/* Add to kallsyms before the perf events */
7122 		ftrace_add_trampoline_to_kallsyms(ops);
7123 		perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
7124 				   ops->trampoline, ops->trampoline_size, false,
7125 				   FTRACE_TRAMPOLINE_SYM);
7126 		/*
7127 		 * Record the perf text poke event after the ksymbol register
7128 		 * event.
7129 		 */
7130 		perf_event_text_poke((void *)ops->trampoline, NULL, 0,
7131 				     (void *)ops->trampoline,
7132 				     ops->trampoline_size);
7133 	}
7134 }
7135 
7136 void ftrace_init_trace_array(struct trace_array *tr)
7137 {
7138 	INIT_LIST_HEAD(&tr->func_probes);
7139 	INIT_LIST_HEAD(&tr->mod_trace);
7140 	INIT_LIST_HEAD(&tr->mod_notrace);
7141 }
7142 #else
7143 
7144 struct ftrace_ops global_ops = {
7145 	.func			= ftrace_stub,
7146 	.flags			= FTRACE_OPS_FL_INITIALIZED |
7147 				  FTRACE_OPS_FL_PID,
7148 };
7149 
7150 static int __init ftrace_nodyn_init(void)
7151 {
7152 	ftrace_enabled = 1;
7153 	return 0;
7154 }
7155 core_initcall(ftrace_nodyn_init);
7156 
7157 static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
7158 static inline void ftrace_startup_enable(int command) { }
7159 static inline void ftrace_startup_all(int command) { }
7160 
7161 # define ftrace_startup_sysctl()	do { } while (0)
7162 # define ftrace_shutdown_sysctl()	do { } while (0)
7163 
7164 static void ftrace_update_trampoline(struct ftrace_ops *ops)
7165 {
7166 }
7167 
7168 #endif /* CONFIG_DYNAMIC_FTRACE */
7169 
7170 __init void ftrace_init_global_array_ops(struct trace_array *tr)
7171 {
7172 	tr->ops = &global_ops;
7173 	tr->ops->private = tr;
7174 	ftrace_init_trace_array(tr);
7175 }
7176 
7177 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
7178 {
7179 	/* If we filter on pids, update to use the pid function */
7180 	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
7181 		if (WARN_ON(tr->ops->func != ftrace_stub))
7182 			printk("ftrace ops had %pS for function\n",
7183 			       tr->ops->func);
7184 	}
7185 	tr->ops->func = func;
7186 	tr->ops->private = tr;
7187 }
7188 
7189 void ftrace_reset_array_ops(struct trace_array *tr)
7190 {
7191 	tr->ops->func = ftrace_stub;
7192 }
7193 
7194 static nokprobe_inline void
7195 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7196 		       struct ftrace_ops *ignored, struct ftrace_regs *fregs)
7197 {
7198 	struct pt_regs *regs = ftrace_get_regs(fregs);
7199 	struct ftrace_ops *op;
7200 	int bit;
7201 
7202 	/*
7203 	 * trace_test_and_set_recursion() will disable preemption,
7204 	 * which is required since some of the ops may be dynamically
7205 	 * allocated; they must be freed after a synchronize_rcu().
7206 	 */
7207 	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7208 	if (bit < 0)
7209 		return;
7210 
7211 	do_for_each_ftrace_op(op, ftrace_ops_list) {
7212 		/* Stub functions don't need to be called nor tested */
7213 		if (op->flags & FTRACE_OPS_FL_STUB)
7214 			continue;
7215 		/*
7216 		 * Check the following for each ops before calling their func:
7217 		 *  if RCU flag is set, then rcu_is_watching() must be true
7218 		 *  if PER_CPU is set, then ftrace_function_local_disable()
7219 		 *                          must be false
7220 		 *  Otherwise test if the ip matches the ops filter
7221 		 *
7222 		 * If any of the above fails then the op->func() is not executed.
7223 		 */
7224 		if ((!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) &&
7225 		    ftrace_ops_test(op, ip, regs)) {
7226 			if (FTRACE_WARN_ON(!op->func)) {
7227 				pr_warn("op=%p %pS\n", op, op);
7228 				goto out;
7229 			}
7230 			op->func(ip, parent_ip, op, fregs);
7231 		}
7232 	} while_for_each_ftrace_op(op);
7233 out:
7234 	trace_clear_recursion(bit);
7235 }
7236 
7237 /*
7238  * Some archs only support passing ip and parent_ip. Even though
7239  * the list function ignores the op parameter, we do not want any
7240  * C side effects, where a function is called without the caller
7241  * sending a third parameter.
7242  * Archs are expected to support both regs and ftrace_ops at the same time.
7243  * If they support ftrace_ops, it is assumed they support regs.
7244  * If callbacks want to use regs, they must either check for regs
7245  * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
7246  * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
7247  * An architecture can pass partial regs with ftrace_ops and still
7248  * set the ARCH_SUPPORTS_FTRACE_OPS.
7249  *
7250  * In vmlinux.lds.h, ftrace_ops_list_func() is defined to be
7251  * arch_ftrace_ops_list_func.
7252  */
7253 #if ARCH_SUPPORTS_FTRACE_OPS
7254 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
7255 			       struct ftrace_ops *op, struct ftrace_regs *fregs)
7256 {
7257 	__ftrace_ops_list_func(ip, parent_ip, NULL, fregs);
7258 }
7259 #else
7260 void arch_ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
7261 {
7262 	__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
7263 }
7264 #endif
7265 NOKPROBE_SYMBOL(arch_ftrace_ops_list_func);
7266 
7267 /*
7268  * If there's only one function registered but it does not support
7269  * recursion, needs RCU protection and/or requires per cpu handling, then
7270  * this function will be called by the mcount trampoline.
7271  */
7272 static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip,
7273 				   struct ftrace_ops *op, struct ftrace_regs *fregs)
7274 {
7275 	int bit;
7276 
7277 	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
7278 	if (bit < 0)
7279 		return;
7280 
7281 	if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching())
7282 		op->func(ip, parent_ip, op, fregs);
7283 
7284 	trace_clear_recursion(bit);
7285 }
7286 NOKPROBE_SYMBOL(ftrace_ops_assist_func);
7287 
7288 /**
7289  * ftrace_ops_get_func - get the function a trampoline should call
7290  * @ops: the ops to get the function for
7291  *
7292  * Normally the mcount trampoline will call the ops->func, but there
7293  * are times that it should not. For example, if the ops does not
7294  * have its own recursion protection, then it should call the
7295  * ftrace_ops_assist_func() instead.
7296  *
7297  * Returns the function that the trampoline should call for @ops.
7298  */
7299 ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
7300 {
7301 	/*
7302 	 * If the function does not handle recursion or needs to be RCU safe,
7303 	 * then we need to call the assist handler.
7304 	 */
7305 	if (ops->flags & (FTRACE_OPS_FL_RECURSION |
7306 			  FTRACE_OPS_FL_RCU))
7307 		return ftrace_ops_assist_func;
7308 
7309 	return ops->func;
7310 }
7311 
7312 static void
7313 ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
7314 		    struct task_struct *prev, struct task_struct *next)
7315 {
7316 	struct trace_array *tr = data;
7317 	struct trace_pid_list *pid_list;
7318 	struct trace_pid_list *no_pid_list;
7319 
7320 	pid_list = rcu_dereference_sched(tr->function_pids);
7321 	no_pid_list = rcu_dereference_sched(tr->function_no_pids);
7322 
7323 	if (trace_ignore_this_task(pid_list, no_pid_list, next))
7324 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7325 			       FTRACE_PID_IGNORE);
7326 	else
7327 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7328 			       next->pid);
7329 }
7330 
7331 static void
7332 ftrace_pid_follow_sched_process_fork(void *data,
7333 				     struct task_struct *self,
7334 				     struct task_struct *task)
7335 {
7336 	struct trace_pid_list *pid_list;
7337 	struct trace_array *tr = data;
7338 
7339 	pid_list = rcu_dereference_sched(tr->function_pids);
7340 	trace_filter_add_remove_task(pid_list, self, task);
7341 
7342 	pid_list = rcu_dereference_sched(tr->function_no_pids);
7343 	trace_filter_add_remove_task(pid_list, self, task);
7344 }
7345 
7346 static void
7347 ftrace_pid_follow_sched_process_exit(void *data, struct task_struct *task)
7348 {
7349 	struct trace_pid_list *pid_list;
7350 	struct trace_array *tr = data;
7351 
7352 	pid_list = rcu_dereference_sched(tr->function_pids);
7353 	trace_filter_add_remove_task(pid_list, NULL, task);
7354 
7355 	pid_list = rcu_dereference_sched(tr->function_no_pids);
7356 	trace_filter_add_remove_task(pid_list, NULL, task);
7357 }
7358 
7359 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable)
7360 {
7361 	if (enable) {
7362 		register_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7363 						  tr);
7364 		register_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7365 						  tr);
7366 	} else {
7367 		unregister_trace_sched_process_fork(ftrace_pid_follow_sched_process_fork,
7368 						    tr);
7369 		unregister_trace_sched_process_free(ftrace_pid_follow_sched_process_exit,
7370 						    tr);
7371 	}
7372 }
7373 
7374 static void clear_ftrace_pids(struct trace_array *tr, int type)
7375 {
7376 	struct trace_pid_list *pid_list;
7377 	struct trace_pid_list *no_pid_list;
7378 	int cpu;
7379 
7380 	pid_list = rcu_dereference_protected(tr->function_pids,
7381 					     lockdep_is_held(&ftrace_lock));
7382 	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7383 						lockdep_is_held(&ftrace_lock));
7384 
7385 	/* Make sure there's something to do */
7386 	if (!pid_type_enabled(type, pid_list, no_pid_list))
7387 		return;
7388 
7389 	/* See if the pids still need to be checked after this */
7390 	if (!still_need_pid_events(type, pid_list, no_pid_list)) {
7391 		unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7392 		for_each_possible_cpu(cpu)
7393 			per_cpu_ptr(tr->array_buffer.data, cpu)->ftrace_ignore_pid = FTRACE_PID_TRACE;
7394 	}
7395 
7396 	if (type & TRACE_PIDS)
7397 		rcu_assign_pointer(tr->function_pids, NULL);
7398 
7399 	if (type & TRACE_NO_PIDS)
7400 		rcu_assign_pointer(tr->function_no_pids, NULL);
7401 
7402 	/* Wait till all users are no longer using pid filtering */
7403 	synchronize_rcu();
7404 
7405 	if ((type & TRACE_PIDS) && pid_list)
7406 		trace_pid_list_free(pid_list);
7407 
7408 	if ((type & TRACE_NO_PIDS) && no_pid_list)
7409 		trace_pid_list_free(no_pid_list);
7410 }
7411 
7412 void ftrace_clear_pids(struct trace_array *tr)
7413 {
7414 	mutex_lock(&ftrace_lock);
7415 
7416 	clear_ftrace_pids(tr, TRACE_PIDS | TRACE_NO_PIDS);
7417 
7418 	mutex_unlock(&ftrace_lock);
7419 }
7420 
7421 static void ftrace_pid_reset(struct trace_array *tr, int type)
7422 {
7423 	mutex_lock(&ftrace_lock);
7424 	clear_ftrace_pids(tr, type);
7425 
7426 	ftrace_update_pid_func();
7427 	ftrace_startup_all(0);
7428 
7429 	mutex_unlock(&ftrace_lock);
7430 }
7431 
7432 /* Greater than any max PID */
7433 #define FTRACE_NO_PIDS		(void *)(PID_MAX_LIMIT + 1)
7434 
7435 static void *fpid_start(struct seq_file *m, loff_t *pos)
7436 	__acquires(RCU)
7437 {
7438 	struct trace_pid_list *pid_list;
7439 	struct trace_array *tr = m->private;
7440 
7441 	mutex_lock(&ftrace_lock);
7442 	rcu_read_lock_sched();
7443 
7444 	pid_list = rcu_dereference_sched(tr->function_pids);
7445 
7446 	if (!pid_list)
7447 		return !(*pos) ? FTRACE_NO_PIDS : NULL;
7448 
7449 	return trace_pid_start(pid_list, pos);
7450 }
7451 
7452 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
7453 {
7454 	struct trace_array *tr = m->private;
7455 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
7456 
7457 	if (v == FTRACE_NO_PIDS) {
7458 		(*pos)++;
7459 		return NULL;
7460 	}
7461 	return trace_pid_next(pid_list, v, pos);
7462 }
7463 
7464 static void fpid_stop(struct seq_file *m, void *p)
7465 	__releases(RCU)
7466 {
7467 	rcu_read_unlock_sched();
7468 	mutex_unlock(&ftrace_lock);
7469 }
7470 
7471 static int fpid_show(struct seq_file *m, void *v)
7472 {
7473 	if (v == FTRACE_NO_PIDS) {
7474 		seq_puts(m, "no pid\n");
7475 		return 0;
7476 	}
7477 
7478 	return trace_pid_show(m, v);
7479 }
7480 
7481 static const struct seq_operations ftrace_pid_sops = {
7482 	.start = fpid_start,
7483 	.next = fpid_next,
7484 	.stop = fpid_stop,
7485 	.show = fpid_show,
7486 };
7487 
7488 static void *fnpid_start(struct seq_file *m, loff_t *pos)
7489 	__acquires(RCU)
7490 {
7491 	struct trace_pid_list *pid_list;
7492 	struct trace_array *tr = m->private;
7493 
7494 	mutex_lock(&ftrace_lock);
7495 	rcu_read_lock_sched();
7496 
7497 	pid_list = rcu_dereference_sched(tr->function_no_pids);
7498 
7499 	if (!pid_list)
7500 		return !(*pos) ? FTRACE_NO_PIDS : NULL;
7501 
7502 	return trace_pid_start(pid_list, pos);
7503 }
7504 
7505 static void *fnpid_next(struct seq_file *m, void *v, loff_t *pos)
7506 {
7507 	struct trace_array *tr = m->private;
7508 	struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_no_pids);
7509 
7510 	if (v == FTRACE_NO_PIDS) {
7511 		(*pos)++;
7512 		return NULL;
7513 	}
7514 	return trace_pid_next(pid_list, v, pos);
7515 }
7516 
7517 static const struct seq_operations ftrace_no_pid_sops = {
7518 	.start = fnpid_start,
7519 	.next = fnpid_next,
7520 	.stop = fpid_stop,
7521 	.show = fpid_show,
7522 };
7523 
7524 static int pid_open(struct inode *inode, struct file *file, int type)
7525 {
7526 	const struct seq_operations *seq_ops;
7527 	struct trace_array *tr = inode->i_private;
7528 	struct seq_file *m;
7529 	int ret = 0;
7530 
7531 	ret = tracing_check_open_get_tr(tr);
7532 	if (ret)
7533 		return ret;
7534 
7535 	if ((file->f_mode & FMODE_WRITE) &&
7536 	    (file->f_flags & O_TRUNC))
7537 		ftrace_pid_reset(tr, type);
7538 
7539 	switch (type) {
7540 	case TRACE_PIDS:
7541 		seq_ops = &ftrace_pid_sops;
7542 		break;
7543 	case TRACE_NO_PIDS:
7544 		seq_ops = &ftrace_no_pid_sops;
7545 		break;
7546 	default:
7547 		trace_array_put(tr);
7548 		WARN_ON_ONCE(1);
7549 		return -EINVAL;
7550 	}
7551 
7552 	ret = seq_open(file, seq_ops);
7553 	if (ret < 0) {
7554 		trace_array_put(tr);
7555 	} else {
7556 		m = file->private_data;
7557 		/* copy tr over to seq ops */
7558 		m->private = tr;
7559 	}
7560 
7561 	return ret;
7562 }
7563 
7564 static int
7565 ftrace_pid_open(struct inode *inode, struct file *file)
7566 {
7567 	return pid_open(inode, file, TRACE_PIDS);
7568 }
7569 
7570 static int
7571 ftrace_no_pid_open(struct inode *inode, struct file *file)
7572 {
7573 	return pid_open(inode, file, TRACE_NO_PIDS);
7574 }
7575 
7576 static void ignore_task_cpu(void *data)
7577 {
7578 	struct trace_array *tr = data;
7579 	struct trace_pid_list *pid_list;
7580 	struct trace_pid_list *no_pid_list;
7581 
7582 	/*
7583 	 * This function is called by on_each_cpu() while the
7584 	 * ftrace_lock is held.
7585 	 */
7586 	pid_list = rcu_dereference_protected(tr->function_pids,
7587 					     mutex_is_locked(&ftrace_lock));
7588 	no_pid_list = rcu_dereference_protected(tr->function_no_pids,
7589 						mutex_is_locked(&ftrace_lock));
7590 
7591 	if (trace_ignore_this_task(pid_list, no_pid_list, current))
7592 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7593 			       FTRACE_PID_IGNORE);
7594 	else
7595 		this_cpu_write(tr->array_buffer.data->ftrace_ignore_pid,
7596 			       current->pid);
7597 }
7598 
7599 static ssize_t
7600 pid_write(struct file *filp, const char __user *ubuf,
7601 	  size_t cnt, loff_t *ppos, int type)
7602 {
7603 	struct seq_file *m = filp->private_data;
7604 	struct trace_array *tr = m->private;
7605 	struct trace_pid_list *filtered_pids;
7606 	struct trace_pid_list *other_pids;
7607 	struct trace_pid_list *pid_list;
7608 	ssize_t ret;
7609 
7610 	if (!cnt)
7611 		return 0;
7612 
7613 	mutex_lock(&ftrace_lock);
7614 
7615 	switch (type) {
7616 	case TRACE_PIDS:
7617 		filtered_pids = rcu_dereference_protected(tr->function_pids,
7618 					     lockdep_is_held(&ftrace_lock));
7619 		other_pids = rcu_dereference_protected(tr->function_no_pids,
7620 					     lockdep_is_held(&ftrace_lock));
7621 		break;
7622 	case TRACE_NO_PIDS:
7623 		filtered_pids = rcu_dereference_protected(tr->function_no_pids,
7624 					     lockdep_is_held(&ftrace_lock));
7625 		other_pids = rcu_dereference_protected(tr->function_pids,
7626 					     lockdep_is_held(&ftrace_lock));
7627 		break;
7628 	default:
7629 		ret = -EINVAL;
7630 		WARN_ON_ONCE(1);
7631 		goto out;
7632 	}
7633 
7634 	ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
7635 	if (ret < 0)
7636 		goto out;
7637 
7638 	switch (type) {
7639 	case TRACE_PIDS:
7640 		rcu_assign_pointer(tr->function_pids, pid_list);
7641 		break;
7642 	case TRACE_NO_PIDS:
7643 		rcu_assign_pointer(tr->function_no_pids, pid_list);
7644 		break;
7645 	}
7646 
7647 
7648 	if (filtered_pids) {
7649 		synchronize_rcu();
7650 		trace_pid_list_free(filtered_pids);
7651 	} else if (pid_list && !other_pids) {
7652 		/* Register a probe to set whether to ignore the tracing of a task */
7653 		register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
7654 	}
7655 
7656 	/*
7657 	 * Ignoring of pids is done at task switch. But we have to
7658 	 * check for those tasks that are currently running.
7659 	 * Always do this in case a pid was appended or removed.
7660 	 */
7661 	on_each_cpu(ignore_task_cpu, tr, 1);
7662 
7663 	ftrace_update_pid_func();
7664 	ftrace_startup_all(0);
7665  out:
7666 	mutex_unlock(&ftrace_lock);
7667 
7668 	if (ret > 0)
7669 		*ppos += ret;
7670 
7671 	return ret;
7672 }
7673 
7674 static ssize_t
7675 ftrace_pid_write(struct file *filp, const char __user *ubuf,
7676 		 size_t cnt, loff_t *ppos)
7677 {
7678 	return pid_write(filp, ubuf, cnt, ppos, TRACE_PIDS);
7679 }
7680 
7681 static ssize_t
7682 ftrace_no_pid_write(struct file *filp, const char __user *ubuf,
7683 		    size_t cnt, loff_t *ppos)
7684 {
7685 	return pid_write(filp, ubuf, cnt, ppos, TRACE_NO_PIDS);
7686 }
7687 
7688 static int
7689 ftrace_pid_release(struct inode *inode, struct file *file)
7690 {
7691 	struct trace_array *tr = inode->i_private;
7692 
7693 	trace_array_put(tr);
7694 
7695 	return seq_release(inode, file);
7696 }
7697 
7698 static const struct file_operations ftrace_pid_fops = {
7699 	.open		= ftrace_pid_open,
7700 	.write		= ftrace_pid_write,
7701 	.read		= seq_read,
7702 	.llseek		= tracing_lseek,
7703 	.release	= ftrace_pid_release,
7704 };
7705 
7706 static const struct file_operations ftrace_no_pid_fops = {
7707 	.open		= ftrace_no_pid_open,
7708 	.write		= ftrace_no_pid_write,
7709 	.read		= seq_read,
7710 	.llseek		= tracing_lseek,
7711 	.release	= ftrace_pid_release,
7712 };
7713 
7714 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7715 {
7716 	trace_create_file("set_ftrace_pid", TRACE_MODE_WRITE, d_tracer,
7717 			    tr, &ftrace_pid_fops);
7718 	trace_create_file("set_ftrace_notrace_pid", TRACE_MODE_WRITE,
7719 			  d_tracer, tr, &ftrace_no_pid_fops);
7720 }
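
/*
 * For example, limiting function tracing to a single task is done by
 * writing its pid to the file created above:
 *
 *	echo 123 > /sys/kernel/tracing/set_ftrace_pid
 */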
7721 
7722 void __init ftrace_init_tracefs_toplevel(struct trace_array *tr,
7723 					 struct dentry *d_tracer)
7724 {
7725 	/* Only the top level directory has the dyn_tracefs and profile */
7726 	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
7727 
7728 	ftrace_init_dyn_tracefs(d_tracer);
7729 	ftrace_profile_tracefs(d_tracer);
7730 }
7731 
7732 /**
7733  * ftrace_kill - kill ftrace
7734  *
7735  * This function should be used by panic code. It stops ftrace
7736  * but in a not so nice way: no locking or synchronization is
7737  * done, which makes it safe to call from atomic context.
7738  */
7739 void ftrace_kill(void)
7740 {
7741 	ftrace_disabled = 1;
7742 	ftrace_enabled = 0;
7743 	ftrace_trace_function = ftrace_stub;
7744 }
7745 
7746 /**
7747  * ftrace_is_dead - Test if ftrace is dead or not.
7748  *
7749  * Returns 1 if ftrace is "dead", zero otherwise.
7750  */
7751 int ftrace_is_dead(void)
7752 {
7753 	return ftrace_disabled;
7754 }
7755 
7756 /**
7757  * register_ftrace_function - register a function for profiling
7758  * @ops: ops structure that holds the function for profiling.
7759  *
7760  * Register a function to be called by all functions in the
7761  * kernel.
7762  *
7763  * Note: @ops->func and all the functions it calls must be labeled
7764  *       with "notrace", otherwise it will go into a
7765  *       recursive loop.
7766  */
7767 int register_ftrace_function(struct ftrace_ops *ops)
7768 {
7769 	int ret;
7770 
7771 	ftrace_ops_init(ops);
7772 
7773 	mutex_lock(&ftrace_lock);
7774 
7775 	ret = ftrace_startup(ops, 0);
7776 
7777 	mutex_unlock(&ftrace_lock);
7778 
7779 	return ret;
7780 }
7781 EXPORT_SYMBOL_GPL(register_ftrace_function);
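
/*
 * A minimal sketch of a caller (illustrative only; the names below are
 * hypothetical, and ftrace_set_filter() assumes CONFIG_DYNAMIC_FTRACE).
 * The callback must be notrace, as the doc comment above warns, or it
 * would trace itself recursively:
 */
#if 0
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op,
				  struct ftrace_regs *fregs)
{
	/* Called on entry of every traced function; keep this short. */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
};

static int __init my_tracer_init(void)
{
	/* Optionally narrow the filter before enabling the callback. */
	ftrace_set_filter(&my_ops, "kfree", strlen("kfree"), 0);
	return register_ftrace_function(&my_ops);
}
#endif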
7782 
7783 /**
7784  * unregister_ftrace_function - unregister a function for profiling.
7785  * @ops: ops structure that holds the function to unregister
7786  *
7787  * Unregister a function that was added to be called by ftrace profiling.
7788  */
7789 int unregister_ftrace_function(struct ftrace_ops *ops)
7790 {
7791 	int ret;
7792 
7793 	mutex_lock(&ftrace_lock);
7794 	ret = ftrace_shutdown(ops, 0);
7795 	mutex_unlock(&ftrace_lock);
7796 
7797 	return ret;
7798 }
7799 EXPORT_SYMBOL_GPL(unregister_ftrace_function);
7800 
7801 static bool is_permanent_ops_registered(void)
7802 {
7803 	struct ftrace_ops *op;
7804 
7805 	do_for_each_ftrace_op(op, ftrace_ops_list) {
7806 		if (op->flags & FTRACE_OPS_FL_PERMANENT)
7807 			return true;
7808 	} while_for_each_ftrace_op(op);
7809 
7810 	return false;
7811 }
7812 
7813 int
7814 ftrace_enable_sysctl(struct ctl_table *table, int write,
7815 		     void *buffer, size_t *lenp, loff_t *ppos)
7816 {
7817 	int ret = -ENODEV;
7818 
7819 	mutex_lock(&ftrace_lock);
7820 
7821 	if (unlikely(ftrace_disabled))
7822 		goto out;
7823 
7824 	ret = proc_dointvec(table, write, buffer, lenp, ppos);
7825 
7826 	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
7827 		goto out;
7828 
7829 	if (ftrace_enabled) {
7830 
7831 		/* we are starting ftrace again */
7832 		if (rcu_dereference_protected(ftrace_ops_list,
7833 			lockdep_is_held(&ftrace_lock)) != &ftrace_list_end)
7834 			update_ftrace_function();
7835 
7836 		ftrace_startup_sysctl();
7837 
7838 	} else {
7839 		if (is_permanent_ops_registered()) {
7840 			ftrace_enabled = true;
7841 			ret = -EBUSY;
7842 			goto out;
7843 		}
7844 
7845 		/* stopping ftrace calls (just send to ftrace_stub) */
7846 		ftrace_trace_function = ftrace_stub;
7847 
7848 		ftrace_shutdown_sysctl();
7849 	}
7850 
7851 	last_ftrace_enabled = !!ftrace_enabled;
7852  out:
7853 	mutex_unlock(&ftrace_lock);
7854 	return ret;
7855 }
7856