xref: /openbmc/linux/kernel/trace/ftrace.c (revision b0fc494fae96a7089f3651cb451f461c7291244c)
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15 
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/kthread.h>
20 #include <linux/hardirq.h>
21 #include <linux/ftrace.h>
22 #include <linux/module.h>
23 #include <linux/sysctl.h>
24 #include <linux/hash.h>
25 #include <linux/list.h>
26 
27 #include "trace.h"
28 
29 #ifdef CONFIG_DYNAMIC_FTRACE
30 # define FTRACE_ENABLED_INIT 1
31 #else
32 # define FTRACE_ENABLED_INIT 0
33 #endif
34 
35 int ftrace_enabled = FTRACE_ENABLED_INIT;
36 static int last_ftrace_enabled = FTRACE_ENABLED_INIT;
37 
38 static DEFINE_SPINLOCK(ftrace_lock);
39 static DEFINE_MUTEX(ftrace_sysctl_lock);
40 
41 static struct ftrace_ops ftrace_list_end __read_mostly =
42 {
43 	.func = ftrace_stub,
44 };
45 
46 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
47 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
48 
49 /* mcount is defined per arch in assembly */
50 EXPORT_SYMBOL(mcount);
51 
52 notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
53 {
54 	struct ftrace_ops *op = ftrace_list;
55 
56 	/* in case someone actually ports this to alpha! */
57 	read_barrier_depends();
58 
59 	while (op != &ftrace_list_end) {
60 		/* silly alpha */
61 		read_barrier_depends();
62 		op->func(ip, parent_ip);
63 		op = op->next;
64 	}
65 }
66 
67 /**
68  * clear_ftrace_function - reset the ftrace function
69  *
70  * This resets the ftrace function to the stub and in essence stops
71  * tracing.  There may be a brief lag before every CPU stops calling the old function.
72  */
73 void clear_ftrace_function(void)
74 {
75 	ftrace_trace_function = ftrace_stub;
76 }
77 
78 static int notrace __register_ftrace_function(struct ftrace_ops *ops)
79 {
80 	/* Should never be called from interrupt context */
81 	spin_lock(&ftrace_lock);
82 
83 	ops->next = ftrace_list;
84 	/*
85 	 * We are entering ops into the ftrace_list but another
86 	 * CPU might be walking that list. We need to make sure
87 	 * the ops->next pointer is valid before another CPU sees
88 	 * the ops pointer included into the ftrace_list.
89 	 */
90 	smp_wmb();
91 	ftrace_list = ops;
92 
93 	if (ftrace_enabled) {
94 		/*
95 		 * For one func, simply call it directly.
96 		 * For more than one func, call the chain.
97 		 */
98 		if (ops->next == &ftrace_list_end)
99 			ftrace_trace_function = ops->func;
100 		else
101 			ftrace_trace_function = ftrace_list_func;
102 	}
103 
104 	spin_unlock(&ftrace_lock);
105 
106 	return 0;
107 }
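/*
 * A minimal sketch of the publish ordering used above (illustrative only,
 * not part of the original file).  The writer in __register_ftrace_function()
 * pairs with the reader in ftrace_list_func():
 *
 *	writer (register):		reader (list walk):
 *	ops->next = ftrace_list;	op = ftrace_list;
 *	smp_wmb();			read_barrier_depends();
 *	ftrace_list = ops;		op->func(ip, parent_ip);
 *					op = op->next;
 *
 * The smp_wmb() guarantees that a CPU which observes the new ftrace_list
 * pointer also observes a valid ops->next, so the walk in ftrace_list_func()
 * always terminates at ftrace_list_end.
 */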
108 
109 static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
110 {
111 	struct ftrace_ops **p;
112 	int ret = 0;
113 
114 	spin_lock(&ftrace_lock);
115 
116 	/*
117 	 * If we are removing the last function, then simply point
118 	 * to the ftrace_stub.
119 	 */
120 	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
121 		ftrace_trace_function = ftrace_stub;
122 		ftrace_list = &ftrace_list_end;
123 		goto out;
124 	}
125 
126 	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
127 		if (*p == ops)
128 			break;
129 
130 	if (*p != ops) {
131 		ret = -1;
132 		goto out;
133 	}
134 
135 	*p = (*p)->next;
136 
137 	if (ftrace_enabled) {
138 		/* If we only have one func left, then call that directly */
139 		if (ftrace_list == &ftrace_list_end ||
140 		    ftrace_list->next == &ftrace_list_end)
141 			ftrace_trace_function = ftrace_list->func;
142 	}
143 
144  out:
145 	spin_unlock(&ftrace_lock);
146 
147 	return ret;
148 }
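/*
 * Note on the removal loop above: walking with a struct ftrace_ops **p lets
 * us unlink an entry with a single store (*p = (*p)->next) and no "previous"
 * pointer.  A minimal sketch of the idiom, assuming a singly linked list
 * terminated by a sentinel node:
 *
 *	for (p = &head; *p != &sentinel; p = &(*p)->next)
 *		if (*p == victim)
 *			break;
 *	if (*p == victim)
 *		*p = victim->next;
 */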
149 
150 #ifdef CONFIG_DYNAMIC_FTRACE
151 
152 static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
153 
154 static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
155 
156 static DEFINE_SPINLOCK(ftrace_shutdown_lock);
157 static DEFINE_MUTEX(ftraced_lock);
158 
159 static int ftraced_trigger;
160 static int ftraced_suspend;
161 
162 static int ftrace_record_suspend;
163 
164 static inline int
165 notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
166 {
167 	struct dyn_ftrace *p;
168 	struct hlist_node *t;
169 	int found = 0;
170 
171 	hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
172 		if (p->ip == ip) {
173 			found = 1;
174 			break;
175 		}
176 	}
177 
178 	return found;
179 }
180 
181 static inline void notrace
182 ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
183 {
184 	hlist_add_head(&node->node, &ftrace_hash[key]);
185 }
186 
187 static void notrace
188 ftrace_record_ip(unsigned long ip, unsigned long parent_ip)
189 {
190 	struct dyn_ftrace *node;
191 	unsigned long flags;
192 	unsigned long key;
193 	int resched;
194 	int atomic;
195 
196 	resched = need_resched();
197 	preempt_disable_notrace();
198 
199 	/* We simply need to protect against recursion */
200 	__get_cpu_var(ftrace_shutdown_disable_cpu)++;
201 	if (__get_cpu_var(ftrace_shutdown_disable_cpu) != 1)
202 		goto out;
203 
204 	if (unlikely(ftrace_record_suspend))
205 		goto out;
206 
207 	key = hash_long(ip, FTRACE_HASHBITS);
208 
209 	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
210 
211 	if (ftrace_ip_in_hash(ip, key))
212 		goto out;
213 
214 	atomic = irqs_disabled();
215 
216 	spin_lock_irqsave(&ftrace_shutdown_lock, flags);
217 
218 	/* This ip may have hit the hash before the lock */
219 	if (ftrace_ip_in_hash(ip, key))
220 		goto out_unlock;
221 
222 	/*
223 	 * There's a slight race in which ftraced can update the
224 	 * hash and reset it before we get here. The arch alloc is
225 	 * responsible for checking whether the IP has already been
226 	 * changed; if it has, the alloc will fail.
227 	 */
228 	node = ftrace_alloc_shutdown_node(ip);
229 	if (!node)
230 		goto out_unlock;
231 
232 	node->ip = ip;
233 
234 	ftrace_add_hash(node, key);
235 
236 	ftraced_trigger = 1;
237 
238  out_unlock:
239 	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
240  out:
241 	__get_cpu_var(ftrace_shutdown_disable_cpu)--;
242 
243 	/* prevent recursion with the scheduler: if a resched was already pending, don't add a preemption point here */
244 	if (resched)
245 		preempt_enable_no_resched_notrace();
246 	else
247 		preempt_enable_notrace();
248 }
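/*
 * ftrace_record_ip() is the .func of ftrace_shutdown_ops below, so it runs
 * on every traced call while dynamic ftrace is collecting call sites.  The
 * per-CPU ftrace_shutdown_disable_cpu counter keeps it from recursing into
 * itself, and the hash is checked a second time under ftrace_shutdown_lock
 * because another CPU may have inserted the same ip between the lockless
 * check and the lock acquisition (a check, lock, re-check pattern).
 */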
249 
250 static struct ftrace_ops ftrace_shutdown_ops __read_mostly =
251 {
252 	.func = ftrace_record_ip,
253 };
254 
255 
256 static int notrace __ftrace_modify_code(void *data)
257 {
258 	void (*func)(void) = data;
259 
260 	func();
261 	return 0;
262 }
263 
264 static void notrace ftrace_run_startup_code(void)
265 {
266 	stop_machine_run(__ftrace_modify_code, ftrace_startup_code, NR_CPUS);
267 }
268 
269 static void notrace ftrace_run_shutdown_code(void)
270 {
271 	stop_machine_run(__ftrace_modify_code, ftrace_shutdown_code, NR_CPUS);
272 }
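/*
 * Both helpers above rely on stop_machine_run() to serialize code
 * modification: the callback runs while every other CPU spins with
 * interrupts disabled, so no CPU can be executing the instructions being
 * rewritten.  A minimal sketch of the call shape, with a hypothetical
 * patch_text() standing in for the arch-specific rewrite:
 *
 *	static int do_patch(void *data)
 *	{
 *		patch_text(data);
 *		return 0;
 *	}
 *
 *	stop_machine_run(do_patch, &args, NR_CPUS);
 *
 * (Passing NR_CPUS lets stop_machine_run() pick any CPU for the callback.)
 */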
273 
274 static void notrace ftrace_startup(void)
275 {
276 	mutex_lock(&ftraced_lock);
277 	ftraced_suspend++;
278 	if (ftraced_suspend != 1)
279 		goto out;
280 	__unregister_ftrace_function(&ftrace_shutdown_ops);
281 
282 	if (ftrace_enabled)
283 		ftrace_run_startup_code();
284  out:
285 	mutex_unlock(&ftraced_lock);
286 }
287 
288 static void notrace ftrace_shutdown(void)
289 {
290 	mutex_lock(&ftraced_lock);
291 	ftraced_suspend--;
292 	if (ftraced_suspend)
293 		goto out;
294 
295 	if (ftrace_enabled)
296 		ftrace_run_shutdown_code();
297 
298 	__register_ftrace_function(&ftrace_shutdown_ops);
299  out:
300 	mutex_unlock(&ftraced_lock);
301 }
302 
303 static void notrace ftrace_startup_sysctl(void)
304 {
305 	mutex_lock(&ftraced_lock);
306 	/* ftraced_suspend is non-zero if we want ftrace running */
307 	if (ftraced_suspend)
308 		ftrace_run_startup_code();
309 	mutex_unlock(&ftraced_lock);
310 }
311 
312 static void notrace ftrace_shutdown_sysctl(void)
313 {
314 	mutex_lock(&ftraced_lock);
315 	/* ftraced_suspend is non-zero if ftrace is running */
316 	if (ftraced_suspend)
317 		ftrace_run_shutdown_code();
318 	mutex_unlock(&ftraced_lock);
319 }
320 
321 static cycle_t		ftrace_update_time;
322 static unsigned long	ftrace_update_cnt;
323 unsigned long		ftrace_update_tot_cnt;
324 
325 static int notrace __ftrace_update_code(void *ignore)
326 {
327 	struct dyn_ftrace *p;
328 	struct hlist_head head;
329 	struct hlist_node *t;
330 	cycle_t start, stop;
331 	int i;
332 
333 	/* Don't call the ftrace ops while we update the code */
334 	__unregister_ftrace_function(&ftrace_shutdown_ops);
335 
336 	start = now(raw_smp_processor_id());
337 	ftrace_update_cnt = 0;
338 
339 	/* No locks needed, the machine is stopped! */
340 	for (i = 0; i < FTRACE_HASHSIZE; i++) {
341 		if (hlist_empty(&ftrace_hash[i]))
342 			continue;
343 
344 		head = ftrace_hash[i];
345 		INIT_HLIST_HEAD(&ftrace_hash[i]);
346 
347 		/* all CPUs are stopped, we are safe to modify code */
348 		hlist_for_each_entry(p, t, &head, node) {
349 			ftrace_code_disable(p);
350 			ftrace_update_cnt++;
351 		}
352 
353 	}
354 
355 	stop = now(raw_smp_processor_id());
356 	ftrace_update_time = stop - start;
357 	ftrace_update_tot_cnt += ftrace_update_cnt;
358 
359 	__register_ftrace_function(&ftrace_shutdown_ops);
360 
361 	return 0;
362 }
363 
364 static void notrace ftrace_update_code(void)
365 {
366 	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
367 }
368 
369 static int notrace ftraced(void *ignore)
370 {
371 	unsigned long usecs;
372 
373 	set_current_state(TASK_INTERRUPTIBLE);
374 
375 	while (!kthread_should_stop()) {
376 
377 		/* check once a second */
378 		schedule_timeout(HZ);
379 
380 		mutex_lock(&ftrace_sysctl_lock);
381 		mutex_lock(&ftraced_lock);
382 		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
383 			ftrace_record_suspend++;
384 			ftrace_update_code();
385 			usecs = nsecs_to_usecs(ftrace_update_time);
386 			if (ftrace_update_tot_cnt > 100000) {
387 				ftrace_update_tot_cnt = 0;
388 				pr_info("hm, dftrace overflow: %lu change%s"
389 					 " (%lu total) in %lu usec%s\n",
390 					ftrace_update_cnt,
391 					ftrace_update_cnt != 1 ? "s" : "",
392 					ftrace_update_tot_cnt,
393 					usecs, usecs != 1 ? "s" : "");
394 				WARN_ON_ONCE(1);
395 			}
396 			ftraced_trigger = 0;
397 			ftrace_record_suspend--;
398 		}
399 		mutex_unlock(&ftraced_lock);
400 		mutex_unlock(&ftrace_sysctl_lock);
401 
402 		ftrace_shutdown_replenish();
403 
404 		set_current_state(TASK_INTERRUPTIBLE);
405 	}
406 	__set_current_state(TASK_RUNNING);
407 	return 0;
408 }
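/*
 * The loop above is the usual kthread polling pattern: the task marks itself
 * TASK_INTERRUPTIBLE before testing kthread_should_stop(), so a concurrent
 * kthread_stop() cannot sneak in between the test and schedule_timeout() and
 * leave the thread sleeping forever.  Roughly once a second it folds any
 * newly recorded mcount call sites into a code-patching pass via
 * ftrace_update_code().
 */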
409 
410 static int __init notrace ftrace_shutdown_init(void)
411 {
412 	struct task_struct *p;
413 	int ret;
414 
415 	ret = ftrace_shutdown_arch_init();
416 	if (ret)
417 		return ret;
418 
419 	p = kthread_run(ftraced, NULL, "ftraced");
420 	if (IS_ERR(p))
421 		return -1;
422 
423 	__register_ftrace_function(&ftrace_shutdown_ops);
424 
425 	return 0;
426 }
427 
428 core_initcall(ftrace_shutdown_init);
429 #else
430 # define ftrace_startup()	  do { } while (0)
431 # define ftrace_shutdown()	  do { } while (0)
432 # define ftrace_startup_sysctl()  do { } while (0)
433 # define ftrace_shutdown_sysctl() do { } while (0)
434 #endif /* CONFIG_DYNAMIC_FTRACE */
435 
436 /**
437  * register_ftrace_function - register a function for profiling
438  * @ops: ops structure that holds the function for profiling.
439  *
440  * Register a function to be called by all functions in the
441  * kernel.
442  *
443  * Note: @ops->func and all the functions it calls must be labeled
444  *       with "notrace", otherwise it will go into a
445  *       recursive loop.
446  */
447 int register_ftrace_function(struct ftrace_ops *ops)
448 {
449 	int ret;
450 
451 	mutex_lock(&ftrace_sysctl_lock);
452 	ftrace_startup();
453 
454 	ret = __register_ftrace_function(ops);
455 	mutex_unlock(&ftrace_sysctl_lock);
456 
457 	return ret;
458 }
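/*
 * Example usage (a minimal sketch, not part of this file; my_trace_func,
 * my_ops and my_hit_count are hypothetical names).  The callback, and
 * everything it calls, must be notrace or the mcount hook recurses:
 *
 *	static atomic_long_t my_hit_count;
 *
 *	static notrace void my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		atomic_long_inc(&my_hit_count);
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */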
459 
460 /**
461  * unregister_ftrace_function - unregister a function for profiling.
462  * @ops: ops structure that holds the function to unregister
463  *
464  * Unregister a function that was added to be called by ftrace profiling.
465  */
466 int unregister_ftrace_function(struct ftrace_ops *ops)
467 {
468 	int ret;
469 
470 	mutex_lock(&ftrace_sysctl_lock);
471 	ret = __unregister_ftrace_function(ops);
472 
473 	if (ftrace_list == &ftrace_list_end)
474 		ftrace_shutdown();
475 
476 	mutex_unlock(&ftrace_sysctl_lock);
477 
478 	return ret;
479 }
480 
481 notrace int
482 ftrace_enable_sysctl(struct ctl_table *table, int write,
483 		     struct file *filp, void __user *buffer, size_t *lenp,
484 		     loff_t *ppos)
485 {
486 	int ret;
487 
488 	mutex_lock(&ftrace_sysctl_lock);
489 
490 	ret  = proc_dointvec(table, write, filp, buffer, lenp, ppos);
491 
492 	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
493 		goto out;
494 
495 	last_ftrace_enabled = ftrace_enabled;
496 
497 	if (ftrace_enabled) {
498 
499 		ftrace_startup_sysctl();
500 
501 		/* we are starting ftrace again */
502 		if (ftrace_list != &ftrace_list_end) {
503 			if (ftrace_list->next == &ftrace_list_end)
504 				ftrace_trace_function = ftrace_list->func;
505 			else
506 				ftrace_trace_function = ftrace_list_func;
507 		}
508 
509 	} else {
510 		/* stopping ftrace calls (just send to ftrace_stub) */
511 		ftrace_trace_function = ftrace_stub;
512 
513 		ftrace_shutdown_sysctl();
514 	}
515 
516  out:
517 	mutex_unlock(&ftrace_sysctl_lock);
518 	return ret;
519 }
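/*
 * ftrace_enable_sysctl() is wired up as a proc handler elsewhere; the exact
 * table entry is an assumption here, but it would look roughly like this,
 * letting "ftrace_enabled" be toggled at run time:
 *
 *	{
 *		.procname	= "ftrace_enabled",
 *		.data		= &ftrace_enabled,
 *		.maxlen		= sizeof(int),
 *		.mode		= 0644,
 *		.proc_handler	= &ftrace_enable_sysctl,
 *	},
 */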
520