--- ftrace.c (3d0833953e1b98b79ddf491dd49229eef9baeac1)
+++ ftrace.c (b0fc494fae96a7089f3651cb451f461c7291244c)
@@ -1,8 +1,8 @@
 /*
  * Infrastructure for profiling code inserted by 'gcc -pg'.
  *
  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
  *
  * Originally ported from the -rt patch by:
  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
@@ -15,22 +15,34 @@
 
 #include <linux/stop_machine.h>
 #include <linux/clocksource.h>
 #include <linux/kallsyms.h>
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/ftrace.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 #include <linux/hash.h>
 #include <linux/list.h>
 
 #include "trace.h"
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define FTRACE_ENABLED_INIT 1
+#else
+# define FTRACE_ENABLED_INIT 0
+#endif
+
+int ftrace_enabled = FTRACE_ENABLED_INIT;
+static int last_ftrace_enabled = FTRACE_ENABLED_INIT;
+
 static DEFINE_SPINLOCK(ftrace_lock);
+static DEFINE_MUTEX(ftrace_sysctl_lock);
+
 static struct ftrace_ops ftrace_list_end __read_mostly =
 {
 	.func = ftrace_stub,
 };
 
 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 
@@ -73,24 +85,26 @@
 	 * We are entering ops into the ftrace_list but another
 	 * CPU might be walking that list. We need to make sure
 	 * the ops->next pointer is valid before another CPU sees
 	 * the ops pointer included into the ftrace_list.
 	 */
 	smp_wmb();
 	ftrace_list = ops;
 
-	/*
-	 * For one func, simply call it directly.
-	 * For more than one func, call the chain.
-	 */
-	if (ops->next == &ftrace_list_end)
-		ftrace_trace_function = ops->func;
-	else
-		ftrace_trace_function = ftrace_list_func;
+	if (ftrace_enabled) {
+		/*
+		 * For one func, simply call it directly.
+		 * For more than one func, call the chain.
+		 */
+		if (ops->next == &ftrace_list_end)
+			ftrace_trace_function = ops->func;
+		else
+			ftrace_trace_function = ftrace_list_func;
+	}
 
 	spin_unlock(&ftrace_lock);
 
 	return 0;
 }
 
 static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
 {
@@ -115,20 +129,22 @@
 
 	if (*p != ops) {
 		ret = -1;
 		goto out;
 	}
 
 	*p = (*p)->next;
 
-	/* If we only have one func left, then call that directly */
-	if (ftrace_list == &ftrace_list_end ||
-	    ftrace_list->next == &ftrace_list_end)
-		ftrace_trace_function = ftrace_list->func;
+	if (ftrace_enabled) {
+		/* If we only have one func left, then call that directly */
+		if (ftrace_list == &ftrace_list_end ||
+		    ftrace_list->next == &ftrace_list_end)
+			ftrace_trace_function = ftrace_list->func;
+	}
 
  out:
 	spin_unlock(&ftrace_lock);
 
 	return ret;
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -258,35 +274,55 @@
 static void notrace ftrace_startup(void)
 {
 	mutex_lock(&ftraced_lock);
 	ftraced_suspend++;
 	if (ftraced_suspend != 1)
 		goto out;
 	__unregister_ftrace_function(&ftrace_shutdown_ops);
 
-	ftrace_run_startup_code();
+	if (ftrace_enabled)
+		ftrace_run_startup_code();
  out:
 	mutex_unlock(&ftraced_lock);
 }
 
 static void notrace ftrace_shutdown(void)
 {
 	mutex_lock(&ftraced_lock);
 	ftraced_suspend--;
 	if (ftraced_suspend)
 		goto out;
 
-	ftrace_run_shutdown_code();
+	if (ftrace_enabled)
+		ftrace_run_shutdown_code();
 
 	__register_ftrace_function(&ftrace_shutdown_ops);
  out:
 	mutex_unlock(&ftraced_lock);
 }
 
+static void notrace ftrace_startup_sysctl(void)
+{
+	mutex_lock(&ftraced_lock);
+	/* ftraced_suspend is true if we want ftrace running */
+	if (ftraced_suspend)
+		ftrace_run_startup_code();
+	mutex_unlock(&ftraced_lock);
+}
+
+static void notrace ftrace_shutdown_sysctl(void)
+{
+	mutex_lock(&ftraced_lock);
+	/* ftraced_suspend is true if ftrace is running */
+	if (ftraced_suspend)
+		ftrace_run_shutdown_code();
+	mutex_unlock(&ftraced_lock);
+}
+
 static cycle_t ftrace_update_time;
 static unsigned long ftrace_update_cnt;
 unsigned long ftrace_update_tot_cnt;
 
 static int notrace __ftrace_update_code(void *ignore)
 {
 	struct dyn_ftrace *p;
 	struct hlist_head head;
@@ -336,35 +372,37 @@
 
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	while (!kthread_should_stop()) {
 
 		/* check once a second */
 		schedule_timeout(HZ);
 
+		mutex_lock(&ftrace_sysctl_lock);
 		mutex_lock(&ftraced_lock);
-		if (ftraced_trigger && !ftraced_suspend) {
+		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
 			ftrace_record_suspend++;
 			ftrace_update_code();
 			usecs = nsecs_to_usecs(ftrace_update_time);
 			if (ftrace_update_tot_cnt > 100000) {
 				ftrace_update_tot_cnt = 0;
 				pr_info("hm, dftrace overflow: %lu change%s"
 					" (%lu total) in %lu usec%s\n",
 					ftrace_update_cnt,
 					ftrace_update_cnt != 1 ? "s" : "",
 					ftrace_update_tot_cnt,
 					usecs, usecs != 1 ? "s" : "");
 				WARN_ON_ONCE(1);
 			}
 			ftraced_trigger = 0;
 			ftrace_record_suspend--;
 		}
 		mutex_unlock(&ftraced_lock);
+		mutex_unlock(&ftrace_sysctl_lock);
 
 		ftrace_shutdown_replenish();
 
 		set_current_state(TASK_INTERRUPTIBLE);
 	}
 	__set_current_state(TASK_RUNNING);
 	return 0;
 }
@@ -384,47 +422,98 @@
 
 	__register_ftrace_function(&ftrace_shutdown_ops);
 
 	return 0;
 }
 
 core_initcall(ftrace_shutdown_init);
 #else
 # define ftrace_startup() do { } while (0)
 # define ftrace_shutdown() do { } while (0)
+# define ftrace_startup_sysctl() do { } while (0)
+# define ftrace_shutdown_sysctl() do { } while (0)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 /**
  * register_ftrace_function - register a function for profiling
  * @ops - ops structure that holds the function for profiling.
  *
  * Register a function to be called by all functions in the
  * kernel.
  *
  * Note: @ops->func and all the functions it calls must be labeled
  *       with "notrace", otherwise it will go into a
  *       recursive loop.
  */
 int register_ftrace_function(struct ftrace_ops *ops)
 {
+	int ret;
+
+	mutex_lock(&ftrace_sysctl_lock);
 	ftrace_startup();
 
-	return __register_ftrace_function(ops);
+	ret = __register_ftrace_function(ops);
+	mutex_unlock(&ftrace_sysctl_lock);
+
+	return ret;
 }
 
 /**
  * unregister_ftrace_function - unresgister a function for profiling.
  * @ops - ops structure that holds the function to unregister
  *
  * Unregister a function that was added to be called by ftrace profiling.
  */
 int unregister_ftrace_function(struct ftrace_ops *ops)
 {
 	int ret;
 
+	mutex_lock(&ftrace_sysctl_lock);
 	ret = __unregister_ftrace_function(ops);
 
 	if (ftrace_list == &ftrace_list_end)
 		ftrace_shutdown();
 
+	mutex_unlock(&ftrace_sysctl_lock);
+
 	return ret;
 }
+
+notrace int
+ftrace_enable_sysctl(struct ctl_table *table, int write,
+		     struct file *filp, void __user *buffer, size_t *lenp,
+		     loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&ftrace_sysctl_lock);
+
+	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+
+	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+		goto out;
+
+	last_ftrace_enabled = ftrace_enabled;
+
+	if (ftrace_enabled) {
+
+		ftrace_startup_sysctl();
+
+		/* we are starting ftrace again */
+		if (ftrace_list != &ftrace_list_end) {
+			if (ftrace_list->next == &ftrace_list_end)
+				ftrace_trace_function = ftrace_list->func;
+			else
+				ftrace_trace_function = ftrace_list_func;
+		}
+
+	} else {
+		/* stopping ftrace calls (just send to ftrace_stub) */
+		ftrace_trace_function = ftrace_stub;
+
+		ftrace_shutdown_sysctl();
+	}
+
+ out:
+	mutex_unlock(&ftrace_sysctl_lock);
+	return ret;
+}
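
For context on the API the hunks above touch: register_ftrace_function()/unregister_ftrace_function() take an ftrace_ops whose .func is invoked from every traced function in the kernel. A minimal caller sketch follows; it is hypothetical (not part of the diff) and assumes the ftrace_func_t of this era, which receives the traced instruction pointer and its parent's. As the kernel-doc above warns, everything on the callback path must be notrace.

/* Hypothetical user of the API above -- an illustration, not kernel code. */
#include <linux/ftrace.h>
#include <linux/module.h>

/*
 * The callback and everything it calls must be notrace, otherwise the
 * traced call re-enters the tracer and recurses.
 */
static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* do something cheap and notrace-safe with ip/parent_ip */
}

static struct ftrace_ops my_trace_ops __read_mostly = {
	.func	= my_trace_func,
};

static int __init my_tracer_init(void)
{
	/* adds the ops to ftrace_list and arms the mcount hooks */
	return register_ftrace_function(&my_trace_ops);
}

static void __exit my_tracer_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}

module_init(my_tracer_init);
module_exit(my_tracer_exit);
MODULE_LICENSE("GPL");

With ftrace_enabled set, registering a single ops points ftrace_trace_function at ops->func directly; a second registration switches it to ftrace_list_func, which walks the chain, exactly as __register_ftrace_function() shows above.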
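
ftrace_enable_sysctl() is only the handler half of the new knob; nothing in this file registers it with the sysctl machinery, so the table entry presumably lives elsewhere (kernel/sysctl.c would be the usual spot). A sketch of what such an entry could look like, using the ctl_table layout of this kernel generation; the table name, procname, and placement here are assumptions for illustration, not taken from this diff.

/* Hypothetical wiring of the handler above into a sysctl table. */
#include <linux/sysctl.h>
#include <linux/ftrace.h>	/* assumed to declare ftrace_enabled and
				 * ftrace_enable_sysctl for outside users */

static struct ctl_table my_ftrace_sysctl_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "ftrace_enabled",
		.data		= &ftrace_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &ftrace_enable_sysctl,
	},
	{ .ctl_name = 0 }
};

Through an entry like this, writing 0 makes the handler park ftrace_trace_function on ftrace_stub and call ftrace_shutdown_sysctl(); writing 1 restores either the single registered ops->func or ftrace_list_func and re-runs the startup code, as the last hunk of the diff shows.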