1 2 #ifndef _LINUX_KERNEL_TRACE_H 3 #define _LINUX_KERNEL_TRACE_H 4 5 #include <linux/fs.h> 6 #include <linux/atomic.h> 7 #include <linux/sched.h> 8 #include <linux/clocksource.h> 9 #include <linux/ring_buffer.h> 10 #include <linux/mmiotrace.h> 11 #include <linux/tracepoint.h> 12 #include <linux/ftrace.h> 13 #include <linux/hw_breakpoint.h> 14 #include <linux/trace_seq.h> 15 #include <linux/ftrace_event.h> 16 #include <linux/compiler.h> 17 18 #ifdef CONFIG_FTRACE_SYSCALLS 19 #include <asm/unistd.h> /* For NR_SYSCALLS */ 20 #include <asm/syscall.h> /* some archs define it here */ 21 #endif 22 23 enum trace_type { 24 __TRACE_FIRST_TYPE = 0, 25 26 TRACE_FN, 27 TRACE_CTX, 28 TRACE_WAKE, 29 TRACE_STACK, 30 TRACE_PRINT, 31 TRACE_BPRINT, 32 TRACE_MMIO_RW, 33 TRACE_MMIO_MAP, 34 TRACE_BRANCH, 35 TRACE_GRAPH_RET, 36 TRACE_GRAPH_ENT, 37 TRACE_USER_STACK, 38 TRACE_BLK, 39 TRACE_BPUTS, 40 41 __TRACE_LAST_TYPE, 42 }; 43 44 45 #undef __field 46 #define __field(type, item) type item; 47 48 #undef __field_struct 49 #define __field_struct(type, item) __field(type, item) 50 51 #undef __field_desc 52 #define __field_desc(type, container, item) 53 54 #undef __array 55 #define __array(type, item, size) type item[size]; 56 57 #undef __array_desc 58 #define __array_desc(type, container, item, size) 59 60 #undef __dynamic_array 61 #define __dynamic_array(type, item) type item[]; 62 63 #undef F_STRUCT 64 #define F_STRUCT(args...) args 65 66 #undef FTRACE_ENTRY 67 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter) \ 68 struct struct_name { \ 69 struct trace_entry ent; \ 70 tstruct \ 71 } 72 73 #undef TP_ARGS 74 #define TP_ARGS(args...) args 75 76 #undef FTRACE_ENTRY_DUP 77 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter) 78 79 #undef FTRACE_ENTRY_REG 80 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \ 81 filter, regfn) \ 82 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \ 83 filter) 84 85 #include "trace_entries.h" 86 87 /* 88 * syscalls are special, and need special handling, this is why 89 * they are not included in trace_entries.h 90 */ 91 struct syscall_trace_enter { 92 struct trace_entry ent; 93 int nr; 94 unsigned long args[]; 95 }; 96 97 struct syscall_trace_exit { 98 struct trace_entry ent; 99 int nr; 100 long ret; 101 }; 102 103 struct kprobe_trace_entry_head { 104 struct trace_entry ent; 105 unsigned long ip; 106 }; 107 108 struct kretprobe_trace_entry_head { 109 struct trace_entry ent; 110 unsigned long func; 111 unsigned long ret_ip; 112 }; 113 114 /* 115 * trace_flag_type is an enumeration that holds different 116 * states when a trace occurs. These are: 117 * IRQS_OFF - interrupts were disabled 118 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags 119 * NEED_RESCHED - reschedule is requested 120 * HARDIRQ - inside an interrupt handler 121 * SOFTIRQ - inside a softirq handler 122 */ 123 enum trace_flag_type { 124 TRACE_FLAG_IRQS_OFF = 0x01, 125 TRACE_FLAG_IRQS_NOSUPPORT = 0x02, 126 TRACE_FLAG_NEED_RESCHED = 0x04, 127 TRACE_FLAG_HARDIRQ = 0x08, 128 TRACE_FLAG_SOFTIRQ = 0x10, 129 TRACE_FLAG_PREEMPT_RESCHED = 0x20, 130 }; 131 132 #define TRACE_BUF_SIZE 1024 133 134 struct trace_array; 135 136 /* 137 * The CPU trace array - it consists of thousands of trace entries 138 * plus some other descriptor data: (for example which task started 139 * the trace, etc.) 140 */ 141 struct trace_array_cpu { 142 atomic_t disabled; 143 void *buffer_page; /* ring buffer spare */ 144 145 unsigned long entries; 146 unsigned long saved_latency; 147 unsigned long critical_start; 148 unsigned long critical_end; 149 unsigned long critical_sequence; 150 unsigned long nice; 151 unsigned long policy; 152 unsigned long rt_priority; 153 unsigned long skipped_entries; 154 cycle_t preempt_timestamp; 155 pid_t pid; 156 kuid_t uid; 157 char comm[TASK_COMM_LEN]; 158 }; 159 160 struct tracer; 161 162 struct trace_buffer { 163 struct trace_array *tr; 164 struct ring_buffer *buffer; 165 struct trace_array_cpu __percpu *data; 166 cycle_t time_start; 167 int cpu; 168 }; 169 170 /* 171 * The trace array - an array of per-CPU trace arrays. This is the 172 * highest level data structure that individual tracers deal with. 173 * They have on/off state as well: 174 */ 175 struct trace_array { 176 struct list_head list; 177 char *name; 178 struct trace_buffer trace_buffer; 179 #ifdef CONFIG_TRACER_MAX_TRACE 180 /* 181 * The max_buffer is used to snapshot the trace when a maximum 182 * latency is reached, or when the user initiates a snapshot. 183 * Some tracers will use this to store a maximum trace while 184 * it continues examining live traces. 185 * 186 * The buffers for the max_buffer are set up the same as the trace_buffer 187 * When a snapshot is taken, the buffer of the max_buffer is swapped 188 * with the buffer of the trace_buffer and the buffers are reset for 189 * the trace_buffer so the tracing can continue. 190 */ 191 struct trace_buffer max_buffer; 192 bool allocated_snapshot; 193 #endif 194 int buffer_disabled; 195 #ifdef CONFIG_FTRACE_SYSCALLS 196 int sys_refcount_enter; 197 int sys_refcount_exit; 198 struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls]; 199 struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls]; 200 #endif 201 int stop_count; 202 int clock_id; 203 struct tracer *current_trace; 204 unsigned int flags; 205 raw_spinlock_t start_lock; 206 struct dentry *dir; 207 struct dentry *options; 208 struct dentry *percpu_dir; 209 struct dentry *event_dir; 210 struct list_head systems; 211 struct list_head events; 212 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ 213 int ref; 214 #ifdef CONFIG_FUNCTION_TRACER 215 struct ftrace_ops *ops; 216 /* function tracing enabled */ 217 int function_enabled; 218 #endif 219 }; 220 221 enum { 222 TRACE_ARRAY_FL_GLOBAL = (1 << 0) 223 }; 224 225 extern struct list_head ftrace_trace_arrays; 226 227 extern struct mutex trace_types_lock; 228 229 extern int trace_array_get(struct trace_array *tr); 230 extern void trace_array_put(struct trace_array *tr); 231 232 /* 233 * The global tracer (top) should be the first trace array added, 234 * but we check the flag anyway. 235 */ 236 static inline struct trace_array *top_trace_array(void) 237 { 238 struct trace_array *tr; 239 240 tr = list_entry(ftrace_trace_arrays.prev, 241 typeof(*tr), list); 242 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); 243 return tr; 244 } 245 246 #define FTRACE_CMP_TYPE(var, type) \ 247 __builtin_types_compatible_p(typeof(var), type *) 248 249 #undef IF_ASSIGN 250 #define IF_ASSIGN(var, entry, etype, id) \ 251 if (FTRACE_CMP_TYPE(var, etype)) { \ 252 var = (typeof(var))(entry); \ 253 WARN_ON(id && (entry)->type != id); \ 254 break; \ 255 } 256 257 /* Will cause compile errors if type is not found. */ 258 extern void __ftrace_bad_type(void); 259 260 /* 261 * The trace_assign_type is a verifier that the entry type is 262 * the same as the type being assigned. To add new types simply 263 * add a line with the following format: 264 * 265 * IF_ASSIGN(var, ent, type, id); 266 * 267 * Where "type" is the trace type that includes the trace_entry 268 * as the "ent" item. And "id" is the trace identifier that is 269 * used in the trace_type enum. 270 * 271 * If the type can have more than one id, then use zero. 272 */ 273 #define trace_assign_type(var, ent) \ 274 do { \ 275 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ 276 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ 277 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ 278 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ 279 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ 280 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ 281 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \ 282 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ 283 TRACE_MMIO_RW); \ 284 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ 285 TRACE_MMIO_MAP); \ 286 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ 287 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ 288 TRACE_GRAPH_ENT); \ 289 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ 290 TRACE_GRAPH_RET); \ 291 __ftrace_bad_type(); \ 292 } while (0) 293 294 /* 295 * An option specific to a tracer. This is a boolean value. 296 * The bit is the bit index that sets its value on the 297 * flags value in struct tracer_flags. 298 */ 299 struct tracer_opt { 300 const char *name; /* Will appear on the trace_options file */ 301 u32 bit; /* Mask assigned in val field in tracer_flags */ 302 }; 303 304 /* 305 * The set of specific options for a tracer. Your tracer 306 * have to set the initial value of the flags val. 307 */ 308 struct tracer_flags { 309 u32 val; 310 struct tracer_opt *opts; 311 }; 312 313 /* Makes more easy to define a tracer opt */ 314 #define TRACER_OPT(s, b) .name = #s, .bit = b 315 316 317 /** 318 * struct tracer - a specific tracer and its callbacks to interact with debugfs 319 * @name: the name chosen to select it on the available_tracers file 320 * @init: called when one switches to this tracer (echo name > current_tracer) 321 * @reset: called when one switches to another tracer 322 * @start: called when tracing is unpaused (echo 1 > tracing_enabled) 323 * @stop: called when tracing is paused (echo 0 > tracing_enabled) 324 * @open: called when the trace file is opened 325 * @pipe_open: called when the trace_pipe file is opened 326 * @wait_pipe: override how the user waits for traces on trace_pipe 327 * @close: called when the trace file is released 328 * @pipe_close: called when the trace_pipe file is released 329 * @read: override the default read callback on trace_pipe 330 * @splice_read: override the default splice_read callback on trace_pipe 331 * @selftest: selftest to run on boot (see trace_selftest.c) 332 * @print_headers: override the first lines that describe your columns 333 * @print_line: callback that prints a trace 334 * @set_flag: signals one of your private flags changed (trace_options file) 335 * @flags: your private flags 336 */ 337 struct tracer { 338 const char *name; 339 int (*init)(struct trace_array *tr); 340 void (*reset)(struct trace_array *tr); 341 void (*start)(struct trace_array *tr); 342 void (*stop)(struct trace_array *tr); 343 void (*open)(struct trace_iterator *iter); 344 void (*pipe_open)(struct trace_iterator *iter); 345 void (*wait_pipe)(struct trace_iterator *iter); 346 void (*close)(struct trace_iterator *iter); 347 void (*pipe_close)(struct trace_iterator *iter); 348 ssize_t (*read)(struct trace_iterator *iter, 349 struct file *filp, char __user *ubuf, 350 size_t cnt, loff_t *ppos); 351 ssize_t (*splice_read)(struct trace_iterator *iter, 352 struct file *filp, 353 loff_t *ppos, 354 struct pipe_inode_info *pipe, 355 size_t len, 356 unsigned int flags); 357 #ifdef CONFIG_FTRACE_STARTUP_TEST 358 int (*selftest)(struct tracer *trace, 359 struct trace_array *tr); 360 #endif 361 void (*print_header)(struct seq_file *m); 362 enum print_line_t (*print_line)(struct trace_iterator *iter); 363 /* If you handled the flag setting, return 0 */ 364 int (*set_flag)(struct trace_array *tr, 365 u32 old_flags, u32 bit, int set); 366 /* Return 0 if OK with change, else return non-zero */ 367 int (*flag_changed)(struct trace_array *tr, 368 u32 mask, int set); 369 struct tracer *next; 370 struct tracer_flags *flags; 371 int enabled; 372 bool print_max; 373 bool allow_instances; 374 #ifdef CONFIG_TRACER_MAX_TRACE 375 bool use_max_tr; 376 #endif 377 }; 378 379 380 /* Only current can touch trace_recursion */ 381 382 /* 383 * For function tracing recursion: 384 * The order of these bits are important. 385 * 386 * When function tracing occurs, the following steps are made: 387 * If arch does not support a ftrace feature: 388 * call internal function (uses INTERNAL bits) which calls... 389 * If callback is registered to the "global" list, the list 390 * function is called and recursion checks the GLOBAL bits. 391 * then this function calls... 392 * The function callback, which can use the FTRACE bits to 393 * check for recursion. 394 * 395 * Now if the arch does not suppport a feature, and it calls 396 * the global list function which calls the ftrace callback 397 * all three of these steps will do a recursion protection. 398 * There's no reason to do one if the previous caller already 399 * did. The recursion that we are protecting against will 400 * go through the same steps again. 401 * 402 * To prevent the multiple recursion checks, if a recursion 403 * bit is set that is higher than the MAX bit of the current 404 * check, then we know that the check was made by the previous 405 * caller, and we can skip the current check. 406 */ 407 enum { 408 TRACE_BUFFER_BIT, 409 TRACE_BUFFER_NMI_BIT, 410 TRACE_BUFFER_IRQ_BIT, 411 TRACE_BUFFER_SIRQ_BIT, 412 413 /* Start of function recursion bits */ 414 TRACE_FTRACE_BIT, 415 TRACE_FTRACE_NMI_BIT, 416 TRACE_FTRACE_IRQ_BIT, 417 TRACE_FTRACE_SIRQ_BIT, 418 419 /* GLOBAL_BITs must be greater than FTRACE_BITs */ 420 TRACE_GLOBAL_BIT, 421 TRACE_GLOBAL_NMI_BIT, 422 TRACE_GLOBAL_IRQ_BIT, 423 TRACE_GLOBAL_SIRQ_BIT, 424 425 /* INTERNAL_BITs must be greater than GLOBAL_BITs */ 426 TRACE_INTERNAL_BIT, 427 TRACE_INTERNAL_NMI_BIT, 428 TRACE_INTERNAL_IRQ_BIT, 429 TRACE_INTERNAL_SIRQ_BIT, 430 431 TRACE_CONTROL_BIT, 432 433 /* 434 * Abuse of the trace_recursion. 435 * As we need a way to maintain state if we are tracing the function 436 * graph in irq because we want to trace a particular function that 437 * was called in irq context but we have irq tracing off. Since this 438 * can only be modified by current, we can reuse trace_recursion. 439 */ 440 TRACE_IRQ_BIT, 441 }; 442 443 #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) 444 #define trace_recursion_clear(bit) do { (current)->trace_recursion &= ~(1<<(bit)); } while (0) 445 #define trace_recursion_test(bit) ((current)->trace_recursion & (1<<(bit))) 446 447 #define TRACE_CONTEXT_BITS 4 448 449 #define TRACE_FTRACE_START TRACE_FTRACE_BIT 450 #define TRACE_FTRACE_MAX ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1) 451 452 #define TRACE_GLOBAL_START TRACE_GLOBAL_BIT 453 #define TRACE_GLOBAL_MAX ((1 << (TRACE_GLOBAL_START + TRACE_CONTEXT_BITS)) - 1) 454 455 #define TRACE_LIST_START TRACE_INTERNAL_BIT 456 #define TRACE_LIST_MAX ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) 457 458 #define TRACE_CONTEXT_MASK TRACE_LIST_MAX 459 460 static __always_inline int trace_get_context_bit(void) 461 { 462 int bit; 463 464 if (in_interrupt()) { 465 if (in_nmi()) 466 bit = 0; 467 468 else if (in_irq()) 469 bit = 1; 470 else 471 bit = 2; 472 } else 473 bit = 3; 474 475 return bit; 476 } 477 478 static __always_inline int trace_test_and_set_recursion(int start, int max) 479 { 480 unsigned int val = current->trace_recursion; 481 int bit; 482 483 /* A previous recursion check was made */ 484 if ((val & TRACE_CONTEXT_MASK) > max) 485 return 0; 486 487 bit = trace_get_context_bit() + start; 488 if (unlikely(val & (1 << bit))) 489 return -1; 490 491 val |= 1 << bit; 492 current->trace_recursion = val; 493 barrier(); 494 495 return bit; 496 } 497 498 static __always_inline void trace_clear_recursion(int bit) 499 { 500 unsigned int val = current->trace_recursion; 501 502 if (!bit) 503 return; 504 505 bit = 1 << bit; 506 val &= ~bit; 507 508 barrier(); 509 current->trace_recursion = val; 510 } 511 512 static inline struct ring_buffer_iter * 513 trace_buffer_iter(struct trace_iterator *iter, int cpu) 514 { 515 if (iter->buffer_iter && iter->buffer_iter[cpu]) 516 return iter->buffer_iter[cpu]; 517 return NULL; 518 } 519 520 int tracer_init(struct tracer *t, struct trace_array *tr); 521 int tracing_is_enabled(void); 522 void tracing_reset(struct trace_buffer *buf, int cpu); 523 void tracing_reset_online_cpus(struct trace_buffer *buf); 524 void tracing_reset_current(int cpu); 525 void tracing_reset_all_online_cpus(void); 526 int tracing_open_generic(struct inode *inode, struct file *filp); 527 bool tracing_is_disabled(void); 528 struct dentry *trace_create_file(const char *name, 529 umode_t mode, 530 struct dentry *parent, 531 void *data, 532 const struct file_operations *fops); 533 534 struct dentry *tracing_init_dentry_tr(struct trace_array *tr); 535 struct dentry *tracing_init_dentry(void); 536 537 struct ring_buffer_event; 538 539 struct ring_buffer_event * 540 trace_buffer_lock_reserve(struct ring_buffer *buffer, 541 int type, 542 unsigned long len, 543 unsigned long flags, 544 int pc); 545 546 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, 547 struct trace_array_cpu *data); 548 549 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 550 int *ent_cpu, u64 *ent_ts); 551 552 void __buffer_unlock_commit(struct ring_buffer *buffer, 553 struct ring_buffer_event *event); 554 555 int trace_empty(struct trace_iterator *iter); 556 557 void *trace_find_next_entry_inc(struct trace_iterator *iter); 558 559 void trace_init_global_iter(struct trace_iterator *iter); 560 561 void tracing_iter_reset(struct trace_iterator *iter, int cpu); 562 563 void poll_wait_pipe(struct trace_iterator *iter); 564 565 void tracing_sched_switch_trace(struct trace_array *tr, 566 struct task_struct *prev, 567 struct task_struct *next, 568 unsigned long flags, int pc); 569 570 void tracing_sched_wakeup_trace(struct trace_array *tr, 571 struct task_struct *wakee, 572 struct task_struct *cur, 573 unsigned long flags, int pc); 574 void trace_function(struct trace_array *tr, 575 unsigned long ip, 576 unsigned long parent_ip, 577 unsigned long flags, int pc); 578 void trace_graph_function(struct trace_array *tr, 579 unsigned long ip, 580 unsigned long parent_ip, 581 unsigned long flags, int pc); 582 void trace_latency_header(struct seq_file *m); 583 void trace_default_header(struct seq_file *m); 584 void print_trace_header(struct seq_file *m, struct trace_iterator *iter); 585 int trace_empty(struct trace_iterator *iter); 586 587 void trace_graph_return(struct ftrace_graph_ret *trace); 588 int trace_graph_entry(struct ftrace_graph_ent *trace); 589 void set_graph_array(struct trace_array *tr); 590 591 void tracing_start_cmdline_record(void); 592 void tracing_stop_cmdline_record(void); 593 void tracing_sched_switch_assign_trace(struct trace_array *tr); 594 void tracing_stop_sched_switch_record(void); 595 void tracing_start_sched_switch_record(void); 596 int register_tracer(struct tracer *type); 597 int is_tracing_stopped(void); 598 599 loff_t tracing_lseek(struct file *file, loff_t offset, int whence); 600 601 extern cpumask_var_t __read_mostly tracing_buffer_mask; 602 603 #define for_each_tracing_cpu(cpu) \ 604 for_each_cpu(cpu, tracing_buffer_mask) 605 606 extern unsigned long nsecs_to_usecs(unsigned long nsecs); 607 608 extern unsigned long tracing_thresh; 609 610 #ifdef CONFIG_TRACER_MAX_TRACE 611 extern unsigned long tracing_max_latency; 612 613 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); 614 void update_max_tr_single(struct trace_array *tr, 615 struct task_struct *tsk, int cpu); 616 #endif /* CONFIG_TRACER_MAX_TRACE */ 617 618 #ifdef CONFIG_STACKTRACE 619 void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags, 620 int skip, int pc); 621 622 void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags, 623 int skip, int pc, struct pt_regs *regs); 624 625 void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, 626 int pc); 627 628 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip, 629 int pc); 630 #else 631 static inline void ftrace_trace_stack(struct ring_buffer *buffer, 632 unsigned long flags, int skip, int pc) 633 { 634 } 635 636 static inline void ftrace_trace_stack_regs(struct ring_buffer *buffer, 637 unsigned long flags, int skip, 638 int pc, struct pt_regs *regs) 639 { 640 } 641 642 static inline void ftrace_trace_userstack(struct ring_buffer *buffer, 643 unsigned long flags, int pc) 644 { 645 } 646 647 static inline void __trace_stack(struct trace_array *tr, unsigned long flags, 648 int skip, int pc) 649 { 650 } 651 #endif /* CONFIG_STACKTRACE */ 652 653 extern cycle_t ftrace_now(int cpu); 654 655 extern void trace_find_cmdline(int pid, char comm[]); 656 657 #ifdef CONFIG_DYNAMIC_FTRACE 658 extern unsigned long ftrace_update_tot_cnt; 659 #endif 660 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func 661 extern int DYN_FTRACE_TEST_NAME(void); 662 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 663 extern int DYN_FTRACE_TEST_NAME2(void); 664 665 extern bool ring_buffer_expanded; 666 extern bool tracing_selftest_disabled; 667 DECLARE_PER_CPU(int, ftrace_cpu_disabled); 668 669 #ifdef CONFIG_FTRACE_STARTUP_TEST 670 extern int trace_selftest_startup_function(struct tracer *trace, 671 struct trace_array *tr); 672 extern int trace_selftest_startup_function_graph(struct tracer *trace, 673 struct trace_array *tr); 674 extern int trace_selftest_startup_irqsoff(struct tracer *trace, 675 struct trace_array *tr); 676 extern int trace_selftest_startup_preemptoff(struct tracer *trace, 677 struct trace_array *tr); 678 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, 679 struct trace_array *tr); 680 extern int trace_selftest_startup_wakeup(struct tracer *trace, 681 struct trace_array *tr); 682 extern int trace_selftest_startup_nop(struct tracer *trace, 683 struct trace_array *tr); 684 extern int trace_selftest_startup_sched_switch(struct tracer *trace, 685 struct trace_array *tr); 686 extern int trace_selftest_startup_branch(struct tracer *trace, 687 struct trace_array *tr); 688 /* 689 * Tracer data references selftest functions that only occur 690 * on boot up. These can be __init functions. Thus, when selftests 691 * are enabled, then the tracers need to reference __init functions. 692 */ 693 #define __tracer_data __refdata 694 #else 695 /* Tracers are seldom changed. Optimize when selftests are disabled. */ 696 #define __tracer_data __read_mostly 697 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 698 699 extern void *head_page(struct trace_array_cpu *data); 700 extern unsigned long long ns2usecs(cycle_t nsec); 701 extern int 702 trace_vbprintk(unsigned long ip, const char *fmt, va_list args); 703 extern int 704 trace_vprintk(unsigned long ip, const char *fmt, va_list args); 705 extern int 706 trace_array_vprintk(struct trace_array *tr, 707 unsigned long ip, const char *fmt, va_list args); 708 int trace_array_printk(struct trace_array *tr, 709 unsigned long ip, const char *fmt, ...); 710 int trace_array_printk_buf(struct ring_buffer *buffer, 711 unsigned long ip, const char *fmt, ...); 712 void trace_printk_seq(struct trace_seq *s); 713 enum print_line_t print_trace_line(struct trace_iterator *iter); 714 715 extern unsigned long trace_flags; 716 717 /* Standard output formatting function used for function return traces */ 718 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 719 720 /* Flag options */ 721 #define TRACE_GRAPH_PRINT_OVERRUN 0x1 722 #define TRACE_GRAPH_PRINT_CPU 0x2 723 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 724 #define TRACE_GRAPH_PRINT_PROC 0x8 725 #define TRACE_GRAPH_PRINT_DURATION 0x10 726 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 727 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28 728 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT) 729 730 extern enum print_line_t 731 print_graph_function_flags(struct trace_iterator *iter, u32 flags); 732 extern void print_graph_headers_flags(struct seq_file *s, u32 flags); 733 extern enum print_line_t 734 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); 735 extern void graph_trace_open(struct trace_iterator *iter); 736 extern void graph_trace_close(struct trace_iterator *iter); 737 extern int __trace_graph_entry(struct trace_array *tr, 738 struct ftrace_graph_ent *trace, 739 unsigned long flags, int pc); 740 extern void __trace_graph_return(struct trace_array *tr, 741 struct ftrace_graph_ret *trace, 742 unsigned long flags, int pc); 743 744 745 #ifdef CONFIG_DYNAMIC_FTRACE 746 /* TODO: make this variable */ 747 #define FTRACE_GRAPH_MAX_FUNCS 32 748 extern int ftrace_graph_count; 749 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS]; 750 extern int ftrace_graph_notrace_count; 751 extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS]; 752 753 static inline int ftrace_graph_addr(unsigned long addr) 754 { 755 int i; 756 757 if (!ftrace_graph_count) 758 return 1; 759 760 for (i = 0; i < ftrace_graph_count; i++) { 761 if (addr == ftrace_graph_funcs[i]) { 762 /* 763 * If no irqs are to be traced, but a set_graph_function 764 * is set, and called by an interrupt handler, we still 765 * want to trace it. 766 */ 767 if (in_irq()) 768 trace_recursion_set(TRACE_IRQ_BIT); 769 else 770 trace_recursion_clear(TRACE_IRQ_BIT); 771 return 1; 772 } 773 } 774 775 return 0; 776 } 777 778 static inline int ftrace_graph_notrace_addr(unsigned long addr) 779 { 780 int i; 781 782 if (!ftrace_graph_notrace_count) 783 return 0; 784 785 for (i = 0; i < ftrace_graph_notrace_count; i++) { 786 if (addr == ftrace_graph_notrace_funcs[i]) 787 return 1; 788 } 789 790 return 0; 791 } 792 #else 793 static inline int ftrace_graph_addr(unsigned long addr) 794 { 795 return 1; 796 } 797 798 static inline int ftrace_graph_notrace_addr(unsigned long addr) 799 { 800 return 0; 801 } 802 #endif /* CONFIG_DYNAMIC_FTRACE */ 803 #else /* CONFIG_FUNCTION_GRAPH_TRACER */ 804 static inline enum print_line_t 805 print_graph_function_flags(struct trace_iterator *iter, u32 flags) 806 { 807 return TRACE_TYPE_UNHANDLED; 808 } 809 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 810 811 extern struct list_head ftrace_pids; 812 813 #ifdef CONFIG_FUNCTION_TRACER 814 extern bool ftrace_filter_param __initdata; 815 static inline int ftrace_trace_task(struct task_struct *task) 816 { 817 if (list_empty(&ftrace_pids)) 818 return 1; 819 820 return test_tsk_trace_trace(task); 821 } 822 extern int ftrace_is_dead(void); 823 int ftrace_create_function_files(struct trace_array *tr, 824 struct dentry *parent); 825 void ftrace_destroy_function_files(struct trace_array *tr); 826 #else 827 static inline int ftrace_trace_task(struct task_struct *task) 828 { 829 return 1; 830 } 831 static inline int ftrace_is_dead(void) { return 0; } 832 static inline int 833 ftrace_create_function_files(struct trace_array *tr, 834 struct dentry *parent) 835 { 836 return 0; 837 } 838 static inline void ftrace_destroy_function_files(struct trace_array *tr) { } 839 #endif /* CONFIG_FUNCTION_TRACER */ 840 841 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) 842 void ftrace_create_filter_files(struct ftrace_ops *ops, 843 struct dentry *parent); 844 void ftrace_destroy_filter_files(struct ftrace_ops *ops); 845 #else 846 /* 847 * The ops parameter passed in is usually undefined. 848 * This must be a macro. 849 */ 850 #define ftrace_create_filter_files(ops, parent) do { } while (0) 851 #define ftrace_destroy_filter_files(ops) do { } while (0) 852 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ 853 854 int ftrace_event_is_function(struct ftrace_event_call *call); 855 856 /* 857 * struct trace_parser - servers for reading the user input separated by spaces 858 * @cont: set if the input is not complete - no final space char was found 859 * @buffer: holds the parsed user input 860 * @idx: user input length 861 * @size: buffer size 862 */ 863 struct trace_parser { 864 bool cont; 865 char *buffer; 866 unsigned idx; 867 unsigned size; 868 }; 869 870 static inline bool trace_parser_loaded(struct trace_parser *parser) 871 { 872 return (parser->idx != 0); 873 } 874 875 static inline bool trace_parser_cont(struct trace_parser *parser) 876 { 877 return parser->cont; 878 } 879 880 static inline void trace_parser_clear(struct trace_parser *parser) 881 { 882 parser->cont = false; 883 parser->idx = 0; 884 } 885 886 extern int trace_parser_get_init(struct trace_parser *parser, int size); 887 extern void trace_parser_put(struct trace_parser *parser); 888 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, 889 size_t cnt, loff_t *ppos); 890 891 /* 892 * trace_iterator_flags is an enumeration that defines bit 893 * positions into trace_flags that controls the output. 894 * 895 * NOTE: These bits must match the trace_options array in 896 * trace.c. 897 */ 898 enum trace_iterator_flags { 899 TRACE_ITER_PRINT_PARENT = 0x01, 900 TRACE_ITER_SYM_OFFSET = 0x02, 901 TRACE_ITER_SYM_ADDR = 0x04, 902 TRACE_ITER_VERBOSE = 0x08, 903 TRACE_ITER_RAW = 0x10, 904 TRACE_ITER_HEX = 0x20, 905 TRACE_ITER_BIN = 0x40, 906 TRACE_ITER_BLOCK = 0x80, 907 TRACE_ITER_STACKTRACE = 0x100, 908 TRACE_ITER_PRINTK = 0x200, 909 TRACE_ITER_PREEMPTONLY = 0x400, 910 TRACE_ITER_BRANCH = 0x800, 911 TRACE_ITER_ANNOTATE = 0x1000, 912 TRACE_ITER_USERSTACKTRACE = 0x2000, 913 TRACE_ITER_SYM_USEROBJ = 0x4000, 914 TRACE_ITER_PRINTK_MSGONLY = 0x8000, 915 TRACE_ITER_CONTEXT_INFO = 0x10000, /* Print pid/cpu/time */ 916 TRACE_ITER_LATENCY_FMT = 0x20000, 917 TRACE_ITER_SLEEP_TIME = 0x40000, 918 TRACE_ITER_GRAPH_TIME = 0x80000, 919 TRACE_ITER_RECORD_CMD = 0x100000, 920 TRACE_ITER_OVERWRITE = 0x200000, 921 TRACE_ITER_STOP_ON_FREE = 0x400000, 922 TRACE_ITER_IRQ_INFO = 0x800000, 923 TRACE_ITER_MARKERS = 0x1000000, 924 TRACE_ITER_FUNCTION = 0x2000000, 925 }; 926 927 /* 928 * TRACE_ITER_SYM_MASK masks the options in trace_flags that 929 * control the output of kernel symbols. 930 */ 931 #define TRACE_ITER_SYM_MASK \ 932 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) 933 934 extern struct tracer nop_trace; 935 936 #ifdef CONFIG_BRANCH_TRACER 937 extern int enable_branch_tracing(struct trace_array *tr); 938 extern void disable_branch_tracing(void); 939 static inline int trace_branch_enable(struct trace_array *tr) 940 { 941 if (trace_flags & TRACE_ITER_BRANCH) 942 return enable_branch_tracing(tr); 943 return 0; 944 } 945 static inline void trace_branch_disable(void) 946 { 947 /* due to races, always disable */ 948 disable_branch_tracing(); 949 } 950 #else 951 static inline int trace_branch_enable(struct trace_array *tr) 952 { 953 return 0; 954 } 955 static inline void trace_branch_disable(void) 956 { 957 } 958 #endif /* CONFIG_BRANCH_TRACER */ 959 960 /* set ring buffers to default size if not already done so */ 961 int tracing_update_buffers(void); 962 963 struct ftrace_event_field { 964 struct list_head link; 965 const char *name; 966 const char *type; 967 int filter_type; 968 int offset; 969 int size; 970 int is_signed; 971 }; 972 973 struct event_filter { 974 int n_preds; /* Number assigned */ 975 int a_preds; /* allocated */ 976 struct filter_pred *preds; 977 struct filter_pred *root; 978 char *filter_string; 979 }; 980 981 struct event_subsystem { 982 struct list_head list; 983 const char *name; 984 struct event_filter *filter; 985 int ref_count; 986 }; 987 988 struct ftrace_subsystem_dir { 989 struct list_head list; 990 struct event_subsystem *subsystem; 991 struct trace_array *tr; 992 struct dentry *entry; 993 int ref_count; 994 int nr_events; 995 }; 996 997 #define FILTER_PRED_INVALID ((unsigned short)-1) 998 #define FILTER_PRED_IS_RIGHT (1 << 15) 999 #define FILTER_PRED_FOLD (1 << 15) 1000 1001 /* 1002 * The max preds is the size of unsigned short with 1003 * two flags at the MSBs. One bit is used for both the IS_RIGHT 1004 * and FOLD flags. The other is reserved. 1005 * 1006 * 2^14 preds is way more than enough. 1007 */ 1008 #define MAX_FILTER_PRED 16384 1009 1010 struct filter_pred; 1011 struct regex; 1012 1013 typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event); 1014 1015 typedef int (*regex_match_func)(char *str, struct regex *r, int len); 1016 1017 enum regex_type { 1018 MATCH_FULL = 0, 1019 MATCH_FRONT_ONLY, 1020 MATCH_MIDDLE_ONLY, 1021 MATCH_END_ONLY, 1022 }; 1023 1024 struct regex { 1025 char pattern[MAX_FILTER_STR_VAL]; 1026 int len; 1027 int field_len; 1028 regex_match_func match; 1029 }; 1030 1031 struct filter_pred { 1032 filter_pred_fn_t fn; 1033 u64 val; 1034 struct regex regex; 1035 unsigned short *ops; 1036 struct ftrace_event_field *field; 1037 int offset; 1038 int not; 1039 int op; 1040 unsigned short index; 1041 unsigned short parent; 1042 unsigned short left; 1043 unsigned short right; 1044 }; 1045 1046 extern enum regex_type 1047 filter_parse_regex(char *buff, int len, char **search, int *not); 1048 extern void print_event_filter(struct ftrace_event_file *file, 1049 struct trace_seq *s); 1050 extern int apply_event_filter(struct ftrace_event_file *file, 1051 char *filter_string); 1052 extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir, 1053 char *filter_string); 1054 extern void print_subsystem_event_filter(struct event_subsystem *system, 1055 struct trace_seq *s); 1056 extern int filter_assign_type(const char *type); 1057 extern int create_event_filter(struct ftrace_event_call *call, 1058 char *filter_str, bool set_str, 1059 struct event_filter **filterp); 1060 extern void free_event_filter(struct event_filter *filter); 1061 1062 struct ftrace_event_field * 1063 trace_find_event_field(struct ftrace_event_call *call, char *name); 1064 1065 extern void trace_event_enable_cmd_record(bool enable); 1066 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); 1067 extern int event_trace_del_tracer(struct trace_array *tr); 1068 1069 extern struct ftrace_event_file *find_event_file(struct trace_array *tr, 1070 const char *system, 1071 const char *event); 1072 1073 static inline void *event_file_data(struct file *filp) 1074 { 1075 return ACCESS_ONCE(file_inode(filp)->i_private); 1076 } 1077 1078 extern struct mutex event_mutex; 1079 extern struct list_head ftrace_events; 1080 1081 extern const struct file_operations event_trigger_fops; 1082 1083 extern int register_trigger_cmds(void); 1084 extern void clear_event_triggers(struct trace_array *tr); 1085 1086 struct event_trigger_data { 1087 unsigned long count; 1088 int ref; 1089 struct event_trigger_ops *ops; 1090 struct event_command *cmd_ops; 1091 struct event_filter __rcu *filter; 1092 char *filter_str; 1093 void *private_data; 1094 struct list_head list; 1095 }; 1096 1097 /** 1098 * struct event_trigger_ops - callbacks for trace event triggers 1099 * 1100 * The methods in this structure provide per-event trigger hooks for 1101 * various trigger operations. 1102 * 1103 * All the methods below, except for @init() and @free(), must be 1104 * implemented. 1105 * 1106 * @func: The trigger 'probe' function called when the triggering 1107 * event occurs. The data passed into this callback is the data 1108 * that was supplied to the event_command @reg() function that 1109 * registered the trigger (see struct event_command). 1110 * 1111 * @init: An optional initialization function called for the trigger 1112 * when the trigger is registered (via the event_command reg() 1113 * function). This can be used to perform per-trigger 1114 * initialization such as incrementing a per-trigger reference 1115 * count, for instance. This is usually implemented by the 1116 * generic utility function @event_trigger_init() (see 1117 * trace_event_triggers.c). 1118 * 1119 * @free: An optional de-initialization function called for the 1120 * trigger when the trigger is unregistered (via the 1121 * event_command @reg() function). This can be used to perform 1122 * per-trigger de-initialization such as decrementing a 1123 * per-trigger reference count and freeing corresponding trigger 1124 * data, for instance. This is usually implemented by the 1125 * generic utility function @event_trigger_free() (see 1126 * trace_event_triggers.c). 1127 * 1128 * @print: The callback function invoked to have the trigger print 1129 * itself. This is usually implemented by a wrapper function 1130 * that calls the generic utility function @event_trigger_print() 1131 * (see trace_event_triggers.c). 1132 */ 1133 struct event_trigger_ops { 1134 void (*func)(struct event_trigger_data *data); 1135 int (*init)(struct event_trigger_ops *ops, 1136 struct event_trigger_data *data); 1137 void (*free)(struct event_trigger_ops *ops, 1138 struct event_trigger_data *data); 1139 int (*print)(struct seq_file *m, 1140 struct event_trigger_ops *ops, 1141 struct event_trigger_data *data); 1142 }; 1143 1144 /** 1145 * struct event_command - callbacks and data members for event commands 1146 * 1147 * Event commands are invoked by users by writing the command name 1148 * into the 'trigger' file associated with a trace event. The 1149 * parameters associated with a specific invocation of an event 1150 * command are used to create an event trigger instance, which is 1151 * added to the list of trigger instances associated with that trace 1152 * event. When the event is hit, the set of triggers associated with 1153 * that event is invoked. 1154 * 1155 * The data members in this structure provide per-event command data 1156 * for various event commands. 1157 * 1158 * All the data members below, except for @post_trigger, must be set 1159 * for each event command. 1160 * 1161 * @name: The unique name that identifies the event command. This is 1162 * the name used when setting triggers via trigger files. 1163 * 1164 * @trigger_type: A unique id that identifies the event command 1165 * 'type'. This value has two purposes, the first to ensure that 1166 * only one trigger of the same type can be set at a given time 1167 * for a particular event e.g. it doesn't make sense to have both 1168 * a traceon and traceoff trigger attached to a single event at 1169 * the same time, so traceon and traceoff have the same type 1170 * though they have different names. The @trigger_type value is 1171 * also used as a bit value for deferring the actual trigger 1172 * action until after the current event is finished. Some 1173 * commands need to do this if they themselves log to the trace 1174 * buffer (see the @post_trigger() member below). @trigger_type 1175 * values are defined by adding new values to the trigger_type 1176 * enum in include/linux/ftrace_event.h. 1177 * 1178 * @post_trigger: A flag that says whether or not this command needs 1179 * to have its action delayed until after the current event has 1180 * been closed. Some triggers need to avoid being invoked while 1181 * an event is currently in the process of being logged, since 1182 * the trigger may itself log data into the trace buffer. Thus 1183 * we make sure the current event is committed before invoking 1184 * those triggers. To do that, the trigger invocation is split 1185 * in two - the first part checks the filter using the current 1186 * trace record; if a command has the @post_trigger flag set, it 1187 * sets a bit for itself in the return value, otherwise it 1188 * directly invokes the trigger. Once all commands have been 1189 * either invoked or set their return flag, the current record is 1190 * either committed or discarded. At that point, if any commands 1191 * have deferred their triggers, those commands are finally 1192 * invoked following the close of the current event. In other 1193 * words, if the event_trigger_ops @func() probe implementation 1194 * itself logs to the trace buffer, this flag should be set, 1195 * otherwise it can be left unspecified. 1196 * 1197 * All the methods below, except for @set_filter(), must be 1198 * implemented. 1199 * 1200 * @func: The callback function responsible for parsing and 1201 * registering the trigger written to the 'trigger' file by the 1202 * user. It allocates the trigger instance and registers it with 1203 * the appropriate trace event. It makes use of the other 1204 * event_command callback functions to orchestrate this, and is 1205 * usually implemented by the generic utility function 1206 * @event_trigger_callback() (see trace_event_triggers.c). 1207 * 1208 * @reg: Adds the trigger to the list of triggers associated with the 1209 * event, and enables the event trigger itself, after 1210 * initializing it (via the event_trigger_ops @init() function). 1211 * This is also where commands can use the @trigger_type value to 1212 * make the decision as to whether or not multiple instances of 1213 * the trigger should be allowed. This is usually implemented by 1214 * the generic utility function @register_trigger() (see 1215 * trace_event_triggers.c). 1216 * 1217 * @unreg: Removes the trigger from the list of triggers associated 1218 * with the event, and disables the event trigger itself, after 1219 * initializing it (via the event_trigger_ops @free() function). 1220 * This is usually implemented by the generic utility function 1221 * @unregister_trigger() (see trace_event_triggers.c). 1222 * 1223 * @set_filter: An optional function called to parse and set a filter 1224 * for the trigger. If no @set_filter() method is set for the 1225 * event command, filters set by the user for the command will be 1226 * ignored. This is usually implemented by the generic utility 1227 * function @set_trigger_filter() (see trace_event_triggers.c). 1228 * 1229 * @get_trigger_ops: The callback function invoked to retrieve the 1230 * event_trigger_ops implementation associated with the command. 1231 */ 1232 struct event_command { 1233 struct list_head list; 1234 char *name; 1235 enum event_trigger_type trigger_type; 1236 bool post_trigger; 1237 int (*func)(struct event_command *cmd_ops, 1238 struct ftrace_event_file *file, 1239 char *glob, char *cmd, char *params); 1240 int (*reg)(char *glob, 1241 struct event_trigger_ops *ops, 1242 struct event_trigger_data *data, 1243 struct ftrace_event_file *file); 1244 void (*unreg)(char *glob, 1245 struct event_trigger_ops *ops, 1246 struct event_trigger_data *data, 1247 struct ftrace_event_file *file); 1248 int (*set_filter)(char *filter_str, 1249 struct event_trigger_data *data, 1250 struct ftrace_event_file *file); 1251 struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param); 1252 }; 1253 1254 extern int trace_event_enable_disable(struct ftrace_event_file *file, 1255 int enable, int soft_disable); 1256 extern int tracing_alloc_snapshot(void); 1257 1258 extern const char *__start___trace_bprintk_fmt[]; 1259 extern const char *__stop___trace_bprintk_fmt[]; 1260 1261 extern const char *__start___tracepoint_str[]; 1262 extern const char *__stop___tracepoint_str[]; 1263 1264 void trace_printk_init_buffers(void); 1265 void trace_printk_start_comm(void); 1266 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); 1267 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled); 1268 1269 /* 1270 * Normal trace_printk() and friends allocates special buffers 1271 * to do the manipulation, as well as saves the print formats 1272 * into sections to display. But the trace infrastructure wants 1273 * to use these without the added overhead at the price of being 1274 * a bit slower (used mainly for warnings, where we don't care 1275 * about performance). The internal_trace_puts() is for such 1276 * a purpose. 1277 */ 1278 #define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str)) 1279 1280 #undef FTRACE_ENTRY 1281 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ 1282 extern struct ftrace_event_call \ 1283 __aligned(4) event_##call; 1284 #undef FTRACE_ENTRY_DUP 1285 #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \ 1286 FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \ 1287 filter) 1288 #include "trace_entries.h" 1289 1290 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) 1291 int perf_ftrace_event_register(struct ftrace_event_call *call, 1292 enum trace_reg type, void *data); 1293 #else 1294 #define perf_ftrace_event_register NULL 1295 #endif 1296 1297 #endif /* _LINUX_KERNEL_TRACE_H */ 1298