#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"
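
/*
 * As an illustration of the machinery above (a sketch, not literal
 * preprocessor output): an FTRACE_ENTRY() definition in trace_entries.h
 * of the form
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		...);
 *
 * expands here to roughly:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */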

/*
 * syscalls are special, and need special handling, this is why
 * they are not included in trace_entries.h
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example which task started
 * the trace, etc.)
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	bool			ignore_pid;
};

struct tracer;
struct trace_option_dentry;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};
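
/*
 * A sketch of how a trace_pid_list is typically consulted (illustrative
 * only; the real helpers live in trace.c). The ->pids member is a
 * bitmap indexed by pid, bounded by ->pid_max:
 *
 *	static bool pid_is_filtered(struct trace_pid_list *pid_list,
 *				    pid_t pid)
 *	{
 *		return pid < pid_list->pid_max &&
 *		       test_bit(pid, pid_list->pids);
 *	}
 */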
/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
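
/*
 * Typical use of trace_assign_type() in an output callback (a sketch of
 * the pattern used in trace_output.c):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *
 * after which field->ip and field->parent_ip can be used; a mismatched
 * entry type trips the WARN_ON() in IF_ASSIGN().
 */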

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b


struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
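
/*
 * A minimal sketch of wiring tracer options together; the names below
 * are hypothetical, not from this file. The opts array ends with an
 * empty terminating entry:
 *
 *	static struct tracer_opt my_opts[] = {
 *		{ TRACER_OPT(my-option, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags my_flags = {
 *		.val  = 0,
 *		.opts = my_opts,
 *	};
 *
 *	static struct tracer my_tracer = {
 *		.name  = "my_tracer",
 *		.flags = &my_flags,
 *		...
 *	};
 */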


/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support a ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function
 * graph in irq context, because we want to trace a particular
 * function that was called in irq context but we have irq tracing
 * off. Since this can only be modified by current, we can reuse
 * trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
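
/*
 * A sketch of how a function callback typically uses the recursion
 * protection above (the callback name is hypothetical):
 *
 *	static void my_func_callback(unsigned long ip,
 *				     unsigned long parent_ip)
 *	{
 *		int bit;
 *
 *		bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *						   TRACE_FTRACE_MAX);
 *		if (bit < 0)
 *			return;		(already recursing in this context)
 *
 *		(... do the actual tracing work ...)
 *
 *		trace_clear_recursion(bit);
 *	}
 *
 * A return of 0 means a caller higher in the chain already holds a
 * recursion bit, so this check is skipped (trace_clear_recursion(0)
 * is a no-op); only a negative return indicates real recursion.
 */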

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
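
/*
 * Example (illustrative) of iterating every CPU that tracing covers,
 * e.g. to reset each per-CPU buffer:
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(&tr->trace_buffer, cpu);
 */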

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * during boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
#define TRACE_GRAPH_PRINT_TAIL		0x80
#define TRACE_GRAPH_SLEEP_TIME		0x100
#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);
extern void ftrace_graph_graph_time_control(bool enable);

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct task_struct *task)
{
	if (list_empty(&ftrace_pids))
		return 1;

	return test_tsk_trace_trace(task);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
#else
static inline int ftrace_trace_task(struct task_struct *task)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
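
/*
 * A sketch of the usual write-handler pattern built on trace_parser
 * (illustrative only; the buffer size and flow are modeled on the
 * write handlers in ftrace.c):
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		(parser.buffer now holds one space-separated token)
 *	}
 *
 *	trace_parser_put(&parser);
 */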

/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *	 trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
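
/*
 * Worked example of the two expansions above (a sketch): for the first
 * entry, C(PRINT_PARENT, "print-parent") first expands to the
 * enumerator TRACE_ITER_PRINT_PARENT_BIT (value 0), and after C is
 * redefined it expands to
 *
 *	TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT)
 *
 * i.e. the mask 0x1. The same TRACE_FLAGS list also generates the
 * trace_options strings in trace.c, which is what keeps the bit
 * positions and option names in sync.
 */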

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}

/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated with the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 * @regs: The registers saved for the event
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};
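
/*
 * How the regex types above map to user-supplied filter patterns
 * (roughly, as implemented by filter_parse_regex()):
 *
 *	"glob"		- MATCH_FULL (exact match)
 *	"glob*"		- MATCH_FRONT_ONLY (prefix match)
 *	"*glob"		- MATCH_END_ONLY (suffix match)
 *	"*glob*"	- MATCH_MIDDLE_ONLY (substring match)
 */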

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
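
/*
 * The commands above back the per-event "trigger" files in tracefs.
 * For example (illustrative shell usage; the mount point may vary):
 *
 *	# echo 'enable_event:sched:sched_wakeup:10' > \
 *		/sys/kernel/debug/tracing/events/syscalls/sys_enter_open/trigger
 *
 * arms a trigger that enables the sched:sched_wakeup event for the
 * next 10 hits of sys_enter_open; ENABLE_EVENT_STR is the command
 * name parsed out of that string.
 */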

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below). @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
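
/*
 * A minimal sketch of what a concrete event_command definition looks
 * like (modeled on the trigger commands in trace_events_trigger.c;
 * treat the exact member values as illustrative):
 *
 *	static struct event_command trigger_traceon_cmd = {
 *		.name			= "traceon",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= onoff_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 * which would then be handed to register_event_command() during
 * initialization.
 */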

/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as saving the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_enum_update(struct trace_enum_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

#endif /* _LINUX_KERNEL_TRACE_H */