#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS	     */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,
	TRACE_HWLAT,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
			    filter)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter) __packed

#include "trace_entries.h"

/*
 * Syscalls are special and need special handling; this is why
 * they are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};
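
/*
 * For example (an illustrative sketch, matching the function entry in
 * trace_entries.h), a definition such as:
 *
 *	FTRACE_ENTRY(function, ftrace_entry,
 *		TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long,	ip)
 *			__field(unsigned long,	parent_ip)
 *		),
 *		...
 *
 * is expanded by the macros above into:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */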

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	bool			ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
	bool			ftrace_ignore_pid;
#endif
};

struct tracer;
struct trace_option_dentry;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};
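
/*
 * The pids field is a bitmap indexed by pid: bit N set means pid N is
 * in the list. An illustrative sketch of a lookup (not the actual
 * implementation, which lives in trace.c):
 *
 *	if (search_pid < pid_list->pid_max &&
 *	    test_bit(search_pid, pid_list->pids))
 *		return true;	/* pid is in the filtered set */
 */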

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well.
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
#endif
#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
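
/*
 * Typical use in an output callback, as an illustrative sketch (here
 * iter->ent is known to be a TRACE_FN record):
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%ps\n", (void *)field->ip);
 */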

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b


struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};
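
/*
 * An illustrative sketch of a hypothetical tracer's private flags
 * (all names here are made up):
 *
 *	static struct tracer_opt foo_opts[] = {
 *		{ TRACER_OPT(foo_verbose, 0x1) },
 *		{ }	// terminator entry
 *	};
 *
 *	static struct tracer_flags foo_flags = {
 *		.val  = 0,	// all options start off
 *		.opts = foo_opts,
 *	};
 */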

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
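
/*
 * A minimal registration, as an illustrative sketch (the nop tracer in
 * trace_nop.c is a small real-world example of this shape; the foo_*
 * names are hypothetical):
 *
 *	static struct tracer foo_tracer = {
 *		.name	= "foo",
 *		.init	= foo_tracer_init,
 *		.reset	= foo_tracer_reset,
 *		.flags	= &foo_flags,
 *	};
 *
 *	// typically from an __init function:
 *	register_tracer(&foo_tracer);
 */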


/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are made:
 *   If arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
	/*
	 * Abuse of the trace_recursion.
	 * We need a way to maintain state if we are tracing the function
	 * graph in irq because we want to trace a particular function that
	 * was called in irq context but we have irq tracing off. Since this
	 * can only be modified by current, we can reuse trace_recursion.
	 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
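
/*
 * The usual guard pattern in a function-trace callback, as an
 * illustrative sketch:
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *					   TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;		// recursion detected in this context
 *
 *	// ... record the event ...
 *
 *	trace_clear_recursion(bit);
 */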

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
int tracer_tracing_is_on(struct trace_array *tr);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);
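
/*
 * An illustrative sketch of how a tracing hook consults the filtered
 * pid list (the real callers load the pointer under RCU):
 *
 *	struct trace_pid_list *pid_list;
 *
 *	pid_list = rcu_dereference_sched(tr->filtered_pids);
 *	if (trace_ignore_this_task(pid_list, current))
 *		return;
 */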

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data __refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data __read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
#define TRACE_GRAPH_PRINT_TAIL		0x80
#define TRACE_GRAPH_SLEEP_TIME		0x100
#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);
extern void ftrace_graph_graph_time_control(bool enable);

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);
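
/*
 * The TRACE_GRAPH_PRINT_* flags above are OR'd together and passed to
 * the graph output helpers; an illustrative sketch:
 *
 *	u32 flags = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_DURATION;
 *
 *	return print_graph_function_flags(iter, flags);
 */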


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
/* The ftrace_func_t type is not defined here; use a macro instead of a static inline. */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
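
/*
 * An illustrative sketch of the usual write-handler loop for a
 * trace_parser (the set_ftrace_filter write path is a real example):
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser)) {
 *		// parser.buffer now holds one NUL-terminated token
 *	}
 *
 *	trace_parser_put(&parser);
 */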

/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *	 trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b)		TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
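
/*
 * Concretely, for the C(VERBOSE, "verbose") entry the two expansions
 * above yield (illustrative):
 *
 *	TRACE_ITER_VERBOSE_BIT,					// first #define C
 *	TRACE_ITER_VERBOSE = (1 << TRACE_ITER_VERBOSE_BIT),	// second #define C
 *
 * and trace.c redefines C once more to turn the same list into the
 * trace_options[] array of option name strings.
 */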

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event,
					   irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}
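
/*
 * An illustrative sketch of a hand-rolled event probe using this
 * helper (setup of file/buffer/type and the flags elided):
 *
 *	event = trace_buffer_lock_reserve(buffer, type, sizeof(*entry),
 *					  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	// ... fill in the entry's fields ...
 *	event_trigger_unlock_commit(file, buffer, event, entry,
 *				    irq_flags, pc);
 */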

/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};
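
/*
 * filter_parse_regex() (declared below) derives the match type from
 * where a '*' appears in the pattern; illustrative examples:
 *
 *	"abc"	-> MATCH_FULL
 *	"abc*"	-> MATCH_FRONT_ONLY
 *	"*abc"	-> MATCH_END_ONLY
 *	"*abc*"	-> MATCH_MIDDLE_ONLY
 */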

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};
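
/*
 * These are the command names a user writes to a trace event's
 * "trigger" file, e.g. (illustrative):
 *
 *	echo 'enable_event:kmem:kmalloc' > \
 *		/sys/kernel/debug/tracing/events/sched/sched_switch/trigger
 */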

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
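
/*
 * An illustrative sketch of a trigger's ops instance (the traceon and
 * traceoff trigger ops in trace_events_trigger.c are real examples;
 * the foo_* names are hypothetical):
 *
 *	static struct event_trigger_ops foo_trigger_ops = {
 *		.func	= foo_trigger,
 *		.print	= foo_trigger_print,
 *		.init	= event_trigger_init,
 *		.free	= foo_trigger_free,
 *	};
 */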

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes. The first is to ensure
 *	that only one trigger of the same type can be set at a given
 *	time for a particular event; e.g. it doesn't make sense to
 *	have both a traceon and a traceoff trigger attached to a
 *	single event at the same time, so traceon and traceoff have
 *	the same type though they have different names. The
 *	@trigger_type value is also used as a bit value for deferring
 *	the actual trigger action until after the current event is
 *	finished. Some commands need to do this if they themselves
 *	log to the trace buffer (see the @post_trigger() member
 *	below). @trigger_type values are defined by adding new values
 *	to the trigger_type enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
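
/*
 * An illustrative sketch of wiring up a command, using the generic
 * helpers named in the comment above (the foo_* names are
 * hypothetical):
 *
 *	static struct event_command trigger_foo_cmd = {
 *		.name			= "foo",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= foo_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	register_event_command(&trigger_foo_cmd);
 */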

/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as saving the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_enum_update(struct trace_enum_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

#endif /* _LINUX_KERNEL_TRACE_H */