#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/trace_seq.h>
#include <linux/trace_events.h>
#include <linux/compiler.h>

#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h>		/* For NR_SYSCALLS */
#include <asm/syscall.h>	/* some archs define it here */
#endif

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_BLK,
	TRACE_BPUTS,

	__TRACE_LAST_TYPE,
};


#undef __field
#define __field(type, item)		type	item;

#undef __field_struct
#define __field_struct(type, item)	__field(type, item)

#undef __field_desc
#define __field_desc(type, container, item)

#undef __array
#define __array(type, item, size)	type	item[size];

#undef __array_desc
#define __array_desc(type, container, item, size)

#undef __dynamic_array
#define __dynamic_array(type, item)	type	item[];

#undef F_STRUCT
#define F_STRUCT(args...)		args

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
	struct struct_name {						\
		struct trace_entry	ent;				\
		tstruct							\
	}

#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)

#undef FTRACE_ENTRY_REG
#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,	\
			 filter, regfn)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print,	\
			    filter)					\
	FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter) __packed

#include "trace_entries.h"

/*
 * Syscalls are special and need special handling; that is why they
 * are not included in trace_entries.h.
 */
struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	long			ret;
};

struct kprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		ip;
};

struct kretprobe_trace_entry_head {
	struct trace_entry	ent;
	unsigned long		func;
	unsigned long		ret_ip;
};
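
/*
 * Illustrative sketch of what the FTRACE_ENTRY() machinery above
 * produces; not part of this header, and the exact fields come from
 * trace_entries.h. An invocation along the lines of:
 *
 *	FTRACE_ENTRY(function, ftrace_entry, TRACE_FN,
 *		F_STRUCT(
 *			__field(unsigned long, ip)
 *			__field(unsigned long, parent_ip)
 *		),
 *		..., ...)
 *
 * expands to a plain C struct:
 *
 *	struct ftrace_entry {
 *		struct trace_entry	ent;
 *		unsigned long		ip;
 *		unsigned long		parent_ip;
 *	};
 */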

/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
	TRACE_FLAG_NMI			= 0x40,
};

#define TRACE_BUF_SIZE		1024

struct trace_array;

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example, which task started
 * the trace).
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	unsigned long		entries;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	unsigned long		skipped_entries;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	kuid_t			uid;
	char			comm[TASK_COMM_LEN];

	bool			ignore_pid;
#ifdef CONFIG_FUNCTION_TRACER
	bool			ftrace_ignore_pid;
#endif
};

struct tracer;
struct trace_option_dentry;

struct trace_buffer {
	struct trace_array		*tr;
	struct ring_buffer		*buffer;
	struct trace_array_cpu __percpu	*data;
	cycle_t				time_start;
	int				cpu;
};

#define TRACE_FLAGS_MAX_SIZE		32

struct trace_options {
	struct tracer			*tracer;
	struct trace_option_dentry	*topts;
};

struct trace_pid_list {
	int				pid_max;
	unsigned long			*pids;
};
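
/*
 * Illustrative sketch (assumed detail; cf. trace_find_filtered_pid()
 * in trace.c): @pids is a bitmap indexed by pid, so a lookup is just
 * a bounds check plus test_bit():
 *
 *	if (pid_list->pids && pid < pid_list->pid_max &&
 *	    test_bit(pid, pid_list->pids))
 *		... pid is in the filtered set ...
 */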

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct list_head	list;
	char			*name;
	struct trace_buffer	trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
	/*
	 * The max_buffer is used to snapshot the trace when a maximum
	 * latency is reached, or when the user initiates a snapshot.
	 * Some tracers will use this to store a maximum trace while
	 * it continues examining live traces.
	 *
	 * The buffers for the max_buffer are set up the same as the
	 * trace_buffer. When a snapshot is taken, the buffer of the
	 * max_buffer is swapped with the buffer of the trace_buffer and
	 * the buffers are reset for the trace_buffer so the tracing can
	 * continue.
	 */
	struct trace_buffer	max_buffer;
	bool			allocated_snapshot;
	unsigned long		max_latency;
#endif
	struct trace_pid_list	__rcu *filtered_pids;
	/*
	 * max_lock is used to protect the swapping of buffers
	 * when taking a max snapshot. The buffers themselves are
	 * protected by per_cpu spinlocks. But the action of the swap
	 * needs its own lock.
	 *
	 * This is defined as an arch_spinlock_t in order to help
	 * with performance when lockdep debugging is enabled.
	 *
	 * It is also used in other places outside of update_max_tr(),
	 * so it needs to be defined outside of the
	 * CONFIG_TRACER_MAX_TRACE block.
	 */
	arch_spinlock_t		max_lock;
	int			buffer_disabled;
#ifdef CONFIG_FTRACE_SYSCALLS
	int			sys_refcount_enter;
	int			sys_refcount_exit;
	struct trace_event_file __rcu *enter_syscall_files[NR_syscalls];
	struct trace_event_file __rcu *exit_syscall_files[NR_syscalls];
#endif
	int			stop_count;
	int			clock_id;
	int			nr_topts;
	struct tracer		*current_trace;
	unsigned int		trace_flags;
	unsigned char		trace_flags_index[TRACE_FLAGS_MAX_SIZE];
	unsigned int		flags;
	raw_spinlock_t		start_lock;
	struct dentry		*dir;
	struct dentry		*options;
	struct dentry		*percpu_dir;
	struct dentry		*event_dir;
	struct trace_options	*topts;
	struct list_head	systems;
	struct list_head	events;
	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
	int			ref;
#ifdef CONFIG_FUNCTION_TRACER
	struct ftrace_ops	*ops;
	struct trace_pid_list	__rcu *function_pids;
	/* function tracing enabled */
	int			function_enabled;
#endif
};

enum {
	TRACE_ARRAY_FL_GLOBAL	= (1 << 0)
};

extern struct list_head ftrace_trace_arrays;

extern struct mutex trace_types_lock;

extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);

/*
 * The global tracer (top) should be the first trace array added,
 * but we check the flag anyway.
 */
static inline struct trace_array *top_trace_array(void)
{
	struct trace_array *tr;

	if (list_empty(&ftrace_trace_arrays))
		return NULL;

	tr = list_entry(ftrace_trace_arrays.prev,
			typeof(*tr), list);
	WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
	return tr;
}

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);	\
		IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS);	\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);	\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		__ftrace_bad_type();					\
	} while (0)
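
/*
 * Illustrative usage sketch (the pattern used by the output callbacks
 * in trace_output.c):
 *
 *	struct ftrace_entry *field;
 *	struct trace_entry *entry = iter->ent;
 *
 *	trace_assign_type(field, entry);
 *	trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *			 field->ip, field->parent_ip);
 */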

/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
	struct tracer		*trace;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b


struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

/**
 * struct tracer - a specific tracer and its callbacks to interact with tracefs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_on)
 * @stop: called when tracing is paused (echo 0 > tracing_on)
 * @update_thresh: called when tracing_thresh is updated
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @close: called when the trace file is released
 * @pipe_close: called when the trace_pipe file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	int			(*update_thresh)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	void			(*pipe_close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(struct trace_array *tr,
					    u32 old_flags, u32 bit, int set);
	/* Return 0 if OK with change, else return non-zero */
	int			(*flag_changed)(struct trace_array *tr,
						u32 mask, int set);
	struct tracer		*next;
	struct tracer_flags	*flags;
	int			enabled;
	int			ref;
	bool			print_max;
	bool			allow_instances;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool			use_max_tr;
#endif
};
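
/*
 * Illustrative sketch of how a tracer wires this up (hypothetical
 * "foo" tracer; all foo_* names are assumptions, the pattern mirrors
 * real tracers such as function_graph):
 *
 *	static struct tracer_opt foo_opts[] = {
 *		{ TRACER_OPT(foo-verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags foo_flags = {
 *		.val  = 0,
 *		.opts = foo_opts,
 *	};
 *
 *	static struct tracer foo_tracer __tracer_data = {
 *		.name  = "foo",
 *		.init  = foo_init,
 *		.reset = foo_reset,
 *		.flags = &foo_flags,
 *	};
 *
 * The opts array must end with an empty terminator, .val holds the
 * initial option bits, and the tracer is made visible at boot with
 * register_tracer(&foo_tracer).
 */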

/* Only current can touch trace_recursion */

/*
 * For function tracing recursion:
 *  The order of these bits is important.
 *
 *  When function tracing occurs, the following steps are taken:
 *   If arch does not support an ftrace feature:
 *    call internal function (uses INTERNAL bits) which calls...
 *   If callback is registered to the "global" list, the list
 *    function is called and recursion checks the GLOBAL bits.
 *    then this function calls...
 *   The function callback, which can use the FTRACE bits to
 *    check for recursion.
 *
 * Now if the arch does not support a feature, and it calls
 * the global list function which calls the ftrace callback,
 * all three of these steps will do a recursion protection.
 * There's no reason to do one if the previous caller already
 * did. The recursion that we are protecting against will
 * go through the same steps again.
 *
 * To prevent the multiple recursion checks, if a recursion
 * bit is set that is higher than the MAX bit of the current
 * check, then we know that the check was made by the previous
 * caller, and we can skip the current check.
 */
enum {
	TRACE_BUFFER_BIT,
	TRACE_BUFFER_NMI_BIT,
	TRACE_BUFFER_IRQ_BIT,
	TRACE_BUFFER_SIRQ_BIT,

	/* Start of function recursion bits */
	TRACE_FTRACE_BIT,
	TRACE_FTRACE_NMI_BIT,
	TRACE_FTRACE_IRQ_BIT,
	TRACE_FTRACE_SIRQ_BIT,

	/* INTERNAL_BITs must be greater than FTRACE_BITs */
	TRACE_INTERNAL_BIT,
	TRACE_INTERNAL_NMI_BIT,
	TRACE_INTERNAL_IRQ_BIT,
	TRACE_INTERNAL_SIRQ_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * We need a way to maintain state if we are tracing the function
 * graph in irq context: we may want to trace a particular function
 * that was called from an interrupt handler even though we have irq
 * tracing off. Since this can only be modified by current, we can
 * reuse trace_recursion.
 */
	TRACE_IRQ_BIT,
};

#define trace_recursion_set(bit)	do { (current)->trace_recursion |= (1<<(bit)); } while (0)
#define trace_recursion_clear(bit)	do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
#define trace_recursion_test(bit)	((current)->trace_recursion & (1<<(bit)))
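
/*
 * Illustrative usage sketch (assumed usage pattern): a callback that
 * must not recurse brackets its work with the helpers above:
 *
 *	if (unlikely(trace_recursion_test(TRACE_BRANCH_BIT)))
 *		return;
 *	trace_recursion_set(TRACE_BRANCH_BIT);
 *	... do the tracing work ...
 *	trace_recursion_clear(TRACE_BRANCH_BIT);
 */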

#define TRACE_CONTEXT_BITS	4

#define TRACE_FTRACE_START	TRACE_FTRACE_BIT
#define TRACE_FTRACE_MAX	((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_LIST_START	TRACE_INTERNAL_BIT
#define TRACE_LIST_MAX		((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)

#define TRACE_CONTEXT_MASK	TRACE_LIST_MAX

static __always_inline int trace_get_context_bit(void)
{
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	return bit;
}

static __always_inline int trace_test_and_set_recursion(int start, int max)
{
	unsigned int val = current->trace_recursion;
	int bit;

	/* A previous recursion check was made */
	if ((val & TRACE_CONTEXT_MASK) > max)
		return 0;

	bit = trace_get_context_bit() + start;
	if (unlikely(val & (1 << bit)))
		return -1;

	val |= 1 << bit;
	current->trace_recursion = val;
	barrier();

	return bit;
}

static __always_inline void trace_clear_recursion(int bit)
{
	unsigned int val = current->trace_recursion;

	if (!bit)
		return;

	bit = 1 << bit;
	val &= ~bit;

	barrier();
	current->trace_recursion = val;
}
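
/*
 * Illustrative sketch (mirrors the way the function tracer callbacks
 * use the two helpers above): the traced work is bracketed by a
 * test-and-set on entry and a clear on exit:
 *
 *	int bit;
 *
 *	bit = trace_test_and_set_recursion(TRACE_FTRACE_START,
 *					   TRACE_FTRACE_MAX);
 *	if (bit < 0)
 *		return;
 *	... trace the function ...
 *	trace_clear_recursion(bit);
 */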

static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
	if (iter->buffer_iter && iter->buffer_iter[cpu])
		return iter->buffer_iter[cpu];
	return NULL;
}

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu);
void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
bool tracing_is_disabled(void);
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);

struct ring_buffer_event;

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags,
			  int pc);

struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void __buffer_unlock_commit(struct ring_buffer *buffer,
			    struct ring_buffer_event *event);

int trace_empty(struct trace_iterator *iter);

void *trace_find_next_entry_inc(struct trace_iterator *iter);

void trace_init_global_iter(struct trace_iterator *iter);

void tracing_iter_reset(struct trace_iterator *iter, int cpu);

void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);
void trace_graph_function(struct trace_array *tr,
			  unsigned long ip,
			  unsigned long parent_ip,
			  unsigned long flags, int pc);
void trace_latency_header(struct seq_file *m);
void trace_default_header(struct seq_file *m);
void print_trace_header(struct seq_file *m, struct trace_iterator *iter);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);
void set_graph_array(struct trace_array *tr);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
int register_tracer(struct tracer *type);
int is_tracing_stopped(void);

loff_t tracing_lseek(struct file *file, loff_t offset, int whence);

extern cpumask_var_t __read_mostly tracing_buffer_mask;

#define for_each_tracing_cpu(cpu)	\
	for_each_cpu(cpu, tracing_buffer_mask)
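
/*
 * Illustrative sketch (assumed usage): walk every CPU that is being
 * traced, e.g. to reset each per-CPU buffer:
 *
 *	int cpu;
 *
 *	for_each_tracing_cpu(cpu)
 *		tracing_reset(&tr->trace_buffer, cpu);
 */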

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_thresh;

/* PID filtering */

extern int pid_max;

bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids,
			     pid_t search_pid);
bool trace_ignore_this_task(struct trace_pid_list *filtered_pids,
			    struct task_struct *task);
void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
				  struct task_struct *self,
				  struct task_struct *task);
void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos);
void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos);
int trace_pid_show(struct seq_file *m, void *v);
void trace_free_pid_list(struct trace_pid_list *pid_list);
int trace_pid_write(struct trace_pid_list *filtered_pids,
		    struct trace_pid_list **new_pid_list,
		    const char __user *ubuf, size_t cnt);

#ifdef CONFIG_TRACER_MAX_TRACE
void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */

#ifdef CONFIG_STACKTRACE
void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
			    int pc);

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc);
#else
static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
					  unsigned long flags, int pc)
{
}

static inline void __trace_stack(struct trace_array *tr, unsigned long flags,
				 int skip, int pc)
{
}
#endif /* CONFIG_STACKTRACE */

extern cycle_t ftrace_now(int cpu);

extern void trace_find_cmdline(int pid, char comm[]);
extern void trace_event_follow_fork(struct trace_array *tr, bool enable);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void);

extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int trace_selftest_startup_sched_switch(struct tracer *trace,
					       struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
/*
 * Tracer data references selftest functions that only occur
 * on boot up. These can be __init functions. Thus, when selftests
 * are enabled, the tracers need to reference __init functions.
 */
#define __tracer_data		__refdata
#else
/* Tracers are seldom changed. Optimize when selftests are disabled. */
#define __tracer_data		__read_mostly
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_array_vprintk(struct trace_array *tr,
		    unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
		       unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter);

extern char trace_find_mark(unsigned long long duration);

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40
#define TRACE_GRAPH_PRINT_TAIL		0x80
#define TRACE_GRAPH_SLEEP_TIME		0x100
#define TRACE_GRAPH_GRAPH_TIME		0x200
#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

extern void ftrace_graph_sleep_time_control(bool enable);
extern void ftrace_graph_graph_time_control(bool enable);

extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
			       struct ftrace_graph_ent *trace,
			       unsigned long flags, int pc);
extern void __trace_graph_return(struct trace_array *tr,
				 struct ftrace_graph_ret *trace,
				 unsigned long flags, int pc);


#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
extern int ftrace_graph_notrace_count;
extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
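
/*
 * Illustrative note (assumed detail): ftrace_graph_funcs[] holds the
 * addresses of the functions the user wrote into the
 * set_graph_function tracefs file, e.g.
 *
 *	# echo do_page_fault > /sys/kernel/debug/tracing/set_graph_function
 *
 * ftrace_graph_addr() below then reports a match only for those
 * addresses (or for everything when the list is empty).
 */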

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count)
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i]) {
			/*
			 * If no irqs are to be traced, but a set_graph_function
			 * is set, and called by an interrupt handler, we still
			 * want to trace it.
			 */
			if (in_irq())
				trace_recursion_set(TRACE_IRQ_BIT);
			else
				trace_recursion_clear(TRACE_IRQ_BIT);
			return 1;
		}
	}

	return 0;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_notrace_count)
		return 0;

	for (i = 0; i < ftrace_graph_notrace_count; i++) {
		if (addr == ftrace_graph_notrace_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}

static inline int ftrace_graph_notrace_addr(unsigned long addr)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct list_head ftrace_pids;

#ifdef CONFIG_FUNCTION_TRACER
extern bool ftrace_filter_param __initdata;
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
}
extern int ftrace_is_dead(void);
int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent);
void ftrace_destroy_function_files(struct trace_array *tr);
void ftrace_init_global_array_ops(struct trace_array *tr);
void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
void ftrace_reset_array_ops(struct trace_array *tr);
int using_ftrace_ops_list_func(void);
void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
void ftrace_init_tracefs_toplevel(struct trace_array *tr,
				  struct dentry *d_tracer);
#else
static inline int ftrace_trace_task(struct trace_array *tr)
{
	return 1;
}
static inline int ftrace_is_dead(void) { return 0; }
static inline int
ftrace_create_function_files(struct trace_array *tr,
			     struct dentry *parent)
{
	return 0;
}
static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
static inline __init void
ftrace_init_global_array_ops(struct trace_array *tr) { }
static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }
/* ftrace_func_t type is not defined, use macro instead of static inline */
#define ftrace_init_array_ops(tr, func) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER */

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
void ftrace_create_filter_files(struct ftrace_ops *ops,
				struct dentry *parent);
void ftrace_destroy_filter_files(struct ftrace_ops *ops);
#else
/*
 * The ops parameter passed in is usually undefined.
 * This must be a macro.
 */
#define ftrace_create_filter_files(ops, parent) do { } while (0)
#define ftrace_destroy_filter_files(ops) do { } while (0)
#endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */

bool ftrace_event_is_function(struct trace_event_call *call);

/*
 * struct trace_parser - serves for reading the user input separated by spaces
 * @cont: set if the input is not complete - no final space char was found
 * @buffer: holds the parsed user input
 * @idx: user input length
 * @size: buffer size
 */
struct trace_parser {
	bool		cont;
	char		*buffer;
	unsigned	idx;
	unsigned	size;
};

static inline bool trace_parser_loaded(struct trace_parser *parser)
{
	return (parser->idx != 0);
}

static inline bool trace_parser_cont(struct trace_parser *parser)
{
	return parser->cont;
}

static inline void trace_parser_clear(struct trace_parser *parser)
{
	parser->cont = false;
	parser->idx = 0;
}

extern int trace_parser_get_init(struct trace_parser *parser, int size);
extern void trace_parser_put(struct trace_parser *parser);
extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
			  size_t cnt, loff_t *ppos);
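
/*
 * Illustrative sketch of the usual read path for a tracefs control
 * file (assumed usage; cf. the set_ftrace_filter write handler):
 *
 *	struct trace_parser parser;
 *	int read;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser) &&
 *	    !trace_parser_cont(&parser))
 *		... act on the complete word in parser.buffer ...
 *
 *	trace_parser_put(&parser);
 */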

/*
 * Only create function graph options if function graph is configured.
 */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
# define FGRAPH_FLAGS						\
		C(DISPLAY_GRAPH,	"display-graph"),
#else
# define FGRAPH_FLAGS
#endif

#ifdef CONFIG_BRANCH_TRACER
# define BRANCH_FLAGS					\
		C(BRANCH,		"branch"),
#else
# define BRANCH_FLAGS
#endif

#ifdef CONFIG_FUNCTION_TRACER
# define FUNCTION_FLAGS						\
		C(FUNCTION,		"function-trace"),
# define FUNCTION_DEFAULT_FLAGS		TRACE_ITER_FUNCTION
#else
# define FUNCTION_FLAGS
# define FUNCTION_DEFAULT_FLAGS		0UL
#endif

#ifdef CONFIG_STACKTRACE
# define STACK_FLAGS				\
		C(STACKTRACE,		"stacktrace"),
#else
# define STACK_FLAGS
#endif

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c (this macro guarantees it).
 */
#define TRACE_FLAGS						\
		C(PRINT_PARENT,		"print-parent"),	\
		C(SYM_OFFSET,		"sym-offset"),		\
		C(SYM_ADDR,		"sym-addr"),		\
		C(VERBOSE,		"verbose"),		\
		C(RAW,			"raw"),			\
		C(HEX,			"hex"),			\
		C(BIN,			"bin"),			\
		C(BLOCK,		"block"),		\
		C(PRINTK,		"trace_printk"),	\
		C(ANNOTATE,		"annotate"),		\
		C(USERSTACKTRACE,	"userstacktrace"),	\
		C(SYM_USEROBJ,		"sym-userobj"),		\
		C(PRINTK_MSGONLY,	"printk-msg-only"),	\
		C(CONTEXT_INFO,		"context-info"),   /* Print pid/cpu/time */ \
		C(LATENCY_FMT,		"latency-format"),	\
		C(RECORD_CMD,		"record-cmd"),		\
		C(OVERWRITE,		"overwrite"),		\
		C(STOP_ON_FREE,		"disable_on_free"),	\
		C(IRQ_INFO,		"irq-info"),		\
		C(MARKERS,		"markers"),		\
		C(EVENT_FORK,		"event-fork"),		\
		FUNCTION_FLAGS					\
		FGRAPH_FLAGS					\
		STACK_FLAGS					\
		BRANCH_FLAGS

/*
 * By defining C, we can make TRACE_FLAGS a list of bit names
 * that will define the bits for the flag masks.
 */
#undef C
#define C(a, b) TRACE_ITER_##a##_BIT

enum trace_iterator_bits {
	TRACE_FLAGS
	/* Make sure we don't go more than we have bits for */
	TRACE_ITER_LAST_BIT
};

/*
 * By redefining C, we can make TRACE_FLAGS a list of masks that
 * use the bits as defined above.
 */
#undef C
#define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT)

enum trace_iterator_flags { TRACE_FLAGS };
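
/*
 * Illustrative expansion (sketch): with the two definitions of C
 * above, the same TRACE_FLAGS list yields both
 *
 *	enum trace_iterator_bits {
 *		TRACE_ITER_PRINT_PARENT_BIT,
 *		TRACE_ITER_SYM_OFFSET_BIT,
 *		...
 *		TRACE_ITER_LAST_BIT
 *	};
 *
 * and
 *
 *	enum trace_iterator_flags {
 *		TRACE_ITER_PRINT_PARENT = (1 << TRACE_ITER_PRINT_PARENT_BIT),
 *		TRACE_ITER_SYM_OFFSET	= (1 << TRACE_ITER_SYM_OFFSET_BIT),
 *		...
 *	};
 *
 * which keeps the bit numbers and the masks in sync by construction.
 */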

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (tr->trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

struct ftrace_event_field {
	struct list_head	link;
	const char		*name;
	const char		*type;
	int			filter_type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;	/* Number assigned */
	int			a_preds;	/* allocated */
	struct filter_pred	*preds;
	struct filter_pred	*root;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct event_filter	*filter;
	int			ref_count;
};

struct trace_subsystem_dir {
	struct list_head	list;
	struct event_subsystem	*subsystem;
	struct trace_array	*tr;
	struct dentry		*entry;
	int			ref_count;
	int			nr_events;
};

extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event);

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);

static inline void trace_buffer_unlock_commit(struct trace_array *tr,
					      struct ring_buffer *buffer,
					      struct ring_buffer_event *event,
					      unsigned long flags, int pc)
{
	trace_buffer_unlock_commit_regs(tr, buffer, event, flags, pc, NULL);
}

DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
DECLARE_PER_CPU(int, trace_buffered_event_cnt);
void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void);

static inline void
__trace_event_discard_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event)
{
	if (this_cpu_read(trace_buffered_event) == event) {
		/* Simply release the temp buffer */
		this_cpu_dec(trace_buffered_event_cnt);
		return;
	}
	ring_buffer_discard_commit(buffer, event);
}

/*
 * Helper function for event_trigger_unlock_commit{_regs}().
 * If there are event triggers attached to this event that require
 * filtering against its fields, then they will be called as the
 * entry already holds the field information of the current event.
 *
 * It also checks if the event should be discarded or not.
 * It is to be discarded if the event is soft disabled and the
 * event was only recorded to process triggers, or if the event
 * filter is active and this event did not match the filters.
 *
 * Returns true if the event is discarded, false otherwise.
 */
static inline bool
__event_trigger_test_discard(struct trace_event_file *file,
			     struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     void *entry,
			     enum event_trigger_type *tt)
{
	unsigned long eflags = file->flags;

	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}

	return false;
}

/**
 * event_trigger_unlock_commit - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 */
static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
			    struct ring_buffer *buffer,
			    struct ring_buffer_event *event,
			    void *entry, unsigned long irq_flags, int pc)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}
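
/*
 * Illustrative sketch of where this sits in an event probe (assumed
 * shape of the generated trace_event_raw_event_*() functions; entry
 * is the ring-buffer payload obtained via ring_buffer_event_data()):
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, file, type,
 *						len, irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	... fill in the entry fields ...
 *	event_trigger_unlock_commit(file, buffer, event, entry,
 *				    irq_flags, pc);
 */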

/**
 * event_trigger_unlock_commit_regs - handle triggers and finish event commit
 * @file: The file pointer associated to the event
 * @buffer: The ring buffer that the event is being written to
 * @event: The event meta data in the ring buffer
 * @entry: The event itself
 * @irq_flags: The state of the interrupts at the start of the event
 * @pc: The state of the preempt count at the start of the event.
 *
 * This is a helper function to handle triggers that require data
 * from the event itself. It also tests the event against filters and
 * if the event is soft disabled and should be discarded.
 *
 * Same as event_trigger_unlock_commit() but calls
 * trace_buffer_unlock_commit_regs() instead of trace_buffer_unlock_commit().
 */
static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event,
				 void *entry, unsigned long irq_flags, int pc,
				 struct pt_regs *regs)
{
	enum event_trigger_type tt = ETT_NONE;

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

	if (tt)
		event_triggers_post_call(file, tt, entry);
}

#define FILTER_PRED_INVALID	((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT	(1 << 15)
#define FILTER_PRED_FOLD	(1 << 15)

/*
 * The max preds is the size of unsigned short with
 * two flags at the MSBs. One bit is used for both the IS_RIGHT
 * and FOLD flags. The other is reserved.
 *
 * 2^14 preds is way more than enough.
 */
#define MAX_FILTER_PRED		16384

struct filter_pred;
struct regex;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event);

typedef int (*regex_match_func)(char *str, struct regex *r, int len);

enum regex_type {
	MATCH_FULL = 0,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

struct regex {
	char			pattern[MAX_FILTER_STR_VAL];
	int			len;
	int			field_len;
	regex_match_func	match;
};

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	struct regex		regex;
	unsigned short		*ops;
	struct ftrace_event_field *field;
	int			offset;
	int			not;
	int			op;
	unsigned short		index;
	unsigned short		parent;
	unsigned short		left;
	unsigned short		right;
};

static inline bool is_string_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_DYN_STRING ||
	       field->filter_type == FILTER_STATIC_STRING ||
	       field->filter_type == FILTER_PTR_STRING;
}

static inline bool is_function_field(struct ftrace_event_field *field)
{
	return field->filter_type == FILTER_TRACE_FN;
}

extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct trace_event_file *file,
			       struct trace_seq *s);
extern int apply_event_filter(struct trace_event_file *file,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);
extern int filter_assign_type(const char *type);
extern int create_event_filter(struct trace_event_call *call,
			       char *filter_str, bool set_str,
			       struct event_filter **filterp);
extern void free_event_filter(struct event_filter *filter);
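
/*
 * Illustrative sketch: classifying a glob the way the filter and
 * ftrace code do ("foo" -> MATCH_FULL, "foo*" -> MATCH_FRONT_ONLY,
 * "*foo" -> MATCH_END_ONLY, "*foo*" -> MATCH_MIDDLE_ONLY):
 *
 *	char *search;
 *	int not;
 *	enum regex_type type;
 *
 *	type = filter_parse_regex(glob, strlen(glob), &search, &not);
 *
 * On return, search points at the bare text of the pattern and not
 * is set if the pattern was negated.
 */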

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name);

extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);

extern struct trace_event_file *find_event_file(struct trace_array *tr,
						const char *system,
						const char *event);

static inline void *event_file_data(struct file *filp)
{
	return ACCESS_ONCE(file_inode(filp)->i_private);
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const struct file_operations event_trigger_fops;
extern const struct file_operations event_hist_fops;

#ifdef CONFIG_HIST_TRIGGERS
extern int register_trigger_hist_cmd(void);
extern int register_trigger_hist_enable_disable_cmds(void);
#else
static inline int register_trigger_hist_cmd(void) { return 0; }
static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; }
#endif

extern int register_trigger_cmds(void);
extern void clear_event_triggers(struct trace_array *tr);

struct event_trigger_data {
	unsigned long			count;
	int				ref;
	struct event_trigger_ops	*ops;
	struct event_command		*cmd_ops;
	struct event_filter __rcu	*filter;
	char				*filter_str;
	void				*private_data;
	bool				paused;
	bool				paused_tmp;
	struct list_head		list;
	char				*name;
	struct list_head		named_list;
	struct event_trigger_data	*named_data;
};

/* Avoid typos */
#define ENABLE_EVENT_STR	"enable_event"
#define DISABLE_EVENT_STR	"disable_event"
#define ENABLE_HIST_STR		"enable_hist"
#define DISABLE_HIST_STR	"disable_hist"

struct enable_trigger_data {
	struct trace_event_file		*file;
	bool				enable;
	bool				hist;
};

extern int event_enable_trigger_print(struct seq_file *m,
				      struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern void event_enable_trigger_free(struct event_trigger_ops *ops,
				      struct event_trigger_data *data);
extern int event_enable_trigger_func(struct event_command *cmd_ops,
				     struct trace_event_file *file,
				     char *glob, char *cmd, char *param);
extern int event_enable_register_trigger(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
extern void event_enable_unregister_trigger(char *glob,
					    struct event_trigger_ops *ops,
					    struct event_trigger_data *test,
					    struct trace_event_file *file);
extern void trigger_data_free(struct event_trigger_data *data);
extern int event_trigger_init(struct event_trigger_ops *ops,
			      struct event_trigger_data *data);
extern int trace_event_trigger_enable_disable(struct trace_event_file *file,
					      int trigger_enable);
extern void update_cond_flag(struct trace_event_file *file);
extern void unregister_trigger(char *glob, struct event_trigger_ops *ops,
			       struct event_trigger_data *test,
			       struct trace_event_file *file);
extern int set_trigger_filter(char *filter_str,
			      struct event_trigger_data *trigger_data,
			      struct trace_event_file *file);
extern struct event_trigger_data *find_named_trigger(const char *name);
extern bool is_named_trigger(struct event_trigger_data *test);
extern int save_named_trigger(const char *name,
			      struct event_trigger_data *data);
extern void del_named_trigger(struct event_trigger_data *data);
extern void pause_named_trigger(struct event_trigger_data *data);
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
				   struct event_trigger_data *named_data);
extern int register_event_command(struct event_command *cmd);
extern int unregister_event_command(struct event_command *cmd);
extern int register_trigger_hist_enable_disable_cmds(void);

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @func: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_event_triggers.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_event_triggers.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_event_triggers.c).
 */
struct event_trigger_ops {
	void			(*func)(struct event_trigger_data *data,
					void *rec);
	int			(*init)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	void			(*free)(struct event_trigger_ops *ops,
					struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data);
};
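
/*
 * Illustrative sketch (assumed names; cf. the traceon/traceoff ops in
 * trace_events_trigger.c): a trigger typically reuses the generic
 * init helper declared above and supplies its own probe, print, and
 * free callbacks:
 *
 *	static struct event_trigger_ops foo_trigger_ops = {
 *		.func	= foo_trigger,
 *		.print	= foo_trigger_print,
 *		.init	= event_trigger_init,
 *		.free	= foo_trigger_free,
 *	};
 */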

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @post_trigger, must be set
 * for each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event, e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see the @post_trigger() member below). @trigger_type
 *	values are defined by adding new values to the trigger_type
 *	enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @func: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_event_triggers.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_event_triggers.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, after
 *	de-initializing it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_event_triggers.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_event_triggers.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*func)(struct event_command *cmd_ops,
					struct trace_event_file *file,
					char *glob, char *cmd, char *params);
	int			(*reg)(char *glob,
				       struct event_trigger_ops *ops,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_ops *ops,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};

/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two - the first part checks the filter using the current
 *	trace record; if a command has the @post_trigger flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @func() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};
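
/*
 * Illustrative sketch (assumed names; the generic helpers referenced
 * in the kerneldoc above live in trace_events_trigger.c): a command
 * is a mostly declarative structure registered at boot:
 *
 *	static struct event_command trigger_foo_cmd = {
 *		.name			= "foo",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.func			= event_trigger_callback,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.get_trigger_ops	= foo_get_trigger_ops,
 *		.set_filter		= set_trigger_filter,
 *	};
 *
 *	ret = register_event_command(&trigger_foo_cmd);
 */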

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as save the print formats
 * into sections to display. But the trace infrastructure wants
 * to use these without the added overhead at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). The internal_trace_puts() is for such
 * a purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
	extern struct trace_event_call					\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
		     filter)

#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_enum_update(struct trace_enum_map **map, int len);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_enum_update(struct trace_enum_map **map, int len) { }
#endif

extern struct trace_iterator *tracepoint_print_iter;

#endif /* _LINUX_KERNEL_TRACE_H */