1 // SPDX-License-Identifier: GPL-2.0 2 3 #ifndef _LINUX_KERNEL_TRACE_H 4 #define _LINUX_KERNEL_TRACE_H 5 6 #include <linux/fs.h> 7 #include <linux/atomic.h> 8 #include <linux/sched.h> 9 #include <linux/clocksource.h> 10 #include <linux/ring_buffer.h> 11 #include <linux/mmiotrace.h> 12 #include <linux/tracepoint.h> 13 #include <linux/ftrace.h> 14 #include <linux/trace.h> 15 #include <linux/hw_breakpoint.h> 16 #include <linux/trace_seq.h> 17 #include <linux/trace_events.h> 18 #include <linux/compiler.h> 19 #include <linux/glob.h> 20 #include <linux/irq_work.h> 21 #include <linux/workqueue.h> 22 #include <linux/ctype.h> 23 #include <linux/once_lite.h> 24 25 #include "pid_list.h" 26 27 #ifdef CONFIG_FTRACE_SYSCALLS 28 #include <asm/unistd.h> /* For NR_syscalls */ 29 #include <asm/syscall.h> /* some archs define it here */ 30 #endif 31 32 #define TRACE_MODE_WRITE 0640 33 #define TRACE_MODE_READ 0440 34 35 enum trace_type { 36 __TRACE_FIRST_TYPE = 0, 37 38 TRACE_FN, 39 TRACE_CTX, 40 TRACE_WAKE, 41 TRACE_STACK, 42 TRACE_PRINT, 43 TRACE_BPRINT, 44 TRACE_MMIO_RW, 45 TRACE_MMIO_MAP, 46 TRACE_BRANCH, 47 TRACE_GRAPH_RET, 48 TRACE_GRAPH_ENT, 49 TRACE_USER_STACK, 50 TRACE_BLK, 51 TRACE_BPUTS, 52 TRACE_HWLAT, 53 TRACE_OSNOISE, 54 TRACE_TIMERLAT, 55 TRACE_RAW_DATA, 56 TRACE_FUNC_REPEATS, 57 58 __TRACE_LAST_TYPE, 59 }; 60 61 62 #undef __field 63 #define __field(type, item) type item; 64 65 #undef __field_fn 66 #define __field_fn(type, item) type item; 67 68 #undef __field_struct 69 #define __field_struct(type, item) __field(type, item) 70 71 #undef __field_desc 72 #define __field_desc(type, container, item) 73 74 #undef __field_packed 75 #define __field_packed(type, container, item) 76 77 #undef __array 78 #define __array(type, item, size) type item[size]; 79 80 #undef __array_desc 81 #define __array_desc(type, container, item, size) 82 83 #undef __dynamic_array 84 #define __dynamic_array(type, item) type item[]; 85 86 #undef __rel_dynamic_array 87 #define __rel_dynamic_array(type, item) type item[]; 88 89 #undef F_STRUCT 90 #define F_STRUCT(args...) args 91 92 #undef FTRACE_ENTRY 93 #define FTRACE_ENTRY(name, struct_name, id, tstruct, print) \ 94 struct struct_name { \ 95 struct trace_entry ent; \ 96 tstruct \ 97 } 98 99 #undef FTRACE_ENTRY_DUP 100 #define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk) 101 102 #undef FTRACE_ENTRY_REG 103 #define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, regfn) \ 104 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) 105 106 #undef FTRACE_ENTRY_PACKED 107 #define FTRACE_ENTRY_PACKED(name, struct_name, id, tstruct, print) \ 108 FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print)) __packed 109 110 #include "trace_entries.h" 111 112 /* Use this for memory failure errors */ 113 #define MEM_FAIL(condition, fmt, ...) 
\ 114 DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__) 115 116 #define FAULT_STRING "(fault)" 117 118 #define HIST_STACKTRACE_DEPTH 16 119 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) 120 #define HIST_STACKTRACE_SKIP 5 121 122 /* 123 * syscalls are special, and need special handling, this is why 124 * they are not included in trace_entries.h 125 */ 126 struct syscall_trace_enter { 127 struct trace_entry ent; 128 int nr; 129 unsigned long args[]; 130 }; 131 132 struct syscall_trace_exit { 133 struct trace_entry ent; 134 int nr; 135 long ret; 136 }; 137 138 struct kprobe_trace_entry_head { 139 struct trace_entry ent; 140 unsigned long ip; 141 }; 142 143 struct eprobe_trace_entry_head { 144 struct trace_entry ent; 145 }; 146 147 struct kretprobe_trace_entry_head { 148 struct trace_entry ent; 149 unsigned long func; 150 unsigned long ret_ip; 151 }; 152 153 struct fentry_trace_entry_head { 154 struct trace_entry ent; 155 unsigned long ip; 156 }; 157 158 struct fexit_trace_entry_head { 159 struct trace_entry ent; 160 unsigned long func; 161 unsigned long ret_ip; 162 }; 163 164 #define TRACE_BUF_SIZE 1024 165 166 struct trace_array; 167 168 /* 169 * The CPU trace array - it consists of thousands of trace entries 170 * plus some other descriptor data: (for example which task started 171 * the trace, etc.) 172 */ 173 struct trace_array_cpu { 174 atomic_t disabled; 175 void *buffer_page; /* ring buffer spare */ 176 177 unsigned long entries; 178 unsigned long saved_latency; 179 unsigned long critical_start; 180 unsigned long critical_end; 181 unsigned long critical_sequence; 182 unsigned long nice; 183 unsigned long policy; 184 unsigned long rt_priority; 185 unsigned long skipped_entries; 186 u64 preempt_timestamp; 187 pid_t pid; 188 kuid_t uid; 189 char comm[TASK_COMM_LEN]; 190 191 #ifdef CONFIG_FUNCTION_TRACER 192 int ftrace_ignore_pid; 193 #endif 194 bool ignore_pid; 195 }; 196 197 struct tracer; 198 struct trace_option_dentry; 199 200 struct array_buffer { 201 struct trace_array *tr; 202 struct trace_buffer *buffer; 203 struct trace_array_cpu __percpu *data; 204 u64 time_start; 205 int cpu; 206 }; 207 208 #define TRACE_FLAGS_MAX_SIZE 32 209 210 struct trace_options { 211 struct tracer *tracer; 212 struct trace_option_dentry *topts; 213 }; 214 215 struct trace_pid_list *trace_pid_list_alloc(void); 216 void trace_pid_list_free(struct trace_pid_list *pid_list); 217 bool trace_pid_list_is_set(struct trace_pid_list *pid_list, unsigned int pid); 218 int trace_pid_list_set(struct trace_pid_list *pid_list, unsigned int pid); 219 int trace_pid_list_clear(struct trace_pid_list *pid_list, unsigned int pid); 220 int trace_pid_list_first(struct trace_pid_list *pid_list, unsigned int *pid); 221 int trace_pid_list_next(struct trace_pid_list *pid_list, unsigned int pid, 222 unsigned int *next); 223 224 enum { 225 TRACE_PIDS = BIT(0), 226 TRACE_NO_PIDS = BIT(1), 227 }; 228 229 static inline bool pid_type_enabled(int type, struct trace_pid_list *pid_list, 230 struct trace_pid_list *no_pid_list) 231 { 232 /* Return true if the pid list in type has pids */ 233 return ((type & TRACE_PIDS) && pid_list) || 234 ((type & TRACE_NO_PIDS) && no_pid_list); 235 } 236 237 static inline bool still_need_pid_events(int type, struct trace_pid_list *pid_list, 238 struct trace_pid_list *no_pid_list) 239 { 240 /* 241 * Turning off what is in @type, return true if the "other" 242 * pid list, still has pids in it. 
243 */ 244 return (!(type & TRACE_PIDS) && pid_list) || 245 (!(type & TRACE_NO_PIDS) && no_pid_list); 246 } 247 248 typedef bool (*cond_update_fn_t)(struct trace_array *tr, void *cond_data); 249 250 /** 251 * struct cond_snapshot - conditional snapshot data and callback 252 * 253 * The cond_snapshot structure encapsulates a callback function and 254 * data associated with the snapshot for a given tracing instance. 255 * 256 * When a snapshot is taken conditionally, by invoking 257 * tracing_snapshot_cond(tr, cond_data), the cond_data passed in is 258 * passed in turn to the cond_snapshot.update() function. That data 259 * can be compared by the update() implementation with the cond_data 260 * contained within the struct cond_snapshot instance associated with 261 * the trace_array. Because the tr->max_lock is held throughout the 262 * update() call, the update() function can directly retrieve the 263 * cond_snapshot and cond_data associated with the per-instance 264 * snapshot associated with the trace_array. 265 * 266 * The cond_snapshot.update() implementation can save data to be 267 * associated with the snapshot if it decides to, and returns 'true' 268 * in that case, or it returns 'false' if the conditional snapshot 269 * shouldn't be taken. 270 * 271 * The cond_snapshot instance is created and associated with the 272 * user-defined cond_data by tracing_cond_snapshot_enable(). 273 * Likewise, the cond_snapshot instance is destroyed and is no longer 274 * associated with the trace instance by 275 * tracing_cond_snapshot_disable(). 276 * 277 * The method below is required. 278 * 279 * @update: When a conditional snapshot is invoked, the update() 280 * callback function is invoked with the tr->max_lock held. The 281 * update() implementation signals whether or not to actually 282 * take the snapshot, by returning 'true' if so, 'false' if no 283 * snapshot should be taken. Because the max_lock is held for 284 * the duration of update(), the implementation is safe to 285 * directly retrieved and save any implementation data it needs 286 * to in association with the snapshot. 287 */ 288 struct cond_snapshot { 289 void *cond_data; 290 cond_update_fn_t update; 291 }; 292 293 /* 294 * struct trace_func_repeats - used to keep track of the consecutive 295 * (on the same CPU) calls of a single function. 296 */ 297 struct trace_func_repeats { 298 unsigned long ip; 299 unsigned long parent_ip; 300 unsigned long count; 301 u64 ts_last_call; 302 }; 303 304 /* 305 * The trace array - an array of per-CPU trace arrays. This is the 306 * highest level data structure that individual tracers deal with. 307 * They have on/off state as well: 308 */ 309 struct trace_array { 310 struct list_head list; 311 char *name; 312 struct array_buffer array_buffer; 313 #ifdef CONFIG_TRACER_MAX_TRACE 314 /* 315 * The max_buffer is used to snapshot the trace when a maximum 316 * latency is reached, or when the user initiates a snapshot. 317 * Some tracers will use this to store a maximum trace while 318 * it continues examining live traces. 319 * 320 * The buffers for the max_buffer are set up the same as the array_buffer 321 * When a snapshot is taken, the buffer of the max_buffer is swapped 322 * with the buffer of the array_buffer and the buffers are reset for 323 * the array_buffer so the tracing can continue. 
324 */ 325 struct array_buffer max_buffer; 326 bool allocated_snapshot; 327 #endif 328 #ifdef CONFIG_TRACER_MAX_TRACE 329 unsigned long max_latency; 330 #ifdef CONFIG_FSNOTIFY 331 struct dentry *d_max_latency; 332 struct work_struct fsnotify_work; 333 struct irq_work fsnotify_irqwork; 334 #endif 335 #endif 336 struct trace_pid_list __rcu *filtered_pids; 337 struct trace_pid_list __rcu *filtered_no_pids; 338 /* 339 * max_lock is used to protect the swapping of buffers 340 * when taking a max snapshot. The buffers themselves are 341 * protected by per_cpu spinlocks. But the action of the swap 342 * needs its own lock. 343 * 344 * This is defined as a arch_spinlock_t in order to help 345 * with performance when lockdep debugging is enabled. 346 * 347 * It is also used in other places outside the update_max_tr 348 * so it needs to be defined outside of the 349 * CONFIG_TRACER_MAX_TRACE. 350 */ 351 arch_spinlock_t max_lock; 352 int buffer_disabled; 353 #ifdef CONFIG_FTRACE_SYSCALLS 354 int sys_refcount_enter; 355 int sys_refcount_exit; 356 struct trace_event_file __rcu *enter_syscall_files[NR_syscalls]; 357 struct trace_event_file __rcu *exit_syscall_files[NR_syscalls]; 358 #endif 359 int stop_count; 360 int clock_id; 361 int nr_topts; 362 bool clear_trace; 363 int buffer_percent; 364 unsigned int n_err_log_entries; 365 struct tracer *current_trace; 366 unsigned int trace_flags; 367 unsigned char trace_flags_index[TRACE_FLAGS_MAX_SIZE]; 368 unsigned int flags; 369 raw_spinlock_t start_lock; 370 struct list_head err_log; 371 struct dentry *dir; 372 struct dentry *options; 373 struct dentry *percpu_dir; 374 struct dentry *event_dir; 375 struct trace_options *topts; 376 struct list_head systems; 377 struct list_head events; 378 struct trace_event_file *trace_marker_file; 379 cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ 380 /* one per_cpu trace_pipe can be opened by only one user */ 381 cpumask_var_t pipe_cpumask; 382 int ref; 383 int trace_ref; 384 #ifdef CONFIG_FUNCTION_TRACER 385 struct ftrace_ops *ops; 386 struct trace_pid_list __rcu *function_pids; 387 struct trace_pid_list __rcu *function_no_pids; 388 #ifdef CONFIG_DYNAMIC_FTRACE 389 /* All of these are protected by the ftrace_lock */ 390 struct list_head func_probes; 391 struct list_head mod_trace; 392 struct list_head mod_notrace; 393 #endif 394 /* function tracing enabled */ 395 int function_enabled; 396 #endif 397 int no_filter_buffering_ref; 398 struct list_head hist_vars; 399 #ifdef CONFIG_TRACER_SNAPSHOT 400 struct cond_snapshot *cond_snapshot; 401 #endif 402 struct trace_func_repeats __percpu *last_func_repeats; 403 }; 404 405 enum { 406 TRACE_ARRAY_FL_GLOBAL = (1 << 0) 407 }; 408 409 extern struct list_head ftrace_trace_arrays; 410 411 extern struct mutex trace_types_lock; 412 413 extern int trace_array_get(struct trace_array *tr); 414 extern int tracing_check_open_get_tr(struct trace_array *tr); 415 extern struct trace_array *trace_array_find(const char *instance); 416 extern struct trace_array *trace_array_find_get(const char *instance); 417 418 extern u64 tracing_event_time_stamp(struct trace_buffer *buffer, struct ring_buffer_event *rbe); 419 extern int tracing_set_filter_buffering(struct trace_array *tr, bool set); 420 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr); 421 422 extern bool trace_clock_in_ns(struct trace_array *tr); 423 424 /* 425 * The global tracer (top) should be the first trace array added, 426 * but we check the flag anyway. 
427 */ 428 static inline struct trace_array *top_trace_array(void) 429 { 430 struct trace_array *tr; 431 432 if (list_empty(&ftrace_trace_arrays)) 433 return NULL; 434 435 tr = list_entry(ftrace_trace_arrays.prev, 436 typeof(*tr), list); 437 WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL)); 438 return tr; 439 } 440 441 #define FTRACE_CMP_TYPE(var, type) \ 442 __builtin_types_compatible_p(typeof(var), type *) 443 444 #undef IF_ASSIGN 445 #define IF_ASSIGN(var, entry, etype, id) \ 446 if (FTRACE_CMP_TYPE(var, etype)) { \ 447 var = (typeof(var))(entry); \ 448 WARN_ON(id != 0 && (entry)->type != id); \ 449 break; \ 450 } 451 452 /* Will cause compile errors if type is not found. */ 453 extern void __ftrace_bad_type(void); 454 455 /* 456 * The trace_assign_type is a verifier that the entry type is 457 * the same as the type being assigned. To add new types simply 458 * add a line with the following format: 459 * 460 * IF_ASSIGN(var, ent, type, id); 461 * 462 * Where "type" is the trace type that includes the trace_entry 463 * as the "ent" item. And "id" is the trace identifier that is 464 * used in the trace_type enum. 465 * 466 * If the type can have more than one id, then use zero. 467 */ 468 #define trace_assign_type(var, ent) \ 469 do { \ 470 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ 471 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ 472 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ 473 IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ 474 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ 475 IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ 476 IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \ 477 IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \ 478 IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\ 479 IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\ 480 IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\ 481 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ 482 TRACE_MMIO_RW); \ 483 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ 484 TRACE_MMIO_MAP); \ 485 IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ 486 IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ 487 TRACE_GRAPH_ENT); \ 488 IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ 489 TRACE_GRAPH_RET); \ 490 IF_ASSIGN(var, ent, struct func_repeats_entry, \ 491 TRACE_FUNC_REPEATS); \ 492 __ftrace_bad_type(); \ 493 } while (0) 494 495 /* 496 * An option specific to a tracer. This is a boolean value. 497 * The bit is the bit index that sets its value on the 498 * flags value in struct tracer_flags. 499 */ 500 struct tracer_opt { 501 const char *name; /* Will appear on the trace_options file */ 502 u32 bit; /* Mask assigned in val field in tracer_flags */ 503 }; 504 505 /* 506 * The set of specific options for a tracer. Your tracer 507 * have to set the initial value of the flags val. 
508 */ 509 struct tracer_flags { 510 u32 val; 511 struct tracer_opt *opts; 512 struct tracer *trace; 513 }; 514 515 /* Makes more easy to define a tracer opt */ 516 #define TRACER_OPT(s, b) .name = #s, .bit = b 517 518 519 struct trace_option_dentry { 520 struct tracer_opt *opt; 521 struct tracer_flags *flags; 522 struct trace_array *tr; 523 struct dentry *entry; 524 }; 525 526 /** 527 * struct tracer - a specific tracer and its callbacks to interact with tracefs 528 * @name: the name chosen to select it on the available_tracers file 529 * @init: called when one switches to this tracer (echo name > current_tracer) 530 * @reset: called when one switches to another tracer 531 * @start: called when tracing is unpaused (echo 1 > tracing_on) 532 * @stop: called when tracing is paused (echo 0 > tracing_on) 533 * @update_thresh: called when tracing_thresh is updated 534 * @open: called when the trace file is opened 535 * @pipe_open: called when the trace_pipe file is opened 536 * @close: called when the trace file is released 537 * @pipe_close: called when the trace_pipe file is released 538 * @read: override the default read callback on trace_pipe 539 * @splice_read: override the default splice_read callback on trace_pipe 540 * @selftest: selftest to run on boot (see trace_selftest.c) 541 * @print_headers: override the first lines that describe your columns 542 * @print_line: callback that prints a trace 543 * @set_flag: signals one of your private flags changed (trace_options file) 544 * @flags: your private flags 545 */ 546 struct tracer { 547 const char *name; 548 int (*init)(struct trace_array *tr); 549 void (*reset)(struct trace_array *tr); 550 void (*start)(struct trace_array *tr); 551 void (*stop)(struct trace_array *tr); 552 int (*update_thresh)(struct trace_array *tr); 553 void (*open)(struct trace_iterator *iter); 554 void (*pipe_open)(struct trace_iterator *iter); 555 void (*close)(struct trace_iterator *iter); 556 void (*pipe_close)(struct trace_iterator *iter); 557 ssize_t (*read)(struct trace_iterator *iter, 558 struct file *filp, char __user *ubuf, 559 size_t cnt, loff_t *ppos); 560 ssize_t (*splice_read)(struct trace_iterator *iter, 561 struct file *filp, 562 loff_t *ppos, 563 struct pipe_inode_info *pipe, 564 size_t len, 565 unsigned int flags); 566 #ifdef CONFIG_FTRACE_STARTUP_TEST 567 int (*selftest)(struct tracer *trace, 568 struct trace_array *tr); 569 #endif 570 void (*print_header)(struct seq_file *m); 571 enum print_line_t (*print_line)(struct trace_iterator *iter); 572 /* If you handled the flag setting, return 0 */ 573 int (*set_flag)(struct trace_array *tr, 574 u32 old_flags, u32 bit, int set); 575 /* Return 0 if OK with change, else return non-zero */ 576 int (*flag_changed)(struct trace_array *tr, 577 u32 mask, int set); 578 struct tracer *next; 579 struct tracer_flags *flags; 580 int enabled; 581 bool print_max; 582 bool allow_instances; 583 #ifdef CONFIG_TRACER_MAX_TRACE 584 bool use_max_tr; 585 #endif 586 /* True if tracer cannot be enabled in kernel param */ 587 bool noboot; 588 }; 589 590 static inline struct ring_buffer_iter * 591 trace_buffer_iter(struct trace_iterator *iter, int cpu) 592 { 593 return iter->buffer_iter ? 
iter->buffer_iter[cpu] : NULL; 594 } 595 596 int tracer_init(struct tracer *t, struct trace_array *tr); 597 int tracing_is_enabled(void); 598 void tracing_reset_online_cpus(struct array_buffer *buf); 599 void tracing_reset_current(int cpu); 600 void tracing_reset_all_online_cpus(void); 601 void tracing_reset_all_online_cpus_unlocked(void); 602 int tracing_open_generic(struct inode *inode, struct file *filp); 603 int tracing_open_generic_tr(struct inode *inode, struct file *filp); 604 bool tracing_is_disabled(void); 605 bool tracer_tracing_is_on(struct trace_array *tr); 606 void tracer_tracing_on(struct trace_array *tr); 607 void tracer_tracing_off(struct trace_array *tr); 608 struct dentry *trace_create_file(const char *name, 609 umode_t mode, 610 struct dentry *parent, 611 void *data, 612 const struct file_operations *fops); 613 614 int tracing_init_dentry(void); 615 616 struct ring_buffer_event; 617 618 struct ring_buffer_event * 619 trace_buffer_lock_reserve(struct trace_buffer *buffer, 620 int type, 621 unsigned long len, 622 unsigned int trace_ctx); 623 624 struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, 625 struct trace_array_cpu *data); 626 627 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 628 int *ent_cpu, u64 *ent_ts); 629 630 void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer, 631 struct ring_buffer_event *event); 632 633 bool trace_is_tracepoint_string(const char *str); 634 const char *trace_event_format(struct trace_iterator *iter, const char *fmt); 635 void trace_check_vprintf(struct trace_iterator *iter, const char *fmt, 636 va_list ap) __printf(2, 0); 637 char *trace_iter_expand_format(struct trace_iterator *iter); 638 639 int trace_empty(struct trace_iterator *iter); 640 641 void *trace_find_next_entry_inc(struct trace_iterator *iter); 642 643 void trace_init_global_iter(struct trace_iterator *iter); 644 645 void tracing_iter_reset(struct trace_iterator *iter, int cpu); 646 647 unsigned long trace_total_entries_cpu(struct trace_array *tr, int cpu); 648 unsigned long trace_total_entries(struct trace_array *tr); 649 650 void trace_function(struct trace_array *tr, 651 unsigned long ip, 652 unsigned long parent_ip, 653 unsigned int trace_ctx); 654 void trace_graph_function(struct trace_array *tr, 655 unsigned long ip, 656 unsigned long parent_ip, 657 unsigned int trace_ctx); 658 void trace_latency_header(struct seq_file *m); 659 void trace_default_header(struct seq_file *m); 660 void print_trace_header(struct seq_file *m, struct trace_iterator *iter); 661 662 void trace_graph_return(struct ftrace_graph_ret *trace); 663 int trace_graph_entry(struct ftrace_graph_ent *trace); 664 void set_graph_array(struct trace_array *tr); 665 666 void tracing_start_cmdline_record(void); 667 void tracing_stop_cmdline_record(void); 668 void tracing_start_tgid_record(void); 669 void tracing_stop_tgid_record(void); 670 671 int register_tracer(struct tracer *type); 672 int is_tracing_stopped(void); 673 674 loff_t tracing_lseek(struct file *file, loff_t offset, int whence); 675 676 extern cpumask_var_t __read_mostly tracing_buffer_mask; 677 678 #define for_each_tracing_cpu(cpu) \ 679 for_each_cpu(cpu, tracing_buffer_mask) 680 681 extern unsigned long nsecs_to_usecs(unsigned long nsecs); 682 683 extern unsigned long tracing_thresh; 684 685 /* PID filtering */ 686 687 extern int pid_max; 688 689 bool trace_find_filtered_pid(struct trace_pid_list *filtered_pids, 690 pid_t search_pid); 691 bool trace_ignore_this_task(struct trace_pid_list 
*filtered_pids, 692 struct trace_pid_list *filtered_no_pids, 693 struct task_struct *task); 694 void trace_filter_add_remove_task(struct trace_pid_list *pid_list, 695 struct task_struct *self, 696 struct task_struct *task); 697 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos); 698 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos); 699 int trace_pid_show(struct seq_file *m, void *v); 700 void trace_free_pid_list(struct trace_pid_list *pid_list); 701 int trace_pid_write(struct trace_pid_list *filtered_pids, 702 struct trace_pid_list **new_pid_list, 703 const char __user *ubuf, size_t cnt); 704 705 #ifdef CONFIG_TRACER_MAX_TRACE 706 void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu, 707 void *cond_data); 708 void update_max_tr_single(struct trace_array *tr, 709 struct task_struct *tsk, int cpu); 710 711 #ifdef CONFIG_FSNOTIFY 712 #define LATENCY_FS_NOTIFY 713 #endif 714 #endif /* CONFIG_TRACER_MAX_TRACE */ 715 716 #ifdef LATENCY_FS_NOTIFY 717 void latency_fsnotify(struct trace_array *tr); 718 #else 719 static inline void latency_fsnotify(struct trace_array *tr) { } 720 #endif 721 722 #ifdef CONFIG_STACKTRACE 723 void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, int skip); 724 #else 725 static inline void __trace_stack(struct trace_array *tr, unsigned int trace_ctx, 726 int skip) 727 { 728 } 729 #endif /* CONFIG_STACKTRACE */ 730 731 void trace_last_func_repeats(struct trace_array *tr, 732 struct trace_func_repeats *last_info, 733 unsigned int trace_ctx); 734 735 extern u64 ftrace_now(int cpu); 736 737 extern void trace_find_cmdline(int pid, char comm[]); 738 extern int trace_find_tgid(int pid); 739 extern void trace_event_follow_fork(struct trace_array *tr, bool enable); 740 741 #ifdef CONFIG_DYNAMIC_FTRACE 742 extern unsigned long ftrace_update_tot_cnt; 743 extern unsigned long ftrace_number_of_pages; 744 extern unsigned long ftrace_number_of_groups; 745 void ftrace_init_trace_array(struct trace_array *tr); 746 #else 747 static inline void ftrace_init_trace_array(struct trace_array *tr) { } 748 #endif 749 #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func 750 extern int DYN_FTRACE_TEST_NAME(void); 751 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 752 extern int DYN_FTRACE_TEST_NAME2(void); 753 754 extern bool ring_buffer_expanded; 755 extern bool tracing_selftest_disabled; 756 757 #ifdef CONFIG_FTRACE_STARTUP_TEST 758 extern void __init disable_tracing_selftest(const char *reason); 759 760 extern int trace_selftest_startup_function(struct tracer *trace, 761 struct trace_array *tr); 762 extern int trace_selftest_startup_function_graph(struct tracer *trace, 763 struct trace_array *tr); 764 extern int trace_selftest_startup_irqsoff(struct tracer *trace, 765 struct trace_array *tr); 766 extern int trace_selftest_startup_preemptoff(struct tracer *trace, 767 struct trace_array *tr); 768 extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, 769 struct trace_array *tr); 770 extern int trace_selftest_startup_wakeup(struct tracer *trace, 771 struct trace_array *tr); 772 extern int trace_selftest_startup_nop(struct tracer *trace, 773 struct trace_array *tr); 774 extern int trace_selftest_startup_branch(struct tracer *trace, 775 struct trace_array *tr); 776 /* 777 * Tracer data references selftest functions that only occur 778 * on boot up. These can be __init functions. Thus, when selftests 779 * are enabled, then the tracers need to reference __init functions. 
780 */ 781 #define __tracer_data __refdata 782 #else 783 static inline void __init disable_tracing_selftest(const char *reason) 784 { 785 } 786 /* Tracers are seldom changed. Optimize when selftests are disabled. */ 787 #define __tracer_data __read_mostly 788 #endif /* CONFIG_FTRACE_STARTUP_TEST */ 789 790 extern void *head_page(struct trace_array_cpu *data); 791 extern unsigned long long ns2usecs(u64 nsec); 792 extern int 793 trace_vbprintk(unsigned long ip, const char *fmt, va_list args); 794 extern int 795 trace_vprintk(unsigned long ip, const char *fmt, va_list args); 796 extern int 797 trace_array_vprintk(struct trace_array *tr, 798 unsigned long ip, const char *fmt, va_list args); 799 int trace_array_printk_buf(struct trace_buffer *buffer, 800 unsigned long ip, const char *fmt, ...); 801 void trace_printk_seq(struct trace_seq *s); 802 enum print_line_t print_trace_line(struct trace_iterator *iter); 803 804 extern char trace_find_mark(unsigned long long duration); 805 806 struct ftrace_hash; 807 808 struct ftrace_mod_load { 809 struct list_head list; 810 char *func; 811 char *module; 812 int enable; 813 }; 814 815 enum { 816 FTRACE_HASH_FL_MOD = (1 << 0), 817 }; 818 819 struct ftrace_hash { 820 unsigned long size_bits; 821 struct hlist_head *buckets; 822 unsigned long count; 823 unsigned long flags; 824 struct rcu_head rcu; 825 }; 826 827 struct ftrace_func_entry * 828 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip); 829 830 static __always_inline bool ftrace_hash_empty(struct ftrace_hash *hash) 831 { 832 return !hash || !(hash->count || (hash->flags & FTRACE_HASH_FL_MOD)); 833 } 834 835 /* Standard output formatting function used for function return traces */ 836 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 837 838 /* Flag options */ 839 #define TRACE_GRAPH_PRINT_OVERRUN 0x1 840 #define TRACE_GRAPH_PRINT_CPU 0x2 841 #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 842 #define TRACE_GRAPH_PRINT_PROC 0x8 843 #define TRACE_GRAPH_PRINT_DURATION 0x10 844 #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 845 #define TRACE_GRAPH_PRINT_REL_TIME 0x40 846 #define TRACE_GRAPH_PRINT_IRQS 0x80 847 #define TRACE_GRAPH_PRINT_TAIL 0x100 848 #define TRACE_GRAPH_SLEEP_TIME 0x200 849 #define TRACE_GRAPH_GRAPH_TIME 0x400 850 #define TRACE_GRAPH_PRINT_RETVAL 0x800 851 #define TRACE_GRAPH_PRINT_RETVAL_HEX 0x1000 852 #define TRACE_GRAPH_PRINT_FILL_SHIFT 28 853 #define TRACE_GRAPH_PRINT_FILL_MASK (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT) 854 855 extern void ftrace_graph_sleep_time_control(bool enable); 856 857 #ifdef CONFIG_FUNCTION_PROFILER 858 extern void ftrace_graph_graph_time_control(bool enable); 859 #else 860 static inline void ftrace_graph_graph_time_control(bool enable) { } 861 #endif 862 863 extern enum print_line_t 864 print_graph_function_flags(struct trace_iterator *iter, u32 flags); 865 extern void print_graph_headers_flags(struct seq_file *s, u32 flags); 866 extern void 867 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); 868 extern void graph_trace_open(struct trace_iterator *iter); 869 extern void graph_trace_close(struct trace_iterator *iter); 870 extern int __trace_graph_entry(struct trace_array *tr, 871 struct ftrace_graph_ent *trace, 872 unsigned int trace_ctx); 873 extern void __trace_graph_return(struct trace_array *tr, 874 struct ftrace_graph_ret *trace, 875 unsigned int trace_ctx); 876 877 #ifdef CONFIG_DYNAMIC_FTRACE 878 extern struct ftrace_hash __rcu *ftrace_graph_hash; 879 extern struct ftrace_hash __rcu *ftrace_graph_notrace_hash; 880 881 static inline int 
ftrace_graph_addr(struct ftrace_graph_ent *trace) 882 { 883 unsigned long addr = trace->func; 884 int ret = 0; 885 struct ftrace_hash *hash; 886 887 preempt_disable_notrace(); 888 889 /* 890 * Have to open code "rcu_dereference_sched()" because the 891 * function graph tracer can be called when RCU is not 892 * "watching". 893 * Protected with schedule_on_each_cpu(ftrace_sync) 894 */ 895 hash = rcu_dereference_protected(ftrace_graph_hash, !preemptible()); 896 897 if (ftrace_hash_empty(hash)) { 898 ret = 1; 899 goto out; 900 } 901 902 if (ftrace_lookup_ip(hash, addr)) { 903 904 /* 905 * This needs to be cleared on the return functions 906 * when the depth is zero. 907 */ 908 trace_recursion_set(TRACE_GRAPH_BIT); 909 trace_recursion_set_depth(trace->depth); 910 911 /* 912 * If no irqs are to be traced, but a set_graph_function 913 * is set, and called by an interrupt handler, we still 914 * want to trace it. 915 */ 916 if (in_hardirq()) 917 trace_recursion_set(TRACE_IRQ_BIT); 918 else 919 trace_recursion_clear(TRACE_IRQ_BIT); 920 ret = 1; 921 } 922 923 out: 924 preempt_enable_notrace(); 925 return ret; 926 } 927 928 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace) 929 { 930 if (trace_recursion_test(TRACE_GRAPH_BIT) && 931 trace->depth == trace_recursion_depth()) 932 trace_recursion_clear(TRACE_GRAPH_BIT); 933 } 934 935 static inline int ftrace_graph_notrace_addr(unsigned long addr) 936 { 937 int ret = 0; 938 struct ftrace_hash *notrace_hash; 939 940 preempt_disable_notrace(); 941 942 /* 943 * Have to open code "rcu_dereference_sched()" because the 944 * function graph tracer can be called when RCU is not 945 * "watching". 946 * Protected with schedule_on_each_cpu(ftrace_sync) 947 */ 948 notrace_hash = rcu_dereference_protected(ftrace_graph_notrace_hash, 949 !preemptible()); 950 951 if (ftrace_lookup_ip(notrace_hash, addr)) 952 ret = 1; 953 954 preempt_enable_notrace(); 955 return ret; 956 } 957 #else 958 static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace) 959 { 960 return 1; 961 } 962 963 static inline int ftrace_graph_notrace_addr(unsigned long addr) 964 { 965 return 0; 966 } 967 static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace) 968 { } 969 #endif /* CONFIG_DYNAMIC_FTRACE */ 970 971 extern unsigned int fgraph_max_depth; 972 973 static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace) 974 { 975 /* trace it when it is-nested-in or is a function enabled. 
*/ 976 return !(trace_recursion_test(TRACE_GRAPH_BIT) || 977 ftrace_graph_addr(trace)) || 978 (trace->depth < 0) || 979 (fgraph_max_depth && trace->depth >= fgraph_max_depth); 980 } 981 982 #else /* CONFIG_FUNCTION_GRAPH_TRACER */ 983 static inline enum print_line_t 984 print_graph_function_flags(struct trace_iterator *iter, u32 flags) 985 { 986 return TRACE_TYPE_UNHANDLED; 987 } 988 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 989 990 extern struct list_head ftrace_pids; 991 992 #ifdef CONFIG_FUNCTION_TRACER 993 994 #define FTRACE_PID_IGNORE -1 995 #define FTRACE_PID_TRACE -2 996 997 struct ftrace_func_command { 998 struct list_head list; 999 char *name; 1000 int (*func)(struct trace_array *tr, 1001 struct ftrace_hash *hash, 1002 char *func, char *cmd, 1003 char *params, int enable); 1004 }; 1005 extern bool ftrace_filter_param __initdata; 1006 static inline int ftrace_trace_task(struct trace_array *tr) 1007 { 1008 return this_cpu_read(tr->array_buffer.data->ftrace_ignore_pid) != 1009 FTRACE_PID_IGNORE; 1010 } 1011 extern int ftrace_is_dead(void); 1012 int ftrace_create_function_files(struct trace_array *tr, 1013 struct dentry *parent); 1014 void ftrace_destroy_function_files(struct trace_array *tr); 1015 int ftrace_allocate_ftrace_ops(struct trace_array *tr); 1016 void ftrace_free_ftrace_ops(struct trace_array *tr); 1017 void ftrace_init_global_array_ops(struct trace_array *tr); 1018 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func); 1019 void ftrace_reset_array_ops(struct trace_array *tr); 1020 void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); 1021 void ftrace_init_tracefs_toplevel(struct trace_array *tr, 1022 struct dentry *d_tracer); 1023 void ftrace_clear_pids(struct trace_array *tr); 1024 int init_function_trace(void); 1025 void ftrace_pid_follow_fork(struct trace_array *tr, bool enable); 1026 #else 1027 static inline int ftrace_trace_task(struct trace_array *tr) 1028 { 1029 return 1; 1030 } 1031 static inline int ftrace_is_dead(void) { return 0; } 1032 static inline int 1033 ftrace_create_function_files(struct trace_array *tr, 1034 struct dentry *parent) 1035 { 1036 return 0; 1037 } 1038 static inline int ftrace_allocate_ftrace_ops(struct trace_array *tr) 1039 { 1040 return 0; 1041 } 1042 static inline void ftrace_free_ftrace_ops(struct trace_array *tr) { } 1043 static inline void ftrace_destroy_function_files(struct trace_array *tr) { } 1044 static inline __init void 1045 ftrace_init_global_array_ops(struct trace_array *tr) { } 1046 static inline void ftrace_reset_array_ops(struct trace_array *tr) { } 1047 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } 1048 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } 1049 static inline void ftrace_clear_pids(struct trace_array *tr) { } 1050 static inline int init_function_trace(void) { return 0; } 1051 static inline void ftrace_pid_follow_fork(struct trace_array *tr, bool enable) { } 1052 /* ftace_func_t type is not defined, use macro instead of static inline */ 1053 #define ftrace_init_array_ops(tr, func) do { } while (0) 1054 #endif /* CONFIG_FUNCTION_TRACER */ 1055 1056 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE) 1057 1058 struct ftrace_probe_ops { 1059 void (*func)(unsigned long ip, 1060 unsigned long parent_ip, 1061 struct trace_array *tr, 1062 struct ftrace_probe_ops *ops, 1063 void *data); 1064 int (*init)(struct ftrace_probe_ops *ops, 1065 struct trace_array *tr, 1066 unsigned long ip, 
void *init_data, 1067 void **data); 1068 void (*free)(struct ftrace_probe_ops *ops, 1069 struct trace_array *tr, 1070 unsigned long ip, void *data); 1071 int (*print)(struct seq_file *m, 1072 unsigned long ip, 1073 struct ftrace_probe_ops *ops, 1074 void *data); 1075 }; 1076 1077 struct ftrace_func_mapper; 1078 typedef int (*ftrace_mapper_func)(void *data); 1079 1080 struct ftrace_func_mapper *allocate_ftrace_func_mapper(void); 1081 void **ftrace_func_mapper_find_ip(struct ftrace_func_mapper *mapper, 1082 unsigned long ip); 1083 int ftrace_func_mapper_add_ip(struct ftrace_func_mapper *mapper, 1084 unsigned long ip, void *data); 1085 void *ftrace_func_mapper_remove_ip(struct ftrace_func_mapper *mapper, 1086 unsigned long ip); 1087 void free_ftrace_func_mapper(struct ftrace_func_mapper *mapper, 1088 ftrace_mapper_func free_func); 1089 1090 extern int 1091 register_ftrace_function_probe(char *glob, struct trace_array *tr, 1092 struct ftrace_probe_ops *ops, void *data); 1093 extern int 1094 unregister_ftrace_function_probe_func(char *glob, struct trace_array *tr, 1095 struct ftrace_probe_ops *ops); 1096 extern void clear_ftrace_function_probes(struct trace_array *tr); 1097 1098 int register_ftrace_command(struct ftrace_func_command *cmd); 1099 int unregister_ftrace_command(struct ftrace_func_command *cmd); 1100 1101 void ftrace_create_filter_files(struct ftrace_ops *ops, 1102 struct dentry *parent); 1103 void ftrace_destroy_filter_files(struct ftrace_ops *ops); 1104 1105 extern int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, 1106 int len, int reset); 1107 extern int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, 1108 int len, int reset); 1109 #else 1110 struct ftrace_func_command; 1111 1112 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd) 1113 { 1114 return -EINVAL; 1115 } 1116 static inline __init int unregister_ftrace_command(char *cmd_name) 1117 { 1118 return -EINVAL; 1119 } 1120 static inline void clear_ftrace_function_probes(struct trace_array *tr) 1121 { 1122 } 1123 1124 /* 1125 * The ops parameter passed in is usually undefined. 1126 * This must be a macro. 
1127 */ 1128 #define ftrace_create_filter_files(ops, parent) do { } while (0) 1129 #define ftrace_destroy_filter_files(ops) do { } while (0) 1130 #endif /* CONFIG_FUNCTION_TRACER && CONFIG_DYNAMIC_FTRACE */ 1131 1132 bool ftrace_event_is_function(struct trace_event_call *call); 1133 1134 /* 1135 * struct trace_parser - servers for reading the user input separated by spaces 1136 * @cont: set if the input is not complete - no final space char was found 1137 * @buffer: holds the parsed user input 1138 * @idx: user input length 1139 * @size: buffer size 1140 */ 1141 struct trace_parser { 1142 bool cont; 1143 char *buffer; 1144 unsigned idx; 1145 unsigned size; 1146 }; 1147 1148 static inline bool trace_parser_loaded(struct trace_parser *parser) 1149 { 1150 return (parser->idx != 0); 1151 } 1152 1153 static inline bool trace_parser_cont(struct trace_parser *parser) 1154 { 1155 return parser->cont; 1156 } 1157 1158 static inline void trace_parser_clear(struct trace_parser *parser) 1159 { 1160 parser->cont = false; 1161 parser->idx = 0; 1162 } 1163 1164 extern int trace_parser_get_init(struct trace_parser *parser, int size); 1165 extern void trace_parser_put(struct trace_parser *parser); 1166 extern int trace_get_user(struct trace_parser *parser, const char __user *ubuf, 1167 size_t cnt, loff_t *ppos); 1168 1169 /* 1170 * Only create function graph options if function graph is configured. 1171 */ 1172 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 1173 # define FGRAPH_FLAGS \ 1174 C(DISPLAY_GRAPH, "display-graph"), 1175 #else 1176 # define FGRAPH_FLAGS 1177 #endif 1178 1179 #ifdef CONFIG_BRANCH_TRACER 1180 # define BRANCH_FLAGS \ 1181 C(BRANCH, "branch"), 1182 #else 1183 # define BRANCH_FLAGS 1184 #endif 1185 1186 #ifdef CONFIG_FUNCTION_TRACER 1187 # define FUNCTION_FLAGS \ 1188 C(FUNCTION, "function-trace"), \ 1189 C(FUNC_FORK, "function-fork"), 1190 # define FUNCTION_DEFAULT_FLAGS TRACE_ITER_FUNCTION 1191 #else 1192 # define FUNCTION_FLAGS 1193 # define FUNCTION_DEFAULT_FLAGS 0UL 1194 # define TRACE_ITER_FUNC_FORK 0UL 1195 #endif 1196 1197 #ifdef CONFIG_STACKTRACE 1198 # define STACK_FLAGS \ 1199 C(STACKTRACE, "stacktrace"), 1200 #else 1201 # define STACK_FLAGS 1202 #endif 1203 1204 /* 1205 * trace_iterator_flags is an enumeration that defines bit 1206 * positions into trace_flags that controls the output. 1207 * 1208 * NOTE: These bits must match the trace_options array in 1209 * trace.c (this macro guarantees it). 
1210 */ 1211 #define TRACE_FLAGS \ 1212 C(PRINT_PARENT, "print-parent"), \ 1213 C(SYM_OFFSET, "sym-offset"), \ 1214 C(SYM_ADDR, "sym-addr"), \ 1215 C(VERBOSE, "verbose"), \ 1216 C(RAW, "raw"), \ 1217 C(HEX, "hex"), \ 1218 C(BIN, "bin"), \ 1219 C(BLOCK, "block"), \ 1220 C(FIELDS, "fields"), \ 1221 C(PRINTK, "trace_printk"), \ 1222 C(ANNOTATE, "annotate"), \ 1223 C(USERSTACKTRACE, "userstacktrace"), \ 1224 C(SYM_USEROBJ, "sym-userobj"), \ 1225 C(PRINTK_MSGONLY, "printk-msg-only"), \ 1226 C(CONTEXT_INFO, "context-info"), /* Print pid/cpu/time */ \ 1227 C(LATENCY_FMT, "latency-format"), \ 1228 C(RECORD_CMD, "record-cmd"), \ 1229 C(RECORD_TGID, "record-tgid"), \ 1230 C(OVERWRITE, "overwrite"), \ 1231 C(STOP_ON_FREE, "disable_on_free"), \ 1232 C(IRQ_INFO, "irq-info"), \ 1233 C(MARKERS, "markers"), \ 1234 C(EVENT_FORK, "event-fork"), \ 1235 C(PAUSE_ON_TRACE, "pause-on-trace"), \ 1236 C(HASH_PTR, "hash-ptr"), /* Print hashed pointer */ \ 1237 FUNCTION_FLAGS \ 1238 FGRAPH_FLAGS \ 1239 STACK_FLAGS \ 1240 BRANCH_FLAGS 1241 1242 /* 1243 * By defining C, we can make TRACE_FLAGS a list of bit names 1244 * that will define the bits for the flag masks. 1245 */ 1246 #undef C 1247 #define C(a, b) TRACE_ITER_##a##_BIT 1248 1249 enum trace_iterator_bits { 1250 TRACE_FLAGS 1251 /* Make sure we don't go more than we have bits for */ 1252 TRACE_ITER_LAST_BIT 1253 }; 1254 1255 /* 1256 * By redefining C, we can make TRACE_FLAGS a list of masks that 1257 * use the bits as defined above. 1258 */ 1259 #undef C 1260 #define C(a, b) TRACE_ITER_##a = (1 << TRACE_ITER_##a##_BIT) 1261 1262 enum trace_iterator_flags { TRACE_FLAGS }; 1263 1264 /* 1265 * TRACE_ITER_SYM_MASK masks the options in trace_flags that 1266 * control the output of kernel symbols. 1267 */ 1268 #define TRACE_ITER_SYM_MASK \ 1269 (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) 1270 1271 extern struct tracer nop_trace; 1272 1273 #ifdef CONFIG_BRANCH_TRACER 1274 extern int enable_branch_tracing(struct trace_array *tr); 1275 extern void disable_branch_tracing(void); 1276 static inline int trace_branch_enable(struct trace_array *tr) 1277 { 1278 if (tr->trace_flags & TRACE_ITER_BRANCH) 1279 return enable_branch_tracing(tr); 1280 return 0; 1281 } 1282 static inline void trace_branch_disable(void) 1283 { 1284 /* due to races, always disable */ 1285 disable_branch_tracing(); 1286 } 1287 #else 1288 static inline int trace_branch_enable(struct trace_array *tr) 1289 { 1290 return 0; 1291 } 1292 static inline void trace_branch_disable(void) 1293 { 1294 } 1295 #endif /* CONFIG_BRANCH_TRACER */ 1296 1297 /* set ring buffers to default size if not already done so */ 1298 int tracing_update_buffers(void); 1299 1300 union trace_synth_field { 1301 u8 as_u8; 1302 u16 as_u16; 1303 u32 as_u32; 1304 u64 as_u64; 1305 struct trace_dynamic_info as_dynamic; 1306 }; 1307 1308 struct ftrace_event_field { 1309 struct list_head link; 1310 const char *name; 1311 const char *type; 1312 int filter_type; 1313 int offset; 1314 int size; 1315 int is_signed; 1316 int len; 1317 }; 1318 1319 struct prog_entry; 1320 1321 struct event_filter { 1322 struct prog_entry __rcu *prog; 1323 char *filter_string; 1324 }; 1325 1326 struct event_subsystem { 1327 struct list_head list; 1328 const char *name; 1329 struct event_filter *filter; 1330 int ref_count; 1331 }; 1332 1333 struct trace_subsystem_dir { 1334 struct list_head list; 1335 struct event_subsystem *subsystem; 1336 struct trace_array *tr; 1337 struct dentry *entry; 1338 int ref_count; 1339 int nr_events; 1340 }; 1341 
1342 extern int call_filter_check_discard(struct trace_event_call *call, void *rec, 1343 struct trace_buffer *buffer, 1344 struct ring_buffer_event *event); 1345 1346 void trace_buffer_unlock_commit_regs(struct trace_array *tr, 1347 struct trace_buffer *buffer, 1348 struct ring_buffer_event *event, 1349 unsigned int trcace_ctx, 1350 struct pt_regs *regs); 1351 1352 static inline void trace_buffer_unlock_commit(struct trace_array *tr, 1353 struct trace_buffer *buffer, 1354 struct ring_buffer_event *event, 1355 unsigned int trace_ctx) 1356 { 1357 trace_buffer_unlock_commit_regs(tr, buffer, event, trace_ctx, NULL); 1358 } 1359 1360 DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event); 1361 DECLARE_PER_CPU(int, trace_buffered_event_cnt); 1362 void trace_buffered_event_disable(void); 1363 void trace_buffered_event_enable(void); 1364 1365 void early_enable_events(struct trace_array *tr, char *buf, bool disable_first); 1366 1367 static inline void 1368 __trace_event_discard_commit(struct trace_buffer *buffer, 1369 struct ring_buffer_event *event) 1370 { 1371 if (this_cpu_read(trace_buffered_event) == event) { 1372 /* Simply release the temp buffer and enable preemption */ 1373 this_cpu_dec(trace_buffered_event_cnt); 1374 preempt_enable_notrace(); 1375 return; 1376 } 1377 /* ring_buffer_discard_commit() enables preemption */ 1378 ring_buffer_discard_commit(buffer, event); 1379 } 1380 1381 /* 1382 * Helper function for event_trigger_unlock_commit{_regs}(). 1383 * If there are event triggers attached to this event that requires 1384 * filtering against its fields, then they will be called as the 1385 * entry already holds the field information of the current event. 1386 * 1387 * It also checks if the event should be discarded or not. 1388 * It is to be discarded if the event is soft disabled and the 1389 * event was only recorded to process triggers, or if the event 1390 * filter is active and this event did not match the filters. 1391 * 1392 * Returns true if the event is discarded, false otherwise. 1393 */ 1394 static inline bool 1395 __event_trigger_test_discard(struct trace_event_file *file, 1396 struct trace_buffer *buffer, 1397 struct ring_buffer_event *event, 1398 void *entry, 1399 enum event_trigger_type *tt) 1400 { 1401 unsigned long eflags = file->flags; 1402 1403 if (eflags & EVENT_FILE_FL_TRIGGER_COND) 1404 *tt = event_triggers_call(file, buffer, entry, event); 1405 1406 if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED | 1407 EVENT_FILE_FL_FILTERED | 1408 EVENT_FILE_FL_PID_FILTER)))) 1409 return false; 1410 1411 if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) 1412 goto discard; 1413 1414 if (file->flags & EVENT_FILE_FL_FILTERED && 1415 !filter_match_preds(file->filter, entry)) 1416 goto discard; 1417 1418 if ((file->flags & EVENT_FILE_FL_PID_FILTER) && 1419 trace_event_ignore_this_pid(file)) 1420 goto discard; 1421 1422 return false; 1423 discard: 1424 __trace_event_discard_commit(buffer, event); 1425 return true; 1426 } 1427 1428 /** 1429 * event_trigger_unlock_commit - handle triggers and finish event commit 1430 * @file: The file pointer associated with the event 1431 * @buffer: The ring buffer that the event is being written to 1432 * @event: The event meta data in the ring buffer 1433 * @entry: The event itself 1434 * @trace_ctx: The tracing context flags. 1435 * 1436 * This is a helper function to handle triggers that require data 1437 * from the event itself. 
It also tests the event against filters and 1438 * if the event is soft disabled and should be discarded. 1439 */ 1440 static inline void 1441 event_trigger_unlock_commit(struct trace_event_file *file, 1442 struct trace_buffer *buffer, 1443 struct ring_buffer_event *event, 1444 void *entry, unsigned int trace_ctx) 1445 { 1446 enum event_trigger_type tt = ETT_NONE; 1447 1448 if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) 1449 trace_buffer_unlock_commit(file->tr, buffer, event, trace_ctx); 1450 1451 if (tt) 1452 event_triggers_post_call(file, tt); 1453 } 1454 1455 #define FILTER_PRED_INVALID ((unsigned short)-1) 1456 #define FILTER_PRED_IS_RIGHT (1 << 15) 1457 #define FILTER_PRED_FOLD (1 << 15) 1458 1459 /* 1460 * The max preds is the size of unsigned short with 1461 * two flags at the MSBs. One bit is used for both the IS_RIGHT 1462 * and FOLD flags. The other is reserved. 1463 * 1464 * 2^14 preds is way more than enough. 1465 */ 1466 #define MAX_FILTER_PRED 16384 1467 1468 struct filter_pred; 1469 struct regex; 1470 1471 typedef int (*regex_match_func)(char *str, struct regex *r, int len); 1472 1473 enum regex_type { 1474 MATCH_FULL = 0, 1475 MATCH_FRONT_ONLY, 1476 MATCH_MIDDLE_ONLY, 1477 MATCH_END_ONLY, 1478 MATCH_GLOB, 1479 MATCH_INDEX, 1480 }; 1481 1482 struct regex { 1483 char pattern[MAX_FILTER_STR_VAL]; 1484 int len; 1485 int field_len; 1486 regex_match_func match; 1487 }; 1488 1489 static inline bool is_string_field(struct ftrace_event_field *field) 1490 { 1491 return field->filter_type == FILTER_DYN_STRING || 1492 field->filter_type == FILTER_RDYN_STRING || 1493 field->filter_type == FILTER_STATIC_STRING || 1494 field->filter_type == FILTER_PTR_STRING || 1495 field->filter_type == FILTER_COMM; 1496 } 1497 1498 static inline bool is_function_field(struct ftrace_event_field *field) 1499 { 1500 return field->filter_type == FILTER_TRACE_FN; 1501 } 1502 1503 extern enum regex_type 1504 filter_parse_regex(char *buff, int len, char **search, int *not); 1505 extern void print_event_filter(struct trace_event_file *file, 1506 struct trace_seq *s); 1507 extern int apply_event_filter(struct trace_event_file *file, 1508 char *filter_string); 1509 extern int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, 1510 char *filter_string); 1511 extern void print_subsystem_event_filter(struct event_subsystem *system, 1512 struct trace_seq *s); 1513 extern int filter_assign_type(const char *type); 1514 extern int create_event_filter(struct trace_array *tr, 1515 struct trace_event_call *call, 1516 char *filter_str, bool set_str, 1517 struct event_filter **filterp); 1518 extern void free_event_filter(struct event_filter *filter); 1519 1520 struct ftrace_event_field * 1521 trace_find_event_field(struct trace_event_call *call, char *name); 1522 1523 extern void trace_event_enable_cmd_record(bool enable); 1524 extern void trace_event_enable_tgid_record(bool enable); 1525 1526 extern int event_trace_init(void); 1527 extern int init_events(void); 1528 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr); 1529 extern int event_trace_del_tracer(struct trace_array *tr); 1530 extern void __trace_early_add_events(struct trace_array *tr); 1531 1532 extern struct trace_event_file *__find_event_file(struct trace_array *tr, 1533 const char *system, 1534 const char *event); 1535 extern struct trace_event_file *find_event_file(struct trace_array *tr, 1536 const char *system, 1537 const char *event); 1538 1539 static inline void *event_file_data(struct file 
*filp) 1540 { 1541 return READ_ONCE(file_inode(filp)->i_private); 1542 } 1543 1544 extern struct mutex event_mutex; 1545 extern struct list_head ftrace_events; 1546 1547 extern const struct file_operations event_trigger_fops; 1548 extern const struct file_operations event_hist_fops; 1549 extern const struct file_operations event_hist_debug_fops; 1550 extern const struct file_operations event_inject_fops; 1551 1552 #ifdef CONFIG_HIST_TRIGGERS 1553 extern int register_trigger_hist_cmd(void); 1554 extern int register_trigger_hist_enable_disable_cmds(void); 1555 #else 1556 static inline int register_trigger_hist_cmd(void) { return 0; } 1557 static inline int register_trigger_hist_enable_disable_cmds(void) { return 0; } 1558 #endif 1559 1560 extern int register_trigger_cmds(void); 1561 extern void clear_event_triggers(struct trace_array *tr); 1562 1563 enum { 1564 EVENT_TRIGGER_FL_PROBE = BIT(0), 1565 }; 1566 1567 struct event_trigger_data { 1568 unsigned long count; 1569 int ref; 1570 int flags; 1571 struct event_trigger_ops *ops; 1572 struct event_command *cmd_ops; 1573 struct event_filter __rcu *filter; 1574 char *filter_str; 1575 void *private_data; 1576 bool paused; 1577 bool paused_tmp; 1578 struct list_head list; 1579 char *name; 1580 struct list_head named_list; 1581 struct event_trigger_data *named_data; 1582 }; 1583 1584 /* Avoid typos */ 1585 #define ENABLE_EVENT_STR "enable_event" 1586 #define DISABLE_EVENT_STR "disable_event" 1587 #define ENABLE_HIST_STR "enable_hist" 1588 #define DISABLE_HIST_STR "disable_hist" 1589 1590 struct enable_trigger_data { 1591 struct trace_event_file *file; 1592 bool enable; 1593 bool hist; 1594 }; 1595 1596 extern int event_enable_trigger_print(struct seq_file *m, 1597 struct event_trigger_data *data); 1598 extern void event_enable_trigger_free(struct event_trigger_data *data); 1599 extern int event_enable_trigger_parse(struct event_command *cmd_ops, 1600 struct trace_event_file *file, 1601 char *glob, char *cmd, 1602 char *param_and_filter); 1603 extern int event_enable_register_trigger(char *glob, 1604 struct event_trigger_data *data, 1605 struct trace_event_file *file); 1606 extern void event_enable_unregister_trigger(char *glob, 1607 struct event_trigger_data *test, 1608 struct trace_event_file *file); 1609 extern void trigger_data_free(struct event_trigger_data *data); 1610 extern int event_trigger_init(struct event_trigger_data *data); 1611 extern int trace_event_trigger_enable_disable(struct trace_event_file *file, 1612 int trigger_enable); 1613 extern void update_cond_flag(struct trace_event_file *file); 1614 extern int set_trigger_filter(char *filter_str, 1615 struct event_trigger_data *trigger_data, 1616 struct trace_event_file *file); 1617 extern struct event_trigger_data *find_named_trigger(const char *name); 1618 extern bool is_named_trigger(struct event_trigger_data *test); 1619 extern int save_named_trigger(const char *name, 1620 struct event_trigger_data *data); 1621 extern void del_named_trigger(struct event_trigger_data *data); 1622 extern void pause_named_trigger(struct event_trigger_data *data); 1623 extern void unpause_named_trigger(struct event_trigger_data *data); 1624 extern void set_named_trigger_data(struct event_trigger_data *data, 1625 struct event_trigger_data *named_data); 1626 extern struct event_trigger_data * 1627 get_named_trigger_data(struct event_trigger_data *data); 1628 extern int register_event_command(struct event_command *cmd); 1629 extern int unregister_event_command(struct event_command *cmd); 1630 extern int 
register_trigger_hist_enable_disable_cmds(void); 1631 extern bool event_trigger_check_remove(const char *glob); 1632 extern bool event_trigger_empty_param(const char *param); 1633 extern int event_trigger_separate_filter(char *param_and_filter, char **param, 1634 char **filter, bool param_required); 1635 extern struct event_trigger_data * 1636 event_trigger_alloc(struct event_command *cmd_ops, 1637 char *cmd, 1638 char *param, 1639 void *private_data); 1640 extern int event_trigger_parse_num(char *trigger, 1641 struct event_trigger_data *trigger_data); 1642 extern int event_trigger_set_filter(struct event_command *cmd_ops, 1643 struct trace_event_file *file, 1644 char *param, 1645 struct event_trigger_data *trigger_data); 1646 extern void event_trigger_reset_filter(struct event_command *cmd_ops, 1647 struct event_trigger_data *trigger_data); 1648 extern int event_trigger_register(struct event_command *cmd_ops, 1649 struct trace_event_file *file, 1650 char *glob, 1651 struct event_trigger_data *trigger_data); 1652 extern void event_trigger_unregister(struct event_command *cmd_ops, 1653 struct trace_event_file *file, 1654 char *glob, 1655 struct event_trigger_data *trigger_data); 1656 1657 /** 1658 * struct event_trigger_ops - callbacks for trace event triggers 1659 * 1660 * The methods in this structure provide per-event trigger hooks for 1661 * various trigger operations. 1662 * 1663 * The @init and @free methods are used during trigger setup and 1664 * teardown, typically called from an event_command's @parse() 1665 * function implementation. 1666 * 1667 * The @print method is used to print the trigger spec. 1668 * 1669 * The @trigger method is the function that actually implements the 1670 * trigger and is called in the context of the triggering event 1671 * whenever that event occurs. 1672 * 1673 * All the methods below, except for @init() and @free(), must be 1674 * implemented. 1675 * 1676 * @trigger: The trigger 'probe' function called when the triggering 1677 * event occurs. The data passed into this callback is the data 1678 * that was supplied to the event_command @reg() function that 1679 * registered the trigger (see struct event_command) along with 1680 * the trace record, rec. 1681 * 1682 * @init: An optional initialization function called for the trigger 1683 * when the trigger is registered (via the event_command reg() 1684 * function). This can be used to perform per-trigger 1685 * initialization such as incrementing a per-trigger reference 1686 * count, for instance. This is usually implemented by the 1687 * generic utility function @event_trigger_init() (see 1688 * trace_event_triggers.c). 1689 * 1690 * @free: An optional de-initialization function called for the 1691 * trigger when the trigger is unregistered (via the 1692 * event_command @reg() function). This can be used to perform 1693 * per-trigger de-initialization such as decrementing a 1694 * per-trigger reference count and freeing corresponding trigger 1695 * data, for instance. This is usually implemented by the 1696 * generic utility function @event_trigger_free() (see 1697 * trace_event_triggers.c). 1698 * 1699 * @print: The callback function invoked to have the trigger print 1700 * itself. This is usually implemented by a wrapper function 1701 * that calls the generic utility function @event_trigger_print() 1702 * (see trace_event_triggers.c). 

/**
 * struct event_trigger_ops - callbacks for trace event triggers
 *
 * The methods in this structure provide per-event trigger hooks for
 * various trigger operations.
 *
 * The @init and @free methods are used during trigger setup and
 * teardown, typically called from an event_command's @parse()
 * function implementation.
 *
 * The @print method is used to print the trigger spec.
 *
 * The @trigger method is the function that actually implements the
 * trigger and is called in the context of the triggering event
 * whenever that event occurs.
 *
 * All the methods below, except for @init() and @free(), must be
 * implemented.
 *
 * @trigger: The trigger 'probe' function called when the triggering
 *	event occurs. The data passed into this callback is the data
 *	that was supplied to the event_command @reg() function that
 *	registered the trigger (see struct event_command) along with
 *	the trace record, rec.
 *
 * @init: An optional initialization function called for the trigger
 *	when the trigger is registered (via the event_command @reg()
 *	function). This can be used to perform per-trigger
 *	initialization such as incrementing a per-trigger reference
 *	count, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_init() (see
 *	trace_events_trigger.c).
 *
 * @free: An optional de-initialization function called for the
 *	trigger when the trigger is unregistered (via the
 *	event_command @unreg() function). This can be used to perform
 *	per-trigger de-initialization such as decrementing a
 *	per-trigger reference count and freeing corresponding trigger
 *	data, for instance. This is usually implemented by the
 *	generic utility function @event_trigger_free() (see
 *	trace_events_trigger.c).
 *
 * @print: The callback function invoked to have the trigger print
 *	itself. This is usually implemented by a wrapper function
 *	that calls the generic utility function @event_trigger_print()
 *	(see trace_events_trigger.c).
 */
struct event_trigger_ops {
	void			(*trigger)(struct event_trigger_data *data,
					   struct trace_buffer *buffer,
					   void *rec,
					   struct ring_buffer_event *rbe);
	int			(*init)(struct event_trigger_data *data);
	void			(*free)(struct event_trigger_data *data);
	int			(*print)(struct seq_file *m,
					 struct event_trigger_data *data);
};
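
/*
 * Illustrative sketch (not part of this header): a minimal
 * event_trigger_ops instance wired to the generic helpers mentioned
 * above. "my_trigger", "my_trigger_print" and "my_trigger_ops" are
 * hypothetical; event_trigger_init()/event_trigger_free() are the
 * generic init/free helpers from trace_events_trigger.c.
 *
 *	static void my_trigger(struct event_trigger_data *data,
 *			       struct trace_buffer *buffer, void *rec,
 *			       struct ring_buffer_event *rbe)
 *	{
 *		if (!data->count)
 *			return;
 *		if (data->count != -1)
 *			(data->count)--;
 *		// the actual trigger action would go here
 *	}
 *
 *	static int my_trigger_print(struct seq_file *m,
 *				    struct event_trigger_data *data)
 *	{
 *		seq_puts(m, "my_trigger");
 *		if (data->count != -1)
 *			seq_printf(m, ":count=%lu", data->count);
 *		seq_putc(m, '\n');
 *		return 0;
 *	}
 *
 *	static struct event_trigger_ops my_trigger_ops = {
 *		.trigger	= my_trigger,
 *		.print		= my_trigger_print,
 *		.init		= event_trigger_init,
 *		.free		= event_trigger_free,
 *	};
 */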

/**
 * struct event_command - callbacks and data members for event commands
 *
 * Event commands are invoked by users by writing the command name
 * into the 'trigger' file associated with a trace event. The
 * parameters associated with a specific invocation of an event
 * command are used to create an event trigger instance, which is
 * added to the list of trigger instances associated with that trace
 * event. When the event is hit, the set of triggers associated with
 * that event is invoked.
 *
 * The data members in this structure provide per-event command data
 * for various event commands.
 *
 * All the data members below, except for @flags, must be set for
 * each event command.
 *
 * @name: The unique name that identifies the event command. This is
 *	the name used when setting triggers via trigger files.
 *
 * @trigger_type: A unique id that identifies the event command
 *	'type'. This value has two purposes, the first to ensure that
 *	only one trigger of the same type can be set at a given time
 *	for a particular event e.g. it doesn't make sense to have both
 *	a traceon and traceoff trigger attached to a single event at
 *	the same time, so traceon and traceoff have the same type
 *	though they have different names. The @trigger_type value is
 *	also used as a bit value for deferring the actual trigger
 *	action until after the current event is finished. Some
 *	commands need to do this if they themselves log to the trace
 *	buffer (see @flags and enum event_command_flags below).
 *	@trigger_type values are defined by adding new values to the
 *	trigger_type enum in include/linux/trace_events.h.
 *
 * @flags: See the enum event_command_flags below.
 *
 * All the methods below, except for @set_filter() and @unreg_all(),
 * must be implemented.
 *
 * @parse: The callback function responsible for parsing and
 *	registering the trigger written to the 'trigger' file by the
 *	user. It allocates the trigger instance and registers it with
 *	the appropriate trace event. It makes use of the other
 *	event_command callback functions to orchestrate this, and is
 *	usually implemented by the generic utility function
 *	@event_trigger_callback() (see trace_events_trigger.c).
 *
 * @reg: Adds the trigger to the list of triggers associated with the
 *	event, and enables the event trigger itself, after
 *	initializing it (via the event_trigger_ops @init() function).
 *	This is also where commands can use the @trigger_type value to
 *	make the decision as to whether or not multiple instances of
 *	the trigger should be allowed. This is usually implemented by
 *	the generic utility function @register_trigger() (see
 *	trace_events_trigger.c).
 *
 * @unreg: Removes the trigger from the list of triggers associated
 *	with the event, and disables the event trigger itself, then
 *	releases it (via the event_trigger_ops @free() function).
 *	This is usually implemented by the generic utility function
 *	@unregister_trigger() (see trace_events_trigger.c).
 *
 * @unreg_all: An optional function called to remove all the triggers
 *	from the list of triggers associated with the event. Called
 *	when a trigger file is opened in truncate mode.
 *
 * @set_filter: An optional function called to parse and set a filter
 *	for the trigger. If no @set_filter() method is set for the
 *	event command, filters set by the user for the command will be
 *	ignored. This is usually implemented by the generic utility
 *	function @set_trigger_filter() (see trace_events_trigger.c).
 *
 * @get_trigger_ops: The callback function invoked to retrieve the
 *	event_trigger_ops implementation associated with the command.
 *	This callback function allows a single event_command to
 *	support multiple trigger implementations via different sets of
 *	event_trigger_ops, depending on the value of the @param
 *	string.
 */
struct event_command {
	struct list_head	list;
	char			*name;
	enum event_trigger_type	trigger_type;
	int			flags;
	int			(*parse)(struct event_command *cmd_ops,
					 struct trace_event_file *file,
					 char *glob, char *cmd,
					 char *param_and_filter);
	int			(*reg)(char *glob,
				       struct event_trigger_data *data,
				       struct trace_event_file *file);
	void			(*unreg)(char *glob,
					 struct event_trigger_data *data,
					 struct trace_event_file *file);
	void			(*unreg_all)(struct trace_event_file *file);
	int			(*set_filter)(char *filter_str,
					      struct event_trigger_data *data,
					      struct trace_event_file *file);
	struct event_trigger_ops *(*get_trigger_ops)(char *cmd, char *param);
};
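
/*
 * Illustrative sketch (not part of this header): defining and
 * registering a hypothetical event_command. "my_trigger_cmd",
 * "my_get_trigger_ops", "my_trigger_parse" and the reuse of
 * ETT_TRACE_ONOFF are hypothetical; a real command adds its own
 * event_trigger_type bit and typically points @reg/@unreg at the
 * generic register_trigger()/unregister_trigger() helpers in
 * trace_events_trigger.c.
 *
 *	static struct event_trigger_ops *
 *	my_get_trigger_ops(char *cmd, char *param)
 *	{
 *		return &my_trigger_ops;
 *	}
 *
 *	static struct event_command my_trigger_cmd = {
 *		.name			= "my_trigger",
 *		.trigger_type		= ETT_TRACE_ONOFF,
 *		.parse			= my_trigger_parse,
 *		.reg			= register_trigger,
 *		.unreg			= unregister_trigger,
 *		.set_filter		= set_trigger_filter,
 *		.get_trigger_ops	= my_get_trigger_ops,
 *	};
 *
 *	static __init int register_my_trigger_cmd(void)
 *	{
 *		return register_event_command(&my_trigger_cmd);
 *	}
 */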

/**
 * enum event_command_flags - flags for struct event_command
 *
 * @POST_TRIGGER: A flag that says whether or not this command needs
 *	to have its action delayed until after the current event has
 *	been closed. Some triggers need to avoid being invoked while
 *	an event is currently in the process of being logged, since
 *	the trigger may itself log data into the trace buffer. Thus
 *	we make sure the current event is committed before invoking
 *	those triggers. To do that, the trigger invocation is split
 *	in two: the first part checks the filter using the current
 *	trace record; if a command has the @POST_TRIGGER flag set, it
 *	sets a bit for itself in the return value, otherwise it
 *	directly invokes the trigger. Once all commands have been
 *	either invoked or set their return flag, the current record is
 *	either committed or discarded. At that point, if any commands
 *	have deferred their triggers, those commands are finally
 *	invoked following the close of the current event. In other
 *	words, if the event_trigger_ops @trigger() probe implementation
 *	itself logs to the trace buffer, this flag should be set,
 *	otherwise it can be left unspecified.
 *
 * @NEEDS_REC: A flag that says whether or not this command needs
 *	access to the trace record in order to perform its function,
 *	regardless of whether or not it has a filter associated with
 *	it (filters make a trigger require access to the trace record
 *	but are not always present).
 */
enum event_command_flags {
	EVENT_CMD_FL_POST_TRIGGER	= 1,
	EVENT_CMD_FL_NEEDS_REC		= 2,
};

static inline bool event_command_post_trigger(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_POST_TRIGGER;
}

static inline bool event_command_needs_rec(struct event_command *cmd_ops)
{
	return cmd_ops->flags & EVENT_CMD_FL_NEEDS_REC;
}

extern int trace_event_enable_disable(struct trace_event_file *file,
				      int enable, int soft_disable);
extern int tracing_alloc_snapshot(void);
extern void tracing_snapshot_cond(struct trace_array *tr, void *cond_data);
extern int tracing_snapshot_cond_enable(struct trace_array *tr, void *cond_data,
					cond_update_fn_t update);
extern int tracing_snapshot_cond_disable(struct trace_array *tr);
extern void *tracing_cond_snapshot_data(struct trace_array *tr);

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

extern const char *__start___tracepoint_str[];
extern const char *__stop___tracepoint_str[];

void trace_printk_control(bool enabled);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);

/* Used from boot time tracer */
extern int trace_set_options(struct trace_array *tr, char *option);
extern int tracing_set_tracer(struct trace_array *tr, const char *buf);
extern ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id);
extern int tracing_set_cpumask(struct trace_array *tr,
			       cpumask_var_t tracing_cpumask_new);

#define MAX_EVENT_NAME_LEN	64

extern ssize_t trace_parse_run_command(struct file *file,
				       const char __user *buffer,
				       size_t count, loff_t *ppos,
				       int (*createfn)(const char *));

extern unsigned int err_pos(char *cmd, const char *str);
extern void tracing_log_err(struct trace_array *tr,
			    const char *loc, const char *cmd,
			    const char **errs, u8 type, u16 pos);
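
/*
 * Illustrative sketch (not part of this header): reporting a parse
 * error to the tracefs error_log file. The error-text array, the
 * error enum and "my_report_error" are hypothetical; @type indexes
 * into @errs, and err_pos() computes the caret position of the
 * offending token within the command string.
 *
 *	static const char *my_err_text[] = {
 *		"Invalid argument",
 *		"Missing value",
 *	};
 *
 *	enum { MY_ERR_INVAL, MY_ERR_MISSING };
 *
 *	static void my_report_error(struct trace_array *tr, char *cmd,
 *				    const char *bad_token)
 *	{
 *		tracing_log_err(tr, "my_tracer", cmd, my_err_text,
 *				MY_ERR_INVAL, err_pos(cmd, bad_token));
 *	}
 */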

/*
 * Normal trace_printk() and friends allocate special buffers
 * to do the manipulation, as well as saving the print formats
 * into sections for display. But the trace infrastructure wants
 * to use these without the added overhead, at the price of being
 * a bit slower (used mainly for warnings, where we don't care
 * about performance). internal_trace_puts() exists for that
 * purpose.
 */
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))

#undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)	\
	extern struct trace_event_call				\
	__aligned(4) event_##call;
#undef FTRACE_ENTRY_DUP
#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
#undef FTRACE_ENTRY_PACKED
#define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print)	\
	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))

#include "trace_entries.h"

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data);
#else
#define perf_ftrace_event_register NULL
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
void init_ftrace_syscalls(void);
const char *get_syscall_name(int syscall);
#else
static inline void init_ftrace_syscalls(void) { }
static inline const char *get_syscall_name(int syscall)
{
	return NULL;
}
#endif

#ifdef CONFIG_EVENT_TRACING
void trace_event_init(void);
void trace_event_eval_update(struct trace_eval_map **map, int len);
/* Used from boot time tracer */
extern int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
extern int trigger_process_regex(struct trace_event_file *file, char *buff);
#else
static inline void __init trace_event_init(void) { }
static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { }
#endif

#ifdef CONFIG_TRACER_SNAPSHOT
void tracing_snapshot_instance(struct trace_array *tr);
int tracing_alloc_snapshot_instance(struct trace_array *tr);
#else
static inline void tracing_snapshot_instance(struct trace_array *tr) { }
static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
{
	return 0;
}
#endif

#ifdef CONFIG_PREEMPT_TRACER
void tracer_preempt_on(unsigned long a0, unsigned long a1);
void tracer_preempt_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_IRQSOFF_TRACER
void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

/*
 * Reset the state of the trace_iterator so that it can read consumed data.
 * Normally, the trace_iterator is used for reading the data when it is not
 * consumed, and must retain state.
 */
static __always_inline void trace_iterator_reset(struct trace_iterator *iter)
{
	memset_startat(iter, 0, seq);
	iter->pos = -1;
}

/* Check that a name is usable for an event, group or field */
static inline bool __is_good_name(const char *name, bool hash_ok)
{
	if (!isalpha(*name) && *name != '_' && (!hash_ok || *name != '-'))
		return false;
	while (*++name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_' &&
		    (!hash_ok || *name != '-'))
			return false;
	}
	return true;
}

/* Check that a name is usable for an event, group or field */
static inline bool is_good_name(const char *name)
{
	return __is_good_name(name, false);
}

/* Check that a name is usable for a system */
static inline bool is_good_system_name(const char *name)
{
	return __is_good_name(name, true);
}

/* Convert certain expected symbols into '_' when generating event names */
static inline void sanitize_event_name(char *name)
{
	while (*name++ != '\0')
		if (*name == ':' || *name == '.')
			*name = '_';
}

/*
 * This is a generic way to read and write a u64 value from a file in tracefs.
 *
 * The value is stored in the variable pointed to by *val. The value needs
 * to be at least *min and at most *max. The write is protected by an
 * existing *lock.
 */
struct trace_min_max_param {
	struct mutex	*lock;
	u64		*val;
	u64		*min;
	u64		*max;
};

#define U64_STR_SIZE		24	/* 20 digits max */

extern const struct file_operations trace_min_max_fops;
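
/*
 * Illustrative sketch (not part of this header): exposing a bounded
 * u64 tunable through tracefs with trace_min_max_fops. The variables,
 * the mutex and the file name are hypothetical; the osnoise tracer
 * (trace_osnoise.c) uses this pattern for its tunables.
 *
 *	static DEFINE_MUTEX(my_lock);
 *	static u64 my_period_us = 1000;
 *	static u64 my_period_min = 100;
 *	static u64 my_period_max = 10000000;
 *
 *	static struct trace_min_max_param my_period = {
 *		.lock	= &my_lock,
 *		.val	= &my_period_us,
 *		.min	= &my_period_min,
 *		.max	= &my_period_max,
 *	};
 *
 *	// from an init path, with "parent" being a tracefs dentry:
 *	trace_create_file("period_us", TRACE_MODE_WRITE, parent,
 *			  &my_period, &trace_min_max_fops);
 */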

#ifdef CONFIG_RV
extern int rv_init_interface(void);
#else
static inline int rv_init_interface(void)
{
	return 0;
}
#endif

#endif /* _LINUX_KERNEL_TRACE_H */