#ifndef _LINUX_KERNEL_TRACE_H
#define _LINUX_KERNEL_TRACE_H

#include <linux/fs.h>
#include <asm/atomic.h>
#include <linux/sched.h>
#include <linux/clocksource.h>
#include <linux/ring_buffer.h>
#include <linux/mmiotrace.h>
#include <linux/ftrace.h>
#include <trace/boot.h>
#include <linux/kmemtrace.h>
#include <trace/power.h>

#include <linux/trace_seq.h>
#include <linux/ftrace_event.h>

enum trace_type {
	__TRACE_FIRST_TYPE = 0,

	TRACE_FN,
	TRACE_CTX,
	TRACE_WAKE,
	TRACE_STACK,
	TRACE_PRINT,
	TRACE_BPRINT,
	TRACE_SPECIAL,
	TRACE_MMIO_RW,
	TRACE_MMIO_MAP,
	TRACE_BRANCH,
	TRACE_BOOT_CALL,
	TRACE_BOOT_RET,
	TRACE_GRAPH_RET,
	TRACE_GRAPH_ENT,
	TRACE_USER_STACK,
	TRACE_HW_BRANCHES,
	TRACE_SYSCALL_ENTER,
	TRACE_SYSCALL_EXIT,
	TRACE_KMEM_ALLOC,
	TRACE_KMEM_FREE,
	TRACE_POWER,
	TRACE_BLK,

	__TRACE_LAST_TYPE,
};

/*
 * Function trace entry - function address and parent function address:
 */
struct ftrace_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	unsigned long		parent_ip;
};

/* Function call entry */
struct ftrace_graph_ent_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ent	graph_ent;
};

/* Function return entry */
struct ftrace_graph_ret_entry {
	struct trace_entry	ent;
	struct ftrace_graph_ret	ret;
};
extern struct tracer boot_tracer;

/*
 * Context switch trace entry - which task (and prio) we switched from/to:
 */
struct ctx_switch_entry {
	struct trace_entry	ent;
	unsigned int		prev_pid;
	unsigned char		prev_prio;
	unsigned char		prev_state;
	unsigned int		next_pid;
	unsigned char		next_prio;
	unsigned char		next_state;
	unsigned int		next_cpu;
};

/*
 * Special (free-form) trace entry:
 */
struct special_entry {
	struct trace_entry	ent;
	unsigned long		arg1;
	unsigned long		arg2;
	unsigned long		arg3;
};

/*
 * Stack-trace entry:
 */

#define FTRACE_STACK_ENTRIES	8

struct stack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

struct userstack_entry {
	struct trace_entry	ent;
	unsigned long		caller[FTRACE_STACK_ENTRIES];
};

/*
 * trace_printk entry:
 */
struct bprint_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	const char		*fmt;
	u32			buf[];
};

struct print_entry {
	struct trace_entry	ent;
	unsigned long		ip;
	char			buf[];
};

#define TRACE_OLD_SIZE		88

struct trace_field_cont {
	unsigned char		type;
	/* Temporary till we get rid of this completely */
	char			buf[TRACE_OLD_SIZE - 1];
};

struct trace_mmiotrace_rw {
	struct trace_entry	ent;
	struct mmiotrace_rw	rw;
};

struct trace_mmiotrace_map {
	struct trace_entry	ent;
	struct mmiotrace_map	map;
};

struct trace_boot_call {
	struct trace_entry	ent;
	struct boot_trace_call	boot_call;
};

struct trace_boot_ret {
	struct trace_entry	ent;
	struct boot_trace_ret	boot_ret;
};

#define TRACE_FUNC_SIZE 30
#define TRACE_FILE_SIZE 20
struct trace_branch {
	struct trace_entry	ent;
	unsigned		line;
	char			func[TRACE_FUNC_SIZE+1];
	char			file[TRACE_FILE_SIZE+1];
	char			correct;
};

struct hw_branch_entry {
	struct trace_entry	ent;
	u64			from;
	u64			to;
};

struct trace_power {
	struct trace_entry	ent;
	struct power_trace	state_data;
};
enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC = 0,	/* kmalloc() or kfree(). */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*(). */
	KMEMTRACE_TYPE_PAGES,		/* __get_free_pages() and friends. */
};

struct kmemtrace_alloc_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
	size_t			bytes_req;
	size_t			bytes_alloc;
	gfp_t			gfp_flags;
	int			node;
};

struct kmemtrace_free_entry {
	struct trace_entry	ent;
	enum kmemtrace_type_id	type_id;
	unsigned long		call_site;
	const void		*ptr;
};

struct syscall_trace_enter {
	struct trace_entry	ent;
	int			nr;
	unsigned long		args[];
};

struct syscall_trace_exit {
	struct trace_entry	ent;
	int			nr;
	unsigned long		ret;
};


/*
 * trace_flag_type is an enumeration that holds different
 * states when a trace occurs. These are:
 *  IRQS_OFF		- interrupts were disabled
 *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
 *  NEED_RESCHED	- reschedule is requested
 *  HARDIRQ		- inside an interrupt handler
 *  SOFTIRQ		- inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF		= 0x01,
	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
	TRACE_FLAG_NEED_RESCHED		= 0x04,
	TRACE_FLAG_HARDIRQ		= 0x08,
	TRACE_FLAG_SOFTIRQ		= 0x10,
};

#define TRACE_BUF_SIZE		1024

/*
 * The CPU trace array - it consists of thousands of trace entries
 * plus some other descriptor data (for example which task started
 * the trace, etc.):
 */
struct trace_array_cpu {
	atomic_t		disabled;
	void			*buffer_page;	/* ring buffer spare */

	/* these fields get copied into max-trace: */
	unsigned long		trace_idx;
	unsigned long		overrun;
	unsigned long		saved_latency;
	unsigned long		critical_start;
	unsigned long		critical_end;
	unsigned long		critical_sequence;
	unsigned long		nice;
	unsigned long		policy;
	unsigned long		rt_priority;
	cycle_t			preempt_timestamp;
	pid_t			pid;
	uid_t			uid;
	char			comm[TASK_COMM_LEN];
};

/*
 * The trace array - an array of per-CPU trace arrays. This is the
 * highest level data structure that individual tracers deal with.
 * They have on/off state as well:
 */
struct trace_array {
	struct ring_buffer	*buffer;
	unsigned long		entries;
	int			cpu;
	cycle_t			time_start;
	struct task_struct	*waiter;
	struct trace_array_cpu	*data[NR_CPUS];
};

#define FTRACE_CMP_TYPE(var, type) \
	__builtin_types_compatible_p(typeof(var), type *)

#undef IF_ASSIGN
#define IF_ASSIGN(var, entry, etype, id)		\
	if (FTRACE_CMP_TYPE(var, etype)) {		\
		var = (typeof(var))(entry);		\
		WARN_ON(id && (entry)->type != id);	\
		break;					\
	}

/* Will cause compile errors if type is not found. */
extern void __ftrace_bad_type(void);

/*
 * The trace_assign_type is a verifier that the entry type is
 * the same as the type being assigned. To add new types simply
 * add a line with the following format:
 *
 * IF_ASSIGN(var, ent, type, id);
 *
 * Where "type" is the trace type that includes the trace_entry
 * as the "ent" item. And "id" is the trace identifier that is
 * used in the trace_type enum.
 *
 * If the type can have more than one id, then use zero.
 */
#define trace_assign_type(var, ent)					\
	do {								\
		IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);	\
		IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);	\
		IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);	\
		IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
		IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);	\
		IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT);\
		IF_ASSIGN(var, ent, struct special_entry, 0);		\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_rw,		\
			  TRACE_MMIO_RW);				\
		IF_ASSIGN(var, ent, struct trace_mmiotrace_map,		\
			  TRACE_MMIO_MAP);				\
		IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\
		IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\
		IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH);\
		IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry,	\
			  TRACE_GRAPH_ENT);				\
		IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry,	\
			  TRACE_GRAPH_RET);				\
		IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
		IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER);	\
		IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,	\
			  TRACE_KMEM_ALLOC);				\
		IF_ASSIGN(var, ent, struct kmemtrace_free_entry,	\
			  TRACE_KMEM_FREE);				\
		IF_ASSIGN(var, ent, struct syscall_trace_enter,		\
			  TRACE_SYSCALL_ENTER);				\
		IF_ASSIGN(var, ent, struct syscall_trace_exit,		\
			  TRACE_SYSCALL_EXIT);				\
		__ftrace_bad_type();					\
	} while (0)
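/*
 * Illustrative use (a sketch, not copied from any one output routine):
 * a print callback typically narrows the generic trace_entry it is handed
 * down to the specific record type before touching its fields:
 *
 *	struct ftrace_entry *field;
 *
 *	trace_assign_type(field, iter->ent);
 *	trace_seq_printf(&iter->seq, "%lx <- %lx\n",
 *			 field->ip, field->parent_ip);
 */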
/*
 * An option specific to a tracer. This is a boolean value.
 * The bit is the bit index that sets its value on the
 * flags value in struct tracer_flags.
 */
struct tracer_opt {
	const char	*name; /* Will appear on the trace_options file */
	u32		bit; /* Mask assigned in val field in tracer_flags */
};

/*
 * The set of specific options for a tracer. Your tracer
 * has to set the initial value of the flags val.
 */
struct tracer_flags {
	u32			val;
	struct tracer_opt	*opts;
};

/* Makes it easier to define a tracer opt */
#define TRACER_OPT(s, b)	.name = #s, .bit = b


/**
 * struct tracer - a specific tracer and its callbacks to interact with debugfs
 * @name: the name chosen to select it on the available_tracers file
 * @init: called when one switches to this tracer (echo name > current_tracer)
 * @reset: called when one switches to another tracer
 * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
 * @stop: called when tracing is paused (echo 0 > tracing_enabled)
 * @open: called when the trace file is opened
 * @pipe_open: called when the trace_pipe file is opened
 * @wait_pipe: override how the user waits for traces on trace_pipe
 * @close: called when the trace file is released
 * @read: override the default read callback on trace_pipe
 * @splice_read: override the default splice_read callback on trace_pipe
 * @selftest: selftest to run on boot (see trace_selftest.c)
 * @print_header: override the first lines that describe your columns
 * @print_line: callback that prints a trace
 * @set_flag: signals one of your private flags changed (trace_options file)
 * @flags: your private flags
 */
struct tracer {
	const char		*name;
	int			(*init)(struct trace_array *tr);
	void			(*reset)(struct trace_array *tr);
	void			(*start)(struct trace_array *tr);
	void			(*stop)(struct trace_array *tr);
	void			(*open)(struct trace_iterator *iter);
	void			(*pipe_open)(struct trace_iterator *iter);
	void			(*wait_pipe)(struct trace_iterator *iter);
	void			(*close)(struct trace_iterator *iter);
	ssize_t			(*read)(struct trace_iterator *iter,
					struct file *filp, char __user *ubuf,
					size_t cnt, loff_t *ppos);
	ssize_t			(*splice_read)(struct trace_iterator *iter,
					       struct file *filp,
					       loff_t *ppos,
					       struct pipe_inode_info *pipe,
					       size_t len,
					       unsigned int flags);
#ifdef CONFIG_FTRACE_STARTUP_TEST
	int			(*selftest)(struct tracer *trace,
					    struct trace_array *tr);
#endif
	void			(*print_header)(struct seq_file *m);
	enum print_line_t	(*print_line)(struct trace_iterator *iter);
	/* If you handled the flag setting, return 0 */
	int			(*set_flag)(u32 old_flags, u32 bit, int set);
	struct tracer		*next;
	int			print_max;
	struct tracer_flags	*flags;
	struct tracer_stat	*stats;
};


#define TRACE_PIPE_ALL_CPU	-1

int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
void tracing_reset(struct trace_array *tr, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr);
void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name,
				 mode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops);

struct dentry *tracing_init_dentry(void);
void init_tracer_sysprof_debugfs(struct dentry *d_tracer);

struct ring_buffer_event;

struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
						    int type,
						    unsigned long len,
						    unsigned long flags,
						    int pc);
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
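/*
 * Rough usage sketch of the reserve/commit pair above (illustrative, in the
 * style of the function tracer; not a verbatim copy): reserve an event of
 * the right type and length, fill in the type-specific entry, then commit
 * it to the ring buffer:
 *
 *	struct ring_buffer_event *event;
 *	struct ftrace_entry *entry;
 *
 *	event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	entry->parent_ip = parent_ip;
 *	trace_buffer_unlock_commit(tr, event, flags, pc);
 */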
struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
					    struct trace_array_cpu *data);

struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts);

void tracing_generic_entry_update(struct trace_entry *entry,
				  unsigned long flags,
				  int pc);

void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);

void ftrace(struct trace_array *tr,
	    struct trace_array_cpu *data,
	    unsigned long ip,
	    unsigned long parent_ip,
	    unsigned long flags, int pc);
void tracing_sched_switch_trace(struct trace_array *tr,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned long flags, int pc);

void tracing_sched_wakeup_trace(struct trace_array *tr,
				struct task_struct *wakee,
				struct task_struct *cur,
				unsigned long flags, int pc);
void trace_special(struct trace_array *tr,
		   struct trace_array_cpu *data,
		   unsigned long arg1,
		   unsigned long arg2,
		   unsigned long arg3, int pc);
void trace_function(struct trace_array *tr,
		    unsigned long ip,
		    unsigned long parent_ip,
		    unsigned long flags, int pc);

void trace_graph_return(struct ftrace_graph_ret *trace);
int trace_graph_entry(struct ftrace_graph_ent *trace);

void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);

extern unsigned long nsecs_to_usecs(unsigned long nsecs);

extern unsigned long tracing_max_latency;
extern unsigned long tracing_thresh;

void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu);
void update_max_tr_single(struct trace_array *tr,
			  struct task_struct *tsk, int cpu);

void __trace_stack(struct trace_array *tr,
		   unsigned long flags,
		   int skip, int pc);

extern cycle_t ftrace_now(int cpu);

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
typedef void
(*tracer_switch_func_t)(void *private,
			void *__rq,
			struct task_struct *prev,
			struct task_struct *next);

struct tracer_switch_ops {
	tracer_switch_func_t		func;
	void				*private;
	struct tracer_switch_ops	*next;
};
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

extern void trace_find_cmdline(int pid, char comm[]);

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt;
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void);
#endif

#ifdef CONFIG_FTRACE_STARTUP_TEST
extern int trace_selftest_startup_function(struct tracer *trace,
					   struct trace_array *tr);
extern int trace_selftest_startup_function_graph(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_irqsoff(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_preemptoff(struct tracer *trace,
					     struct trace_array *tr);
extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
						 struct trace_array *tr);
extern int trace_selftest_startup_wakeup(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_nop(struct tracer *trace,
				      struct trace_array *tr);
extern int
trace_selftest_startup_sched_switch(struct tracer *trace,
				    struct trace_array *tr);
extern int trace_selftest_startup_sysprof(struct tracer *trace,
					  struct trace_array *tr);
extern int trace_selftest_startup_branch(struct tracer *trace,
					 struct trace_array *tr);
extern int trace_selftest_startup_hw_branches(struct tracer *trace,
					      struct trace_array *tr);
#endif /* CONFIG_FTRACE_STARTUP_TEST */

extern void *head_page(struct trace_array_cpu *data);
extern unsigned long long ns2usecs(cycle_t nsec);
extern int
trace_vbprintk(unsigned long ip, const char *fmt, va_list args);
extern int
trace_vprintk(unsigned long ip, const char *fmt, va_list args);

extern unsigned long trace_flags;

/* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern enum print_line_t print_graph_function(struct trace_iterator *iter);
extern enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);

#ifdef CONFIG_DYNAMIC_FTRACE
/* TODO: make this variable */
#define FTRACE_GRAPH_MAX_FUNCS		32
extern int ftrace_graph_count;
extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];

static inline int ftrace_graph_addr(unsigned long addr)
{
	int i;

	if (!ftrace_graph_count || test_tsk_trace_graph(current))
		return 1;

	for (i = 0; i < ftrace_graph_count; i++) {
		if (addr == ftrace_graph_funcs[i])
			return 1;
	}

	return 0;
}
#else
static inline int ftrace_trace_addr(unsigned long addr)
{
	return 1;
}
static inline int ftrace_graph_addr(unsigned long addr)
{
	return 1;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#else /* CONFIG_FUNCTION_GRAPH_TRACER */
static inline enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

extern struct pid *ftrace_pid_trace;

static inline int ftrace_trace_task(struct task_struct *task)
{
	if (!ftrace_pid_trace)
		return 1;

	return test_tsk_trace_trace(task);
}

/*
 * trace_iterator_flags is an enumeration that defines bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 *       trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT		= 0x01,
	TRACE_ITER_SYM_OFFSET		= 0x02,
	TRACE_ITER_SYM_ADDR		= 0x04,
	TRACE_ITER_VERBOSE		= 0x08,
	TRACE_ITER_RAW			= 0x10,
	TRACE_ITER_HEX			= 0x20,
	TRACE_ITER_BIN			= 0x40,
	TRACE_ITER_BLOCK		= 0x80,
	TRACE_ITER_STACKTRACE		= 0x100,
	TRACE_ITER_SCHED_TREE		= 0x200,
	TRACE_ITER_PRINTK		= 0x400,
	TRACE_ITER_PREEMPTONLY		= 0x800,
	TRACE_ITER_BRANCH		= 0x1000,
	TRACE_ITER_ANNOTATE		= 0x2000,
	TRACE_ITER_USERSTACKTRACE	= 0x4000,
	TRACE_ITER_SYM_USEROBJ		= 0x8000,
	TRACE_ITER_PRINTK_MSGONLY	= 0x10000,
	TRACE_ITER_CONTEXT_INFO		= 0x20000, /* Print pid/cpu/time */
	TRACE_ITER_LATENCY_FMT		= 0x40000,
	TRACE_ITER_GLOBAL_CLK		= 0x80000,
	TRACE_ITER_SLEEP_TIME		= 0x100000,
	TRACE_ITER_GRAPH_TIME		= 0x200000,
};

/*
 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
 * control the output of kernel symbols.
 */
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)

extern struct tracer nop_trace;

/**
 * ftrace_preempt_disable - disable preemption scheduler safe
 *
 * When tracing can happen inside the scheduler, there are cases
 * where tracing might happen before the need_resched flag is
 * checked. If this happens and the tracer calls preempt_enable
 * (after a disable), a schedule might take place causing an
 * infinite recursion.
 *
 * To prevent this, we read the need_resched flag before
 * disabling preemption. When we want to enable preemption we
 * check the flag: if it is set, we call preempt_enable_no_resched.
 * Otherwise, we call preempt_enable.
 *
 * The rationale for doing the above is that if need_resched is set
 * and we have yet to reschedule, we are either in an atomic location
 * (where we do not need to check for scheduling) or we are inside
 * the scheduler and do not want to resched.
 */
static inline int ftrace_preempt_disable(void)
{
	int resched;

	resched = need_resched();
	preempt_disable_notrace();

	return resched;
}

/**
 * ftrace_preempt_enable - enable preemption scheduler safe
 * @resched: the return value from ftrace_preempt_disable
 *
 * This is a scheduler safe way to enable preemption and not miss
 * any preemption checks. The disable call saved the state of
 * preemption. If @resched is set, then we were either inside an
 * atomic section or inside the scheduler (we would have already
 * scheduled otherwise). In this case, we do not want to call the
 * normal preempt_enable, but preempt_enable_no_resched instead.
 */
static inline void ftrace_preempt_enable(int resched)
{
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}

#ifdef CONFIG_BRANCH_TRACER
extern int enable_branch_tracing(struct trace_array *tr);
extern void disable_branch_tracing(void);
static inline int trace_branch_enable(struct trace_array *tr)
{
	if (trace_flags & TRACE_ITER_BRANCH)
		return enable_branch_tracing(tr);
	return 0;
}
static inline void trace_branch_disable(void)
{
	/* due to races, always disable */
	disable_branch_tracing();
}
#else
static inline int trace_branch_enable(struct trace_array *tr)
{
	return 0;
}
static inline void trace_branch_disable(void)
{
}
#endif /* CONFIG_BRANCH_TRACER */

/* set ring buffers to default size if not already done so */
int tracing_update_buffers(void);

/* trace event type bit fields, not numeric */
enum {
	TRACE_EVENT_TYPE_PRINTF		= 1,
	TRACE_EVENT_TYPE_RAW		= 2,
};

struct ftrace_event_field {
	struct list_head	link;
	char			*name;
	char			*type;
	int			offset;
	int			size;
	int			is_signed;
};

struct event_filter {
	int			n_preds;
	struct filter_pred	**preds;
	char			*filter_string;
};

struct event_subsystem {
	struct list_head	list;
	const char		*name;
	struct dentry		*entry;
	void			*filter;
};

struct filter_pred;

typedef int (*filter_pred_fn_t) (struct filter_pred *pred, void *event,
				 int val1, int val2);

struct filter_pred {
	filter_pred_fn_t	fn;
	u64			val;
	char			str_val[MAX_FILTER_STR_VAL];
	int			str_len;
	char			*field_name;
	int			offset;
	int			not;
	int			op;
	int			pop_n;
};
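/*
 * Event filters are boolean expressions over an event's fields, written to
 * the event's "filter" file in debugfs. An illustrative example (the field
 * names depend on the event and are listed in its "format" file):
 *
 *	# echo 'bytes_req > 256 && bytes_alloc > 512' > \
 *		/sys/kernel/debug/tracing/events/kmem/kmalloc/filter
 *
 * apply_event_filter() below parses such a string into filter_pred entries
 * that filter_match_preds() evaluates against each recorded event.
 */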
extern void print_event_filter(struct ftrace_event_call *call,
			       struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call,
			      char *filter_string);
extern int apply_subsystem_event_filter(struct event_subsystem *system,
					char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system,
					 struct trace_seq *s);

static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec,
		     struct ring_buffer *buffer,
		     struct ring_buffer_event *event)
{
	if (unlikely(call->filter_active) && !filter_match_preds(call, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}

#define DEFINE_COMPARISON_PRED(type)					\
static int filter_pred_##type(struct filter_pred *pred, void *event,	\
			      int val1, int val2)			\
{									\
	type *addr = (type *)(event + pred->offset);			\
	type val = (type)pred->val;					\
	int match = 0;							\
									\
	switch (pred->op) {						\
	case OP_LT:							\
		match = (*addr < val);					\
		break;							\
	case OP_LE:							\
		match = (*addr <= val);					\
		break;							\
	case OP_GT:							\
		match = (*addr > val);					\
		break;							\
	case OP_GE:							\
		match = (*addr >= val);					\
		break;							\
	default:							\
		break;							\
	}								\
									\
	return match;							\
}

#define DEFINE_EQUALITY_PRED(size)					\
static int filter_pred_##size(struct filter_pred *pred, void *event,	\
			      int val1, int val2)			\
{									\
	u##size *addr = (u##size *)(event + pred->offset);		\
	u##size val = (u##size)pred->val;				\
	int match;							\
									\
	match = (val == *addr) ^ pred->not;				\
									\
	return match;							\
}

extern struct mutex event_mutex;
extern struct list_head ftrace_events;

extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];

#undef TRACE_EVENT_FORMAT
#define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt)	\
	extern struct ftrace_event_call event_##call;
#undef TRACE_EVENT_FORMAT_NOFILTER
#define TRACE_EVENT_FORMAT_NOFILTER(call, proto, args, fmt, tstruct, tpfmt)
#include "trace_event_types.h"

#endif /* _LINUX_KERNEL_TRACE_H */