/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos);

struct ftrace_ops;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

struct ftrace_regs {
	struct pt_regs		regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)

/*
 * ftrace_instruction_pointer_set() is to be defined by the architecture
 * to allow setting of the instruction pointer from the ftrace_regs when
 * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel
 * patching.
 */
#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
	if (!fregs)
		return NULL;

	return arch_ftrace_get_regs(fregs);
}

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

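/*
 * Example (illustrative sketch only, not part of this header): a minimal
 * callback matching the ftrace_func_t signature above.  The names
 * my_trace_callback/my_hits are hypothetical.  Whether fregs carries a
 * full pt_regs depends on the ftrace_ops flags and on arch support, which
 * is why the result of ftrace_get_regs() must be checked for NULL.
 *
 *	static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
 *				      struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *		if (!regs)
 *			return;		// regs only available with SAVE_REGS
 *
 *		trace_printk("%ps hit, pc=%lx\n",
 *			     (void *)ip, instruction_pointer(regs));
 *	}
 */
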
/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback needs recursion protection. If it does
 *            not set this, then the ftrace infrastructure will assume
 *            that the callback can handle recursion on its own.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};

#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set.  If ftrace_ops is allocated and not part of kernel
 * core data, the unregistering of it will perform a scheduling on all CPUs
 * to make sure that there are no more users.  Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed and if private
 * data is added to a ftrace_ops that is in core code, the user of the
 * ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries.  The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism.  The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)					\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly.  These functions do modify read_mostly variables
 * so use them sparingly.  Never free an ftrace_ops or modify the
 * next pointer after it has been registered.  Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct ftrace_regs *fregs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

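/*
 * Example (illustrative sketch only, not from kernel code): a minimal
 * function-trace user following the rules documented above
 * register_ftrace_function().  The ops is static, and my_trace_callback
 * is the hypothetical handler sketched earlier in this header.
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_trace_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	static int __init my_tracer_init(void)
 *	{
 *		return register_ftrace_function(&my_ops);
 *	}
 *
 *	static void __exit my_tracer_exit(void)
 *	{
 *		unregister_ftrace_function(&my_ops);
 *	}
 */
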
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
						 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif

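/*
 * Example (illustrative sketch, not taken from kernel code): pausing the
 * stack tracer around a critical section, following the rules documented
 * on stack_tracer_disable() above.  The code between the two calls is
 * hypothetical.
 *
 *	preempt_disable_notrace();
 *	stack_tracer_disable();
 *	// ... code that must not be stack-traced ...
 *	stack_tracer_enable();
 *	preempt_enable_notrace();
 */
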
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, which is '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);

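/*
 * Example (illustrative sketch, not from kernel code): restricting a
 * hypothetical my_ops (see the earlier sketches) to a single function
 * before registering it.  Filters must be set up before
 * register_ftrace_function() attaches the callback; reset=1 clears any
 * previously set filter on the ops.
 *
 *	static int __init my_filtered_tracer_init(void)
 *	{
 *		int ret;
 *
 *		ret = ftrace_set_filter(&my_ops, "kfree", strlen("kfree"), 1);
 *		if (ret)
 *			return ret;
 *
 *		return register_ftrace_function(&my_ops);
 *	}
 */
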
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Stop saving regs for the function
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))

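/*
 * Example (illustrative sketch): walking every patched call site with the
 * iterator above, as arch or core update code might.  The loop body here
 * is hypothetical.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// rec->ip is the address of the mcount/fentry call site
 *	}
 */
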
int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_need_init_nop - return whether nop call sites should be initialized
 *
 * Normally the compiler's -mnop-mcount generates suitable nops, so we don't
 * need to call ftrace_init_nop() if the code is built with that flag.
 * Architectures where this is not always the case may define their own
 * condition.
 *
 * Return must be:
 *  0	    if ftrace_init_nop() should be called
 *  Nonzero if ftrace_init_nop() should not be called
 */

#ifndef ftrace_need_init_nop
#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT))
#endif

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

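/*
 * Illustrative sketch only (not a real implementation): the
 * read/compare/write pattern that ftrace_make_nop()-style helpers are
 * expected to follow, using the return codes documented above.
 * ftrace_expected_insn(), arch_nop_insn() and arch_poke_text() are
 * hypothetical arch helpers invented purely for illustration, and a
 * single unsigned long is used as a stand-in for the patched instruction.
 *
 *	static int my_arch_make_nop(struct module *mod,
 *				    struct dyn_ftrace *rec, unsigned long addr)
 *	{
 *		unsigned long expected = ftrace_expected_insn(rec->ip, addr);
 *		unsigned long old;
 *
 *		if (copy_from_kernel_nofault(&old, (void *)rec->ip, sizeof(old)))
 *			return -EFAULT;	// could not read the call site
 *		if (old != expected)
 *			return -EINVAL;	// site does not hold the expected call
 *		if (arch_poke_text(rec->ip, arch_nop_insn()))
 *			return -EPERM;	// write to the call site failed
 *		return 0;
 *	}
 */
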
/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

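/*
 * Example (illustrative sketch): the save/restore pair above is meant to
 * bracket code that must run with function tracing off.  The caller is
 * responsible for serializing against other updaters of ftrace_enabled;
 * the code between the two calls is hypothetical.
 *
 *	int saved;
 *
 *	saved = __ftrace_enabled_save();
 *	// ... code that must not be function-traced ...
 *	__ftrace_enabled_restore(saved);
 */
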
#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	int depth;
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
	unsigned long long calltime;
	unsigned long long rettime;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

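/*
 * Example (illustrative sketch, not from kernel code): a pair of
 * function-graph handlers wired into an fgraph_ops, to be registered
 * later with register_ftrace_graph() declared below.  Returning nonzero
 * from the entry handler tells the graph tracer to keep the entry and
 * call the return handler for that function.  The names used here are
 * hypothetical.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// also trace the matching return
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		trace_printk("%ps took %llu ns\n", (void *)trace->func,
 *			     trace->rettime - trace->calltime);
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_graph_entry,
 *		.retfunc	= my_graph_return,
 *	};
 */
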
/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it to be traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */