/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_recursion.h>
#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
#endif

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void *buffer, size_t *lenp, loff_t *ppos);

struct ftrace_ops;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS

struct ftrace_regs {
	struct pt_regs		regs;
};
#define arch_ftrace_get_regs(fregs) (&(fregs)->regs)

/*
 * ftrace_instruction_pointer_set() is to be defined by the architecture
 * to allow setting of the instruction pointer from the ftrace_regs when
 * HAVE_DYNAMIC_FTRACE_WITH_ARGS is set and it supports live kernel
 * patching.
 */
#define ftrace_instruction_pointer_set(fregs, ip) do { } while (0)
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */

static __always_inline struct pt_regs *ftrace_get_regs(struct ftrace_regs *fregs)
{
	if (!fregs)
		return NULL;

	return arch_ftrace_get_regs(fregs);
}

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct ftrace_regs *fregs);
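/*
 * Illustrative sketch (hypothetical my_callback; not part of this header's
 * API): a callback with the ftrace_func_t signature that uses
 * ftrace_get_regs(). Note that fregs (and thus regs) may be NULL unless the
 * ops was registered with FTRACE_OPS_FL_SAVE_REGS on an arch that supports
 * saving regs.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct ftrace_regs *fregs)
 *	{
 *		struct pt_regs *regs = ftrace_get_regs(fregs);
 *
 *		if (regs)
 *			;	(full register state is available here)
 *	}
 */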
ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION, STUB and
 * IPMODIFY are a kind of attribute flags which can be set only before
 * registering the ftrace_ops, and can not be modified while registered.
 * Changing those attribute flags after registering ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback needs recursion protection. If it does
 *            not set this, then the ftrace infrastructure will assume
 *            that the callback can handle recursion on its own.
 * STUB   - The ftrace_ops is just a place holder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already registered
 *            for any of the functions that this ops will be registered for, then
 *            this ops will fail to register or set_filter_ip.
 * PID     - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU     - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *            (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION			= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};
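/*
 * Illustrative sketch (hypothetical my_ops/my_callback): a minimal
 * ftrace_ops with its attribute flags set before registration.
 * register_ftrace_function() is declared further down in this header.
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	ret = register_ftrace_function(&my_ops);
 */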
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will schedule on all CPUs to make sure that
 * there are no more users. Depending on the load of the system that may
 * take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if
 * private data is added to a ftrace_ops that is in core code, the user of
 * the ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
	struct list_head		list;
#endif
};

extern struct ftrace_ops __rcu *ftrace_ops_list;
extern struct ftrace_ops ftrace_list_end;

/*
 * Traverse the ftrace_ops_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_check() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_check() calls are needed to handle
 * concurrent insertions into the ftrace_ops_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_check(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_check((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))
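/*
 * Illustrative sketch of how the pair of macros above is used (this
 * pattern lives in the ftrace core; the real list function also checks
 * op flags and recursion before invoking the callback):
 *
 *	struct ftrace_ops *op;
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		op->func(ip, parent_ip, op, fregs);
 *	} while_for_each_ftrace_op(op);
 */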
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * These functions do modify read_mostly variables so use them sparingly.
 * Never free an ftrace_ops or modify the next pointer after it has been
 * registered. Even after unregistering it, the next pointer may still be
 * used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct ftrace_regs *fregs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
unsigned long ftrace_find_rec_direct(unsigned long ip);
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
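/*
 * Illustrative sketch (hypothetical my_tramp): attaching a direct
 * trampoline to one traced function. my_tramp would be an arch-level
 * assembly trampoline that saves and restores the registers it clobbers.
 *
 *	ret = register_ftrace_direct((unsigned long)wake_up_process,
 *				     (unsigned long)my_tramp);
 *	...
 *	ret = unregister_ftrace_direct((unsigned long)wake_up_process,
 *				       (unsigned long)my_tramp);
 */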
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
						 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		       size_t *lenp, loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
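/*
 * Illustrative sketch of the intended pairing: both calls are made on the
 * same CPU with preemption (or interrupts) disabled across the region.
 *
 *	preempt_disable();
 *	stack_tracer_disable();
 *	...	critical section that must not be stack-traced ...
 *	stack_tracer_enable();
 *	preempt_enable();
 */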
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of the number of
 * callbacks that have registered the function that the dyn_ftrace
 * descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED  - the function is being traced
 *  REGS     - the record wants the function to save regs
 *  REGS_EN  - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & FTRACE_REF_MAX)

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
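/*
 * Illustrative sketch (hypothetical my_ops): restricting an ftrace_ops to
 * a single function by name. The filter must be set up before
 * register_ftrace_function() is called on the ops.
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
 *	register_ftrace_function(&my_ops);
 */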
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Change the call target (e.g. to start or stop saving
 *                regs, or to use a custom trampoline)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
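/*
 * Illustrative sketch of walking every dyn_ftrace record with the
 * iterator above (arch code uses this pattern during code patching):
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		...	inspect or patch the call site at rec->ip ...
 *	}
 */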

int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif
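/*
 * Illustrative sketch of the read/compare/write discipline the kernel-doc
 * above describes. make_call_insn(), make_nop_insn() and patch_text() are
 * hypothetical arch helpers here; real implementations live in each arch's
 * ftrace.c and must also handle cross-CPU code modification.
 *
 *	unsigned char expect[MCOUNT_INSN_SIZE];
 *	unsigned char nop[MCOUNT_INSN_SIZE];
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	make_call_insn(expect, rec->ip, addr);	(the bytes we expect to find)
 *	make_nop_insn(nop);			(the bytes we want to install)
 *	if (copy_from_kernel_nofault(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	if (memcmp(cur, expect, MCOUNT_INSN_SIZE))
 *		return -EINVAL;
 *	if (patch_text((void *)rec->ip, nop, MCOUNT_INSN_SIZE))
 *		return -EPERM;
 *	return 0;
 */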

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that take an ftrace_ops may not have it
 * defined when ftrace is not enabled, but these functions may still be
 * called. Use a macro instead of an inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
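/*
 * Illustrative sketch of the save/restore pairing (the caller provides
 * whatever synchronization keeps ftrace_enabled stable in between):
 *
 *	int saved = __ftrace_enabled_save();
 *	...	region that must run with ftrace disabled ...
 *	__ftrace_enabled_restore(saved);
 */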

/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	int depth;
	/* Number of functions that overran the depth limit for current task */
	unsigned int overrun;
	unsigned long long calltime;
	unsigned long long rettime;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it traced by the normal function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph		notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
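/*
 * Illustrative sketch (hypothetical my_entry/my_return): wiring up a graph
 * tracer. The entry handler returns nonzero when the matching return
 * should be traced as well, zero to skip it.
 *
 *	static int my_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	(also hook this function's return)
 *	}
 *
 *	static void my_return(struct ftrace_graph_ret *trace)
 *	{
 *	}
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,
 *		.retfunc	= my_return,
 *	};
 *
 *	register_ftrace_graph(&my_gops);
 */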

extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void *buffer, size_t *lenp, loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */