/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.rst
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
void early_trace_init(void);
#else
static inline void trace_init(void) { }
static inline void early_trace_init(void) { }
#endif

struct module;
struct ftrace_hash;
struct ftrace_direct_func;

#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
	defined(CONFIG_DYNAMIC_FTRACE)
const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym);
int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
			   char *type, char *name,
			   char *module_name, int *exported);
#else
static inline const char *
ftrace_mod_address_lookup(unsigned long addr, unsigned long *size,
			  unsigned long *off, char **modname, char *sym)
{
	return NULL;
}
static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *value,
					 char *type, char *name,
					 char *module_name, int *exported)
{
	return -1;
}
#endif

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
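
/*
 * Example (illustrative sketch only, not part of this API): the shape of
 * a callback matching ftrace_func_t. The name my_callback is hypothetical.
 * On arches where ARCH_SUPPORTS_FTRACE_OPS is 0, the list function is
 * forced (FTRACE_FORCE_LIST_FUNC) so that op, and regs where supported,
 * are still passed to the callback.
 *
 *	static void my_callback(unsigned long ip, unsigned long parent_ip,
 *				struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// ip is the traced function, parent_ip its call site.
 *		// This runs on every traced function call: keep it fast
 *		// and do not sleep.
 *	}
 */
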
/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 * SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
 * IPMODIFY are attribute flags: they can be set only before
 * registering the ftrace_ops, and cannot be modified while registered.
 * Changing those attribute flags after registering the ftrace_ops will
 * cause unexpected results.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote a dynamically
 *           allocated ftrace_ops which needs special care
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace
 *            infrastructure that the callback has its own recursion
 *            protection. If it does not set this, then the ftrace
 *            infrastructure will add recursion protection for the caller.
 * STUB - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first
 *            time register_ftrace_function() is called on it, it will
 *            initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 * IPMODIFY - The ops can modify the IP register. This can only be set with
 *            SAVE_REGS. If another ops with this flag set is already
 *            registered for any of the functions that this ops will be
 *            registered for, then this ops will fail to register or
 *            set_filter_ip.
 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
 * RCU - Set when the ops can only be called when RCU is watching.
 * TRACE_ARRAY - The ops->private points to a trace_array descriptor.
 * PERMANENT - Set when the ops is permanent and should not be affected by
 *             ftrace_enabled.
 * DIRECT - Used by the direct ftrace_ops helper for direct functions
 *          (internal ftrace only, should not be used by others)
 */
enum {
	FTRACE_OPS_FL_ENABLED			= BIT(0),
	FTRACE_OPS_FL_DYNAMIC			= BIT(1),
	FTRACE_OPS_FL_SAVE_REGS			= BIT(2),
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= BIT(3),
	FTRACE_OPS_FL_RECURSION_SAFE		= BIT(4),
	FTRACE_OPS_FL_STUB			= BIT(5),
	FTRACE_OPS_FL_INITIALIZED		= BIT(6),
	FTRACE_OPS_FL_DELETED			= BIT(7),
	FTRACE_OPS_FL_ADDING			= BIT(8),
	FTRACE_OPS_FL_REMOVING			= BIT(9),
	FTRACE_OPS_FL_MODIFYING			= BIT(10),
	FTRACE_OPS_FL_ALLOC_TRAMP		= BIT(11),
	FTRACE_OPS_FL_IPMODIFY			= BIT(12),
	FTRACE_OPS_FL_PID			= BIT(13),
	FTRACE_OPS_FL_RCU			= BIT(14),
	FTRACE_OPS_FL_TRACE_ARRAY		= BIT(15),
	FTRACE_OPS_FL_PERMANENT			= BIT(16),
	FTRACE_OPS_FL_DIRECT			= BIT(17),
};
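
/*
 * Example (sketch, hypothetical names): a handler registered with
 * FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED must, as described above, cope
 * with regs being NULL on arches without CONFIG_DYNAMIC_FTRACE_WITH_REGS:
 *
 *	static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
 *				     struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		if (!regs)
 *			return;	// arch could not save regs for us
 *		// inspect regs here (e.g. instruction_pointer(regs))
 *	}
 */
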
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hashes used to tell which functions a callback traces */
struct ftrace_ops_hash {
	struct ftrace_hash __rcu	*notrace_hash;
	struct ftrace_hash __rcu	*filter_hash;
	struct mutex			regex_lock;
};

void ftrace_free_init_mem(void);
void ftrace_free_mem(struct module *mod, void *start, void *end);
#else
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection, unless
 * the RCU flag is set. If ftrace_ops is allocated and not part of kernel
 * core data, unregistering it will perform a scheduling on all CPUs
 * to make sure that there are no more users. Depending on the load of the
 * system that may take a bit of time.
 *
 * Any private data added must also take care not to be freed, and if
 * private data is added to a ftrace_ops that is in core code, the user of
 * the ftrace_ops must perform a schedule_on_each_cpu() before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t			func;
	struct ftrace_ops __rcu		*next;
	unsigned long			flags;
	void				*private;
	ftrace_func_t			saved_func;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash		local_hash;
	struct ftrace_ops_hash		*func_hash;
	struct ftrace_ops_hash		old_hash;
	unsigned long			trampoline;
	unsigned long			trampoline_size;
#endif
};

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0,	/* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);

extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);
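
/*
 * Example (sketch, hypothetical names): the minimal registration life
 * cycle. Per the comment above, the ops must be static, never freed,
 * and its next pointer never touched:
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 *	// With no filter set, this traces every function; see
 *	// ftrace_set_filter() below to narrow the set first.
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */
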
#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
	unsigned long direct; /* for direct lookup only */
};

struct dyn_ftrace;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
extern int ftrace_direct_func_count;
int register_ftrace_direct(unsigned long ip, unsigned long addr);
int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
				struct dyn_ftrace *rec,
				unsigned long old_addr,
				unsigned long new_addr);
#else
# define ftrace_direct_func_count 0
static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
{
	return -ENOTSUPP;
}
static inline int modify_ftrace_direct(unsigned long ip,
				       unsigned long old_addr, unsigned long new_addr)
{
	return -ENOTSUPP;
}
static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
{
	return NULL;
}
static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
					      struct dyn_ftrace *rec,
					      unsigned long old_addr,
					      unsigned long new_addr)
{
	return -ENODEV;
}
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
 * This must be implemented by the architecture.
 * It is the way the ftrace direct_ops helper, when called
 * via ftrace (because there are other callbacks besides the
 * direct call), can inform the architecture's trampoline that this
 * routine has a direct caller, and what the caller is.
 *
 * For example, in x86, it returns the direct caller
 * callback function via the regs->orig_ax parameter.
 * Then in the ftrace trampoline, if this is set, it makes
 * the return from the trampoline jump to the direct caller
 * instead of going back to the function it just traced.
 */
static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
						 unsigned long addr) { }
#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
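
/*
 * Example (sketch, in the style of samples/ftrace/): attaching a direct
 * trampoline to one function. my_tramp is hypothetical and would be
 * written in arch assembly so that it preserves the traced function's
 * register state:
 *
 *	extern void my_tramp(void);	// arch asm, saves/restores regs
 *
 *	register_ftrace_direct((unsigned long)wake_up_process,
 *			       (unsigned long)my_tramp);
 *	...
 *	unregister_ftrace_direct((unsigned long)wake_up_process,
 *				 (unsigned long)my_tramp);
 */
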
#ifdef CONFIG_STACK_TRACER

extern int stack_tracer_enabled;

int stack_trace_sysctl(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp,
		       loff_t *ppos);

/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);

/**
 * stack_tracer_disable - temporarily disable the stack tracer
 *
 * There are a few locations (namely in RCU) where stack tracing
 * cannot be executed. This function is used to disable stack
 * tracing during those critical sections.
 *
 * This function must be called with preemption or interrupts
 * disabled and stack_tracer_enable() must be called shortly after
 * while preemption or interrupts are still disabled.
 */
static inline void stack_tracer_disable(void)
{
	/* Preemption or interrupts must be disabled */
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_inc(disable_stack_tracer);
}

/**
 * stack_tracer_enable - re-enable the stack tracer
 *
 * After stack_tracer_disable() is called, stack_tracer_enable()
 * must be called shortly afterward.
 */
static inline void stack_tracer_enable(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
		WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
	this_cpu_dec(disable_stack_tracer);
}
#else
static inline void stack_tracer_disable(void) { }
static inline void stack_tracer_enable(void) { }
#endif
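
/*
 * Example (sketch): the disable/enable pair works on a per-CPU counter,
 * so the caller must stay on one CPU between the two calls, e.g. under
 * preempt_disable():
 *
 *	preempt_disable();
 *	stack_tracer_disable();
 *	// section that must not be stack-traced
 *	stack_tracer_enable();
 *	preempt_enable();
 */
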
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

enum ftrace_bug_type {
	FTRACE_BUG_UNKNOWN,
	FTRACE_BUG_INIT,
	FTRACE_BUG_NOP,
	FTRACE_BUG_CALL,
	FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;

/*
 * Archs can set this to point to a variable that holds the value that was
 * expected at the call site before calling ftrace_bug().
 */
extern const void *ftrace_expected;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;

extern int ftrace_text_reserved(const void *start, const void *end);

struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);

bool is_ftrace_trampoline(unsigned long addr);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *  IPMODIFY - the record allows for the IP address to be changed.
 *  DISABLED - the record is not ready to be touched yet
 *  DIRECT   - there is a direct function to call
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
	FTRACE_FL_IPMODIFY	= (1UL << 26),
	FTRACE_FL_DISABLED	= (1UL << 25),
	FTRACE_FL_DIRECT	= (1UL << 24),
	FTRACE_FL_DIRECT_EN	= (1UL << 23),
};

#define FTRACE_REF_MAX_SHIFT	23
#define FTRACE_FL_BITS		9
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)

struct dyn_ftrace {
	unsigned long		ip; /* address of mcount call-site */
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
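
/*
 * Example (sketch, reusing the hypothetical my_ops): narrowing what a
 * callback traces before registering it. A nonzero reset clears any
 * previously set entries first:
 *
 *	ftrace_set_filter(&my_ops, "schedule", strlen("schedule"), 1);
 *	ftrace_set_notrace(&my_ops, "schedule_idle", strlen("schedule_idle"), 0);
 *	register_ftrace_function(&my_ops);
 */
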
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
	FTRACE_MAY_SLEEP		= (1 << 5),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE      - The function is already what we want it to be
 *  MAKE_CALL   - Start tracing the function
 *  MODIFY_CALL - Change the call site to a different tracing address
 *                (e.g. to start or stop saving regs)
 *  MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_PROBES	= (1 << 3),
	FTRACE_ITER_PROBE	= (1 << 4),
	FTRACE_ITER_MOD		= (1 << 5),
	FTRACE_ITER_ENABLED	= (1 << 6),
};

void arch_ftrace_update_code(int command);
void arch_ftrace_update_trampoline(struct ftrace_ops *ops);
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec);
void arch_ftrace_trampoline_free(struct ftrace_ops *ops);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
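
/*
 * Example (sketch): walking every patchable call site. This is meant
 * for arch boot/update code, with ftrace's internal locking already
 * taken care of by the caller:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		// rec->ip is the mcount/fentry call site to patch
 *	}
 */
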
int ftrace_update_record(struct dyn_ftrace *rec, bool enable);
int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_location_range(unsigned long start, unsigned long end);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_init_nop - initialize a nop call site
 * @mod: module structure if called by module load initialization
 * @rec: the call site record (e.g. mcount/fentry)
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should contain the contents created by
 * the compiler
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
#ifndef ftrace_init_nop
static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
{
	return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
}
#endif

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the call site record (e.g. mcount/fentry)
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
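
/*
 * Example (illustrative sketch only, not a real arch implementation):
 * the read/compare/write pattern the comments above describe.
 * Instruction encodings and text-poking primitives are entirely arch
 * specific; arch_nop_insn(), arch_make_call_insn() and arch_patch_text()
 * are hypothetical helpers, while MCOUNT_INSN_SIZE comes from the arch's
 * asm/ftrace.h:
 *
 *	int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 *	{
 *		unsigned char old[MCOUNT_INSN_SIZE];
 *		unsigned char new[MCOUNT_INSN_SIZE];
 *
 *		// read what is currently at the call site
 *		if (probe_kernel_read(old, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *			return -EFAULT;
 *		// verify it is the nop we expect before touching it
 *		if (memcmp(old, arch_nop_insn(rec->ip), MCOUNT_INSN_SIZE))
 *			return -EINVAL;
 *		// build the call to addr and patch it in
 *		arch_make_call_insn(rec->ip, addr, new);
 *		if (arch_patch_text((void *)rec->ip, new, MCOUNT_INSN_SIZE))
 *			return -EPERM;
 *		return 0;
 *	}
 */
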
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the call site record (e.g. mcount/fentry)
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again, users of functions that take ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)
#define ftrace_ops_set_global_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
					  size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }

static inline bool is_ftrace_trampoline(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/* totally disable ftrace - cannot re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
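
/*
 * Example (sketch): the save/restore pair, with the caller providing its
 * own serialization against other ftrace_enabled writers as required by
 * the comment above:
 *
 *	int saved = __ftrace_enabled_save();
 *	// ftrace_enabled is now 0; run code that must not be traced
 *	__ftrace_enabled_restore(saved);
 */
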
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))

static inline unsigned long get_lock_parent_ip(void)
{
	unsigned long addr = CALLER_ADDR0;

	if (!in_lock_functions(addr))
		return addr;
	addr = CALLER_ADDR1;
	if (!in_lock_functions(addr))
		return addr;
	return CALLER_ADDR2;
}

#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will generate
 * code for the CALLER_ADDR uses, when we really want these to be real nops.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION	"__patchable_function_entries"
#else
#define FTRACE_CALLSITE_SECTION	"__mcount_loc"
#endif
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
} __packed;

/*
 * Structure that defines a return function trace.
 * It's already packed but the attribute "packed" is needed
 * to remove extra padding at the end.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	unsigned long long calltime;
	unsigned long long rettime;
	int depth;
} __packed;

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */

extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

struct fgraph_ops {
	trace_func_graph_ent_t		entryfunc;
	trace_func_graph_ret_t		retfunc;
};

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
#ifdef CONFIG_FUNCTION_PROFILER
	unsigned long long subtime;
#endif
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	unsigned long fp;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	unsigned long *retp;
#endif
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
function_graph_enter(unsigned long ret, unsigned long func,
		     unsigned long frame_pointer, unsigned long *retp);

struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx);

unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp);

/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer, but we still want it to be traced by the usual function
 * tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph	notrace

#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32

extern int register_ftrace_graph(struct fgraph_ops *ops);
extern void unregister_ftrace_graph(struct fgraph_ops *ops);
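
/*
 * Example (sketch, hypothetical names): a minimal function graph user.
 * Returning nonzero from the entry handler tells fgraph to hook this
 * function's return as well; returning 0 skips it:
 *
 *	static int my_ent(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// also trace the return
 *	}
 *	static void my_ret(struct ftrace_graph_ret *trace)
 *	{
 *		// trace->rettime - trace->calltime is the duration
 *	}
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_ent,
 *		.retfunc	= my_ret,
 *	};
 *	// register_ftrace_graph(&my_gops);
 *	// ...
 *	// unregister_ftrace_graph(&my_gops);
 */
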
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

/* Define as macros as fgraph_ops may not be defined */
#define register_ftrace_graph(ops) ({ -1; })
#define unregister_ftrace_graph(ops) do { } while (0)

static inline unsigned long
ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret,
		      unsigned long *retp)
{
	return ret;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};

static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}

enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;
extern int tracepoint_printk;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

int tracepoint_printk_sysctl(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos);

#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */