// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/jump_label.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"

#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
        .func_hash = val, \
        .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define ASSIGN_OPS_HASH(opsname, val)
#endif

DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;

/* Enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        static_branch_enable(&kill_ftrace_graph);
}

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
                         unsigned long frame_pointer, unsigned long *retp)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        current->ret_stack[index].retp = retp;
#endif
        return 0;
}
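
/*
 * For reference, each shadow stack entry filled in above looks roughly
 * like the following sketch. The authoritative definition lives in
 * <linux/ftrace.h>; the last two fields are conditional on the same
 * HAVE_FUNCTION_GRAPH_* options used above:
 *
 *	struct ftrace_ret_stack {
 *		unsigned long		ret;	  // original return address
 *		unsigned long		func;	  // function being traced
 *		unsigned long long	calltime; // entry timestamp
 *		unsigned long		fp;	  // frame pointer (FP_TEST)
 *		unsigned long		*retp;	  // location of the return
 *						  // address (RET_ADDR_PTR)
 *	};
 */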

/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif

int function_graph_enter(unsigned long ret, unsigned long func,
                         unsigned long frame_pointer, unsigned long *retp)
{
        struct ftrace_graph_ent trace;

#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
        /*
         * Skip graph tracing if the return location is served by a direct
         * trampoline, since call sequence and return addresses are
         * unpredictable anyway. Ex: a BPF trampoline may call the original
         * function and may skip a frame depending on the type of BPF
         * programs attached.
         */
        if (ftrace_direct_func_count &&
            ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
                return -EBUSY;
#endif
        trace.func = func;
        trace.depth = ++current->curr_ret_depth;

        if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
                goto out;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace))
                goto out_ret;

        return 0;
 out_ret:
        current->curr_ret_stack--;
 out:
        current->curr_ret_depth--;
        return -EBUSY;
}
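
/*
 * Illustrative sketch (not part of this file): architectures reach
 * function_graph_enter() from their mcount/fentry stub, typically via a
 * helper along these lines, which hijacks the return address on the
 * stack when the push succeeds:
 *
 *	void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
 *				   unsigned long frame_pointer)
 *	{
 *		unsigned long old = *parent;
 *
 *		if (!function_graph_enter(old, ip, frame_pointer, parent))
 *			*parent = (unsigned long)&return_to_handler;
 *	}
 */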

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 with optimize for size (-Os) makes the latest
         * gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     " from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = current->curr_ret_depth--;
        /*
         * We still want to trace interrupts coming in if
         * max_depth is set to 1. Make sure the decrement is
         * seen before ftrace_graph_return.
         */
        barrier();
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
                             void *unused)
{
        switch (state) {
        case PM_HIBERNATION_PREPARE:
                pause_graph_tracing();
                break;

        case PM_POST_HIBERNATION:
                unpause_graph_tracing();
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
        .notifier_call = ftrace_suspend_notifier_call,
};

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        ftrace_graph_return(&trace);
        /*
         * The ftrace_graph_return() may still access the current
         * ret_stack structure, we need to make sure the update of
         * curr_ret_stack is after that.
         */
        barrier();
        current->curr_ret_stack--;

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}
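
/*
 * Illustrative sketch (the real stub is per-arch assembly): the
 * return_to_handler trampoline that was planted in place of the original
 * return address saves the function's return-value registers, asks
 * ftrace_return_to_handler() above for the original address, then
 * restores the return values and jumps there. Conceptually:
 *
 *	save_return_value_regs();
 *	original_ret = ftrace_return_to_handler(frame_pointer);
 *	restore_return_value_regs();
 *	jump_to(original_ret);
 */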

/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from
 * @idx: Index down the shadow stack
 *
 * Return the ret_stack entry on the shadow stack of the @task at the
 * call graph depth @idx, starting with zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
        idx = task->curr_ret_stack - idx;

        if (idx >= 0 && idx <= task->curr_ret_stack)
                return &task->ret_stack[idx];

        return NULL;
}
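
/*
 * For example (illustrative only, not code from this file), a caller can
 * walk a task's saved return addresses from the most recent call outward:
 *
 *	struct ftrace_ret_stack *ret_stack;
 *	int i;
 *
 *	for (i = 0; (ret_stack = ftrace_graph_get_ret_stack(task, i)); i++)
 *		pr_info("  %pS\n", (void *)ret_stack->func);
 */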

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int index = task->curr_ret_stack;
        int i;

        if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
                return ret;

        if (index < 0)
                return ret;

        for (i = 0; i <= index; i++)
                if (task->ret_stack[i].retp == retp)
                        return task->ret_stack[i].ret;

        return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp)
{
        int task_idx;

        if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
                return ret;

        task_idx = task->curr_ret_stack;

        if (!task->ret_stack || task_idx < *idx)
                return ret;

        task_idx -= *idx;
        (*idx)++;

        return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
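
/*
 * Illustrative sketch of the intended caller: a stack unwinder keeps one
 * index variable per unwind and filters every return address it finds
 * through this helper (x86's unwinders do exactly this; the names below
 * are just for the example):
 *
 *	int graph_idx = 0;
 *	...
 *	// for each return address found on the real stack:
 *	addr = ftrace_graph_ret_addr(task, &graph_idx, addr, addr_p);
 */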

static struct ftrace_ops graph_ops = {
        .func = ftrace_graph_func,
        .flags = FTRACE_OPS_FL_INITIALIZED |
                 FTRACE_OPS_FL_PID |
                 FTRACE_OPS_GRAPH_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
        .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
        /* trampoline_size is only needed for dynamically allocated tramps */
#endif
        ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

void ftrace_graph_sleep_time_control(bool enable)
{
        fgraph_sleep_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
        return 0;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
extern void ftrace_stub_graph(struct ftrace_graph_ret *);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
        int i;
        int ret = 0;
        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
        struct task_struct *g, *t;

        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
                ret_stack_list[i] =
                        kmalloc_array(FTRACE_RETFUNC_DEPTH,
                                      sizeof(struct ftrace_ret_stack),
                                      GFP_KERNEL);
                if (!ret_stack_list[i]) {
                        start = 0;
                        end = i;
                        ret = -ENOMEM;
                        goto free;
                }
        }

        rcu_read_lock();
        for_each_process_thread(g, t) {
                if (start == end) {
                        ret = -EAGAIN;
                        goto unlock;
                }

                if (t->ret_stack == NULL) {
                        atomic_set(&t->trace_overrun, 0);
                        t->curr_ret_stack = -1;
                        t->curr_ret_depth = -1;
                        /* Make sure the tasks see the -1 first: */
                        smp_wmb();
                        t->ret_stack = ret_stack_list[start++];
                }
        }

unlock:
        rcu_read_unlock();
free:
        for (i = start; i < end; i++)
                kfree(ret_stack_list[i]);
        return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
                                unsigned int prev_state,
                                struct task_struct *prev,
                                struct task_struct *next)
{
        unsigned long long timestamp;
        int index;

        /*
         * Does the user want to count the time a function was asleep?
         * If so, do not update the time stamps.
         */
        if (fgraph_sleep_time)
                return;

        timestamp = trace_clock_local();

        prev->ftrace_timestamp = timestamp;

        /* only process tasks that we timestamped */
        if (!next->ftrace_timestamp)
                return;

        /*
         * Update all the counters in next to make up for the
         * time next was sleeping.
         */
        timestamp -= next->ftrace_timestamp;

        for (index = next->curr_ret_stack; index >= 0; index--)
                next->ret_stack[index].calltime += timestamp;
}

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
        if (!ftrace_ops_test(&global_ops, trace->func, NULL))
                return 0;
        return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer must test the function
 * against the global ops, and not just trace any function that some
 * other ftrace_ops has registered.
 */
void update_function_graph_func(void)
{
        struct ftrace_ops *op;
        bool do_test = false;

        /*
         * The graph and global ops share the same set of functions
         * to test. If any other ops is on the list, then
         * the graph tracing needs to test if it's the function
         * it should call.
         */
        do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (op != &global_ops && op != &graph_ops &&
                    op != &ftrace_list_end) {
                        do_test = true;
                        /* in double loop, break out with goto */
                        goto out;
                }
        } while_for_each_ftrace_op(op);
 out:
        if (do_test)
                ftrace_graph_entry = ftrace_graph_entry_test;
        else
                ftrace_graph_entry = __ftrace_graph_entry;
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
        atomic_set(&t->trace_overrun, 0);
        t->ftrace_timestamp = 0;
        /* make curr_ret_stack visible before we add the ret_stack */
        smp_wmb();
        t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
        t->curr_ret_stack = -1;
        t->curr_ret_depth = -1;
        /*
         * The idle task has no parent, so it either has its own
         * stack or no stack at all.
         */
        if (t->ret_stack)
                WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

        if (ftrace_graph_active) {
                struct ftrace_ret_stack *ret_stack;

                ret_stack = per_cpu(idle_ret_stack, cpu);
                if (!ret_stack) {
                        ret_stack =
                                kmalloc_array(FTRACE_RETFUNC_DEPTH,
                                              sizeof(struct ftrace_ret_stack),
                                              GFP_KERNEL);
                        if (!ret_stack)
                                return;
                        per_cpu(idle_ret_stack, cpu) = ret_stack;
                }
                graph_init_task(t, ret_stack);
        }
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
        /* Make sure we do not use the parent ret_stack */
        t->ret_stack = NULL;
        t->curr_ret_stack = -1;
        t->curr_ret_depth = -1;

        if (ftrace_graph_active) {
                struct ftrace_ret_stack *ret_stack;

                ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
                                          sizeof(struct ftrace_ret_stack),
                                          GFP_KERNEL);
                if (!ret_stack)
                        return;
                graph_init_task(t, ret_stack);
        }
}

void ftrace_graph_exit_task(struct task_struct *t)
{
        struct ftrace_ret_stack *ret_stack = t->ret_stack;

        t->ret_stack = NULL;
        /* NULL must become visible to IRQs before we free it: */
        barrier();

        kfree(ret_stack);
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
        struct ftrace_ret_stack **ret_stack_list;
        int ret, cpu;

        ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
                                       sizeof(struct ftrace_ret_stack *),
                                       GFP_KERNEL);

        if (!ret_stack_list)
                return -ENOMEM;

        /* The cpu_boot init_task->ret_stack will never be freed */
        for_each_online_cpu(cpu) {
                if (!idle_task(cpu)->ret_stack)
                        ftrace_graph_init_idle_task(idle_task(cpu), cpu);
        }

        do {
                ret = alloc_retstack_tasklist(ret_stack_list);
        } while (ret == -EAGAIN);

        if (!ret) {
                ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
                if (ret)
                        pr_info("ftrace_graph: Couldn't activate tracepoint"
                                " probe to kernel_sched_switch\n");
        }

        kfree(ret_stack_list);
        return ret;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
        int ret = 0;

        mutex_lock(&ftrace_lock);

        /* we currently allow only one tracer registered at a time */
        if (ftrace_graph_active) {
                ret = -EBUSY;
                goto out;
        }

        register_pm_notifier(&ftrace_suspend_notifier);

        ftrace_graph_active++;
        ret = start_graph_tracing();
        if (ret) {
                ftrace_graph_active--;
                goto out;
        }

        ftrace_graph_return = gops->retfunc;

        /*
         * Point the indirect function (__ftrace_graph_entry) at the
         * entryfunc, and the function that actually gets called at
         * the entry_test first. Then let update_function_graph_func()
         * determine whether the entryfunc should be called directly
         * or keep going through the test.
         */
        __ftrace_graph_entry = gops->entryfunc;
        ftrace_graph_entry = ftrace_graph_entry_test;
        update_function_graph_func();

        ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
        mutex_unlock(&ftrace_lock);
        return ret;
}

void unregister_ftrace_graph(struct fgraph_ops *gops)
{
        mutex_lock(&ftrace_lock);

        if (unlikely(!ftrace_graph_active))
                goto out;

        ftrace_graph_active--;
        ftrace_graph_return = ftrace_stub_graph;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        __ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

out:
        mutex_unlock(&ftrace_lock);
}
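
/*
 * Example (illustrative only; my_entry and my_return are hypothetical
 * tracer callbacks, not part of this file): a tracer registers its entry
 * and return hooks through an fgraph_ops and tears them down
 * symmetrically:
 *
 *	static struct fgraph_ops my_gops = {
 *		.entryfunc	= my_entry,	// trace_func_graph_ent_t
 *		.retfunc	= my_return,	// trace_func_graph_ret_t
 *	};
 *
 *	ret = register_ftrace_graph(&my_gops);
 *	...
 *	unregister_ftrace_graph(&my_gops);
 */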