// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE

static int ftrace_poke_late = 0;

void ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race where module loading
	 * or live kernel patching changes the text permissions while
	 * ftrace has them set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
}

void ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * updates that they request, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
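/*
 * Worked example (addresses assumed for illustration, not from a real
 * build): both replacements above are MCOUNT_INSN_SIZE (5) bytes.
 * ftrace_nop_replace() hands back the kernel's preferred 5-byte nop,
 * while ftrace_call_replace() builds "e8 <rel32>", a near call whose
 * displacement is relative to the byte after the instruction:
 *
 *     ip    = 0xffffffff81000000
 *     addr  = 0xffffffff81000020
 *     rel32 = addr - (ip + CALL_INSN_SIZE) = 0x20 - 0x5 = 0x1b
 *
 *     e8 1b 00 00 00        call 0xffffffff81000020
 */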
static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read the code with copy_from_kernel_nofault(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 * It is only reached from __ftrace_replace_code(), which is called by
 * ftrace_replace_code() (overridden on x86) and by ftrace_update_code(),
 * which turns mcount calls into nops or nops into function calls, but
 * never converts a call site from the non-regs variant to the regs
 * variant, which is what ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}
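/*
 * Illustrative sketch (byte values assumed for the example, not taken
 * from a real build): the ftrace_make_nop(..., MCOUNT_ADDR) path above
 * converts a compiler-generated __fentry__ call site to a nop at boot:
 *
 *     before:  e8 xx xx xx xx     call __fentry__
 *     after:   0f 1f 44 00 00     nopl 0x0(%rax,%rax,1)
 *
 * The "old" bytes are regenerated from MCOUNT_ADDR so that
 * ftrace_verify_code() can confirm the site still holds the original
 * call before text_poke_early()/text_poke_queue() overwrite it;
 * ftrace_make_call() performs the exact reverse swap.
 */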
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}

	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};
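/*
 * Illustrative helper (hypothetical, not part of the original file):
 * overlaying ftrace_op_code_union on the 7-byte "48 8b 15 <disp32>"
 * instruction described above puts the opcode bytes in .op[] and the
 * displacement in .offset, so the rip-relative target can be recovered
 * as "address of the next instruction + disp32".
 */
static inline void *op_ref_target_sketch(const void *insn)
{
	union ftrace_op_code_union op;

	memcpy(&op, insn, OP_REF_SIZE);
	/* rip-relative addressing is relative to the following instruction */
	return (void *)insn + OP_REF_SIZE + op.offset;
}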
#define RET_SIZE	(IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough space to store the ftrace_caller code,
	 * the ret instruction, as well as the address of the ftrace_ops
	 * this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, &__x86_return_thunk, JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}
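/*
 * Illustrative layout of the allocation made above (field widths in the
 * diagram are arbitrary; the sizes are whatever create_trampoline()
 * computed):
 *
 *   trampoline                 trampoline+size    +RET_SIZE
 *   |                          |                  |
 *   v                          v                  v
 *   +--------------------------+------------------+---------------+
 *   | copy of ftrace_caller    | ret (or jmp to   | ftrace_ops    |
 *   | [_regs] .. *_caller_end  | return thunk)    | pointer slot  |
 *   +--------------------------+------------------+---------------+
 *
 * Worked example of the displacement rewrite (values assumed): with
 * op_offset = 0x10, size = 0x80 and RET_SIZE = 5, the ops slot sits at
 * trampoline + 0x85 and the movq's rip base is trampoline + 0x10 + 7,
 * so op_ptr.offset = 0x85 - 0x17 = 0x6e, making the copied code load
 * the trampoline-local ops pointer instead of function_trace_op.
 */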
void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}
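/*
 * Worked example (addresses assumed): if ftrace_regs_caller is at
 * 0xffffffff81001000 and ftrace_regs_call at 0xffffffff81001040, then
 * calc_trampoline_call_offset(true) returns 0x40. Because every
 * dynamic trampoline is a byte-for-byte copy of that caller, the call
 * that arch_ftrace_update_trampoline() live-patches with text_poke_bp()
 * sits at ops->trampoline + 0x40 in each such trampoline.
 */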
/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}

	return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#if !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS) && \
	defined(CONFIG_FUNCTION_GRAPH_TRACER)
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#if defined(CONFIG_DYNAMIC_FTRACE) && !defined(CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS)
extern void ftrace_graph_call(void);
static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE && !CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
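/*
 * Illustrative encoding (addresses assumed): ftrace_mod_jmp() toggles the
 * 5-byte site at ftrace_graph_call between
 *
 *     e9 <rel32>    jmp ftrace_graph_caller
 * and
 *     e9 <rel32>    jmp ftrace_stub
 *
 * with rel32 = target - (ip + JMP32_INSN_SIZE), the same rip-relative
 * arithmetic as the call patching earlier in this file, applied via
 * text_poke_bp() because these sites may be executing concurrently.
 */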
/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	int bit;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address: x86_64 kernel addresses live in the upper half of
	 * the address space, so they are negative when treated as signed.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	bit = ftrace_test_recursion_trylock(ip, *parent);
	if (bit < 0)
		return;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
}

#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	struct pt_regs *regs = &fregs->regs;
	unsigned long *stack = (unsigned long *)kernel_stack_pointer(regs);

	prepare_ftrace_return(ip, (unsigned long *)stack, 0);
}
#endif

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */