1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Code for replacing ftrace calls with jumps. 4 * 5 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> 6 * 7 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box. 8 * 9 * Added function graph tracer code, taken from x86 that was written 10 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt. 11 * 12 */ 13 14 #define pr_fmt(fmt) "ftrace-powerpc: " fmt 15 16 #include <linux/spinlock.h> 17 #include <linux/hardirq.h> 18 #include <linux/uaccess.h> 19 #include <linux/module.h> 20 #include <linux/ftrace.h> 21 #include <linux/percpu.h> 22 #include <linux/init.h> 23 #include <linux/list.h> 24 25 #include <asm/cacheflush.h> 26 #include <asm/code-patching.h> 27 #include <asm/ftrace.h> 28 #include <asm/syscall.h> 29 #include <asm/inst.h> 30 31 /* 32 * We generally only have a single long_branch tramp and at most 2 or 3 plt 33 * tramps generated. But, we don't use the plt tramps currently. We also allot 34 * 2 tramps after .text and .init.text. So, we only end up with around 3 usable 35 * tramps in total. Set aside 8 just to be sure. 36 */ 37 #define NUM_FTRACE_TRAMPS 8 38 static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS]; 39 40 static ppc_inst_t 41 ftrace_call_replace(unsigned long ip, unsigned long addr, int link) 42 { 43 ppc_inst_t op; 44 45 addr = ppc_function_entry((void *)addr); 46 47 /* if (link) set op to 'bl' else 'b' */ 48 create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0); 49 50 return op; 51 } 52 53 static inline int 54 ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new) 55 { 56 ppc_inst_t replaced; 57 58 /* 59 * Note: 60 * We are paranoid about modifying text, as if a bug was to happen, it 61 * could cause us to read or write to someplace that could cause harm. 62 * Carefully read and modify the code with probe_kernel_*(), and make 63 * sure what we read is what we expected it to be before modifying it. 
64 */ 65 66 /* read the text we want to modify */ 67 if (copy_inst_from_kernel_nofault(&replaced, (void *)ip)) 68 return -EFAULT; 69 70 /* Make sure it is what we expect it to be */ 71 if (!ppc_inst_equal(replaced, old)) { 72 pr_err("%p: replaced (%s) != old (%s)", 73 (void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old)); 74 return -EINVAL; 75 } 76 77 /* replace the text with the new text */ 78 return patch_instruction((u32 *)ip, new); 79 } 80 81 /* 82 * Helper functions that are the same for both PPC64 and PPC32. 83 */ 84 static int test_24bit_addr(unsigned long ip, unsigned long addr) 85 { 86 addr = ppc_function_entry((void *)addr); 87 88 return is_offset_in_branch_range(addr - ip); 89 } 90 91 static int is_bl_op(ppc_inst_t op) 92 { 93 return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0); 94 } 95 96 static int is_b_op(ppc_inst_t op) 97 { 98 return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0); 99 } 100 101 static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op) 102 { 103 int offset; 104 105 offset = PPC_LI(ppc_inst_val(op)); 106 /* make it signed */ 107 if (offset & 0x02000000) 108 offset |= 0xfe000000; 109 110 return ip + (long)offset; 111 } 112 113 #ifdef CONFIG_MODULES 114 static int 115 __ftrace_make_nop(struct module *mod, 116 struct dyn_ftrace *rec, unsigned long addr) 117 { 118 unsigned long entry, ptr, tramp; 119 unsigned long ip = rec->ip; 120 ppc_inst_t op, pop; 121 122 /* read where this goes */ 123 if (copy_inst_from_kernel_nofault(&op, (void *)ip)) { 124 pr_err("Fetching opcode failed.\n"); 125 return -EFAULT; 126 } 127 128 /* Make sure that that this is still a 24bit jump */ 129 if (!is_bl_op(op)) { 130 pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op)); 131 return -EINVAL; 132 } 133 134 /* lets find where the pointer goes */ 135 tramp = find_bl_target(ip, op); 136 137 pr_devel("ip:%lx jumps to %lx", ip, tramp); 138 139 if (module_trampoline_target(mod, tramp, &ptr)) { 140 pr_err("Failed to get 
trampoline target\n"); 141 return -EFAULT; 142 } 143 144 pr_devel("trampoline target %lx", ptr); 145 146 entry = ppc_global_function_entry((void *)addr); 147 /* This should match what was called */ 148 if (ptr != entry) { 149 pr_err("addr %lx does not match expected %lx\n", ptr, entry); 150 return -EINVAL; 151 } 152 153 /* When using -mprofile-kernel or PPC32 there is no load to jump over */ 154 pop = ppc_inst(PPC_RAW_NOP()); 155 156 #ifdef CONFIG_PPC64 157 #ifdef CONFIG_MPROFILE_KERNEL 158 if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) { 159 pr_err("Fetching instruction at %lx failed.\n", ip - 4); 160 return -EFAULT; 161 } 162 163 /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ 164 if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) && 165 !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) { 166 pr_err("Unexpected instruction %s around bl _mcount\n", 167 ppc_inst_as_str(op)); 168 return -EINVAL; 169 } 170 #else 171 /* 172 * Our original call site looks like: 173 * 174 * bl <tramp> 175 * ld r2,XX(r1) 176 * 177 * Milton Miller pointed out that we can not simply nop the branch. 178 * If a task was preempted when calling a trace function, the nops 179 * will remove the way to restore the TOC in r2 and the r2 TOC will 180 * get corrupted. 181 * 182 * Use a b +8 to jump over the load. 183 */ 184 185 pop = ppc_inst(PPC_RAW_BRANCH(8)); /* b +8 */ 186 187 /* 188 * Check what is in the next instruction. We can see ld r2,40(r1), but 189 * on first pass after boot we will see mflr r0. 
190 */ 191 if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) { 192 pr_err("Fetching op failed.\n"); 193 return -EFAULT; 194 } 195 196 if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) { 197 pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op)); 198 return -EINVAL; 199 } 200 #endif /* CONFIG_MPROFILE_KERNEL */ 201 #endif /* PPC64 */ 202 203 if (patch_instruction((u32 *)ip, pop)) { 204 pr_err("Patching NOP failed.\n"); 205 return -EPERM; 206 } 207 208 return 0; 209 } 210 #endif /* CONFIG_MODULES */ 211 212 static unsigned long find_ftrace_tramp(unsigned long ip) 213 { 214 int i; 215 216 /* 217 * We have the compiler generated long_branch tramps at the end 218 * and we prefer those 219 */ 220 for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--) 221 if (!ftrace_tramps[i]) 222 continue; 223 else if (is_offset_in_branch_range(ftrace_tramps[i] - ip)) 224 return ftrace_tramps[i]; 225 226 return 0; 227 } 228 229 static int add_ftrace_tramp(unsigned long tramp) 230 { 231 int i; 232 233 for (i = 0; i < NUM_FTRACE_TRAMPS; i++) 234 if (!ftrace_tramps[i]) { 235 ftrace_tramps[i] = tramp; 236 return 0; 237 } 238 239 return -1; 240 } 241 242 /* 243 * If this is a compiler generated long_branch trampoline (essentially, a 244 * trampoline that has a branch to _mcount()), we re-write the branch to 245 * instead go to ftrace_[regs_]caller() and note down the location of this 246 * trampoline. 247 */ 248 static int setup_mcount_compiler_tramp(unsigned long tramp) 249 { 250 int i; 251 ppc_inst_t op; 252 unsigned long ptr; 253 254 /* Is this a known long jump tramp? */ 255 for (i = 0; i < NUM_FTRACE_TRAMPS; i++) 256 if (!ftrace_tramps[i]) 257 break; 258 else if (ftrace_tramps[i] == tramp) 259 return 0; 260 261 /* New trampoline -- read where this goes */ 262 if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) { 263 pr_debug("Fetching opcode failed.\n"); 264 return -1; 265 } 266 267 /* Is this a 24 bit branch? 
*/ 268 if (!is_b_op(op)) { 269 pr_debug("Trampoline is not a long branch tramp.\n"); 270 return -1; 271 } 272 273 /* lets find where the pointer goes */ 274 ptr = find_bl_target(tramp, op); 275 276 if (ptr != ppc_global_function_entry((void *)_mcount)) { 277 pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr); 278 return -1; 279 } 280 281 /* Let's re-write the tramp to go to ftrace_[regs_]caller */ 282 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 283 ptr = ppc_global_function_entry((void *)ftrace_regs_caller); 284 #else 285 ptr = ppc_global_function_entry((void *)ftrace_caller); 286 #endif 287 if (patch_branch((u32 *)tramp, ptr, 0)) { 288 pr_debug("REL24 out of range!\n"); 289 return -1; 290 } 291 292 if (add_ftrace_tramp(tramp)) { 293 pr_debug("No tramp locations left\n"); 294 return -1; 295 } 296 297 return 0; 298 } 299 300 static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr) 301 { 302 unsigned long tramp, ip = rec->ip; 303 ppc_inst_t op; 304 305 /* Read where this goes */ 306 if (copy_inst_from_kernel_nofault(&op, (void *)ip)) { 307 pr_err("Fetching opcode failed.\n"); 308 return -EFAULT; 309 } 310 311 /* Make sure that that this is still a 24bit jump */ 312 if (!is_bl_op(op)) { 313 pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op)); 314 return -EINVAL; 315 } 316 317 /* Let's find where the pointer goes */ 318 tramp = find_bl_target(ip, op); 319 320 pr_devel("ip:%lx jumps to %lx", ip, tramp); 321 322 if (setup_mcount_compiler_tramp(tramp)) { 323 /* Are other trampolines reachable? 
*/ 324 if (!find_ftrace_tramp(ip)) { 325 pr_err("No ftrace trampolines reachable from %ps\n", 326 (void *)ip); 327 return -EINVAL; 328 } 329 } 330 331 if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) { 332 pr_err("Patching NOP failed.\n"); 333 return -EPERM; 334 } 335 336 return 0; 337 } 338 339 int ftrace_make_nop(struct module *mod, 340 struct dyn_ftrace *rec, unsigned long addr) 341 { 342 unsigned long ip = rec->ip; 343 ppc_inst_t old, new; 344 345 /* 346 * If the calling address is more that 24 bits away, 347 * then we had to use a trampoline to make the call. 348 * Otherwise just update the call site. 349 */ 350 if (test_24bit_addr(ip, addr)) { 351 /* within range */ 352 old = ftrace_call_replace(ip, addr, 1); 353 new = ppc_inst(PPC_RAW_NOP()); 354 return ftrace_modify_code(ip, old, new); 355 } else if (core_kernel_text(ip)) 356 return __ftrace_make_nop_kernel(rec, addr); 357 358 #ifdef CONFIG_MODULES 359 /* 360 * Out of range jumps are called from modules. 361 * We should either already have a pointer to the module 362 * or it has been passed in. 363 */ 364 if (!rec->arch.mod) { 365 if (!mod) { 366 pr_err("No module loaded addr=%lx\n", addr); 367 return -EFAULT; 368 } 369 rec->arch.mod = mod; 370 } else if (mod) { 371 if (mod != rec->arch.mod) { 372 pr_err("Record mod %p not equal to passed in mod %p\n", 373 rec->arch.mod, mod); 374 return -EINVAL; 375 } 376 /* nothing to do if mod == rec->arch.mod */ 377 } else 378 mod = rec->arch.mod; 379 380 return __ftrace_make_nop(mod, rec, addr); 381 #else 382 /* We should not get here without modules */ 383 return -EINVAL; 384 #endif /* CONFIG_MODULES */ 385 } 386 387 #ifdef CONFIG_MODULES 388 /* 389 * Examine the existing instructions for __ftrace_make_call. 390 * They should effectively be a NOP, and follow formal constraints, 391 * depending on the ABI. Return false if they don't. 
392 */ 393 #ifdef CONFIG_PPC64_ELF_ABI_V1 394 static int 395 expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1) 396 { 397 if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) || 398 !ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC))) 399 return 0; 400 return 1; 401 } 402 #else 403 static int 404 expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1) 405 { 406 if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()))) 407 return 0; 408 return 1; 409 } 410 #endif 411 412 static int 413 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 414 { 415 ppc_inst_t op[2]; 416 void *ip = (void *)rec->ip; 417 unsigned long entry, ptr, tramp; 418 struct module *mod = rec->arch.mod; 419 420 /* read where this goes */ 421 if (copy_inst_from_kernel_nofault(op, ip)) 422 return -EFAULT; 423 424 #ifdef CONFIG_PPC64_ELF_ABI_V1 425 if (copy_inst_from_kernel_nofault(op + 1, ip + 4)) 426 return -EFAULT; 427 #endif 428 429 if (!expected_nop_sequence(ip, op[0], op[1])) { 430 pr_err("Unexpected call sequence at %p: %s %s\n", 431 ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1])); 432 return -EINVAL; 433 } 434 435 /* If we never set up ftrace trampoline(s), then bail */ 436 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 437 if (!mod->arch.tramp || !mod->arch.tramp_regs) { 438 #else 439 if (!mod->arch.tramp) { 440 #endif 441 pr_err("No ftrace trampoline\n"); 442 return -EINVAL; 443 } 444 445 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 446 if (rec->flags & FTRACE_FL_REGS) 447 tramp = mod->arch.tramp_regs; 448 else 449 #endif 450 tramp = mod->arch.tramp; 451 452 if (module_trampoline_target(mod, tramp, &ptr)) { 453 pr_err("Failed to get trampoline target\n"); 454 return -EFAULT; 455 } 456 457 pr_devel("trampoline target %lx", ptr); 458 459 entry = ppc_global_function_entry((void *)addr); 460 /* This should match what was called */ 461 if (ptr != entry) { 462 pr_err("addr %lx does not match expected %lx\n", ptr, entry); 463 return -EINVAL; 464 } 465 466 if (patch_branch(ip, tramp, 
BRANCH_SET_LINK)) { 467 pr_err("REL24 out of range!\n"); 468 return -EINVAL; 469 } 470 471 return 0; 472 } 473 #endif /* CONFIG_MODULES */ 474 475 static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr) 476 { 477 ppc_inst_t op; 478 void *ip = (void *)rec->ip; 479 unsigned long tramp, entry, ptr; 480 481 /* Make sure we're being asked to patch branch to a known ftrace addr */ 482 entry = ppc_global_function_entry((void *)ftrace_caller); 483 ptr = ppc_global_function_entry((void *)addr); 484 485 if (ptr != entry) { 486 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 487 entry = ppc_global_function_entry((void *)ftrace_regs_caller); 488 if (ptr != entry) { 489 #endif 490 pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr); 491 return -EINVAL; 492 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 493 } 494 #endif 495 } 496 497 /* Make sure we have a nop */ 498 if (copy_inst_from_kernel_nofault(&op, ip)) { 499 pr_err("Unable to read ftrace location %p\n", ip); 500 return -EFAULT; 501 } 502 503 if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) { 504 pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op)); 505 return -EINVAL; 506 } 507 508 tramp = find_ftrace_tramp((unsigned long)ip); 509 if (!tramp) { 510 pr_err("No ftrace trampolines reachable from %ps\n", ip); 511 return -EINVAL; 512 } 513 514 if (patch_branch(ip, tramp, BRANCH_SET_LINK)) { 515 pr_err("Error patching branch to ftrace tramp!\n"); 516 return -EINVAL; 517 } 518 519 return 0; 520 } 521 522 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 523 { 524 unsigned long ip = rec->ip; 525 ppc_inst_t old, new; 526 527 /* 528 * If the calling address is more that 24 bits away, 529 * then we had to use a trampoline to make the call. 530 * Otherwise just update the call site. 
531 */ 532 if (test_24bit_addr(ip, addr)) { 533 /* within range */ 534 old = ppc_inst(PPC_RAW_NOP()); 535 new = ftrace_call_replace(ip, addr, 1); 536 return ftrace_modify_code(ip, old, new); 537 } else if (core_kernel_text(ip)) 538 return __ftrace_make_call_kernel(rec, addr); 539 540 #ifdef CONFIG_MODULES 541 /* 542 * Out of range jumps are called from modules. 543 * Being that we are converting from nop, it had better 544 * already have a module defined. 545 */ 546 if (!rec->arch.mod) { 547 pr_err("No module loaded\n"); 548 return -EINVAL; 549 } 550 551 return __ftrace_make_call(rec, addr); 552 #else 553 /* We should not get here without modules */ 554 return -EINVAL; 555 #endif /* CONFIG_MODULES */ 556 } 557 558 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 559 #ifdef CONFIG_MODULES 560 static int 561 __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 562 unsigned long addr) 563 { 564 ppc_inst_t op; 565 unsigned long ip = rec->ip; 566 unsigned long entry, ptr, tramp; 567 struct module *mod = rec->arch.mod; 568 569 /* If we never set up ftrace trampolines, then bail */ 570 if (!mod->arch.tramp || !mod->arch.tramp_regs) { 571 pr_err("No ftrace trampoline\n"); 572 return -EINVAL; 573 } 574 575 /* read where this goes */ 576 if (copy_inst_from_kernel_nofault(&op, (void *)ip)) { 577 pr_err("Fetching opcode failed.\n"); 578 return -EFAULT; 579 } 580 581 /* Make sure that that this is still a 24bit jump */ 582 if (!is_bl_op(op)) { 583 pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op)); 584 return -EINVAL; 585 } 586 587 /* lets find where the pointer goes */ 588 tramp = find_bl_target(ip, op); 589 entry = ppc_global_function_entry((void *)old_addr); 590 591 pr_devel("ip:%lx jumps to %lx", ip, tramp); 592 593 if (tramp != entry) { 594 /* old_addr is not within range, so we must have used a trampoline */ 595 if (module_trampoline_target(mod, tramp, &ptr)) { 596 pr_err("Failed to get trampoline target\n"); 597 return -EFAULT; 598 } 599 600 
pr_devel("trampoline target %lx", ptr); 601 602 /* This should match what was called */ 603 if (ptr != entry) { 604 pr_err("addr %lx does not match expected %lx\n", ptr, entry); 605 return -EINVAL; 606 } 607 } 608 609 /* The new target may be within range */ 610 if (test_24bit_addr(ip, addr)) { 611 /* within range */ 612 if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) { 613 pr_err("REL24 out of range!\n"); 614 return -EINVAL; 615 } 616 617 return 0; 618 } 619 620 if (rec->flags & FTRACE_FL_REGS) 621 tramp = mod->arch.tramp_regs; 622 else 623 tramp = mod->arch.tramp; 624 625 if (module_trampoline_target(mod, tramp, &ptr)) { 626 pr_err("Failed to get trampoline target\n"); 627 return -EFAULT; 628 } 629 630 pr_devel("trampoline target %lx", ptr); 631 632 entry = ppc_global_function_entry((void *)addr); 633 /* This should match what was called */ 634 if (ptr != entry) { 635 pr_err("addr %lx does not match expected %lx\n", ptr, entry); 636 return -EINVAL; 637 } 638 639 if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) { 640 pr_err("REL24 out of range!\n"); 641 return -EINVAL; 642 } 643 644 return 0; 645 } 646 #endif 647 648 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 649 unsigned long addr) 650 { 651 unsigned long ip = rec->ip; 652 ppc_inst_t old, new; 653 654 /* 655 * If the calling address is more that 24 bits away, 656 * then we had to use a trampoline to make the call. 657 * Otherwise just update the call site. 658 */ 659 if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) { 660 /* within range */ 661 old = ftrace_call_replace(ip, old_addr, 1); 662 new = ftrace_call_replace(ip, addr, 1); 663 return ftrace_modify_code(ip, old, new); 664 } else if (core_kernel_text(ip)) { 665 /* 666 * We always patch out of range locations to go to the regs 667 * variant, so there is nothing to do here 668 */ 669 return 0; 670 } 671 672 #ifdef CONFIG_MODULES 673 /* 674 * Out of range jumps are called from modules. 
675 */ 676 if (!rec->arch.mod) { 677 pr_err("No module loaded\n"); 678 return -EINVAL; 679 } 680 681 return __ftrace_modify_call(rec, old_addr, addr); 682 #else 683 /* We should not get here without modules */ 684 return -EINVAL; 685 #endif /* CONFIG_MODULES */ 686 } 687 #endif 688 689 int ftrace_update_ftrace_func(ftrace_func_t func) 690 { 691 unsigned long ip = (unsigned long)(&ftrace_call); 692 ppc_inst_t old, new; 693 int ret; 694 695 old = ppc_inst_read((u32 *)&ftrace_call); 696 new = ftrace_call_replace(ip, (unsigned long)func, 1); 697 ret = ftrace_modify_code(ip, old, new); 698 699 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 700 /* Also update the regs callback function */ 701 if (!ret) { 702 ip = (unsigned long)(&ftrace_regs_call); 703 old = ppc_inst_read((u32 *)&ftrace_regs_call); 704 new = ftrace_call_replace(ip, (unsigned long)func, 1); 705 ret = ftrace_modify_code(ip, old, new); 706 } 707 #endif 708 709 return ret; 710 } 711 712 /* 713 * Use the default ftrace_modify_all_code, but without 714 * stop_machine(). 
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#ifdef CONFIG_PPC64
#define PACATOC offsetof(struct paca_struct, kernel_toc)

extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];

/*
 * Fill in the two linker-provided trampoline slots (after .text and
 * .init.text) with a stub that loads the kernel TOC from the paca and
 * jumps to ftrace_[regs_]caller via ctr, then record them so call sites
 * out of direct branch range can reach ftrace.
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	u32 stub_insns[] = {
		PPC_RAW_LD(_R12, _R13, PACATOC),
		PPC_RAW_ADDIS(_R12, _R12, 0),
		PPC_RAW_ADDI(_R12, _R12, 0),
		PPC_RAW_MTCTR(_R12),
		PPC_RAW_BCTR()
	};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	long reladdr = addr - kernel_toc_addr();

	/* the addis/addi pair below can only materialize a 32-bit offset */
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
		       (void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		/* patch the high and low halves of the TOC-relative offset */
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
#else
/* Nothing to set up on PPC32: call sites reach ftrace_caller directly */
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

/*
 * Flip the branch at ftrace_graph_call between ftrace_graph_caller
 * (enable) and ftrace_graph_stub (disable). A no-op when
 * CONFIG_DYNAMIC_FTRACE_WITH_ARGS handles graph tracing instead.
 */
static int ftrace_modify_ftrace_graph_caller(bool enable)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	ppc_inst_t old, new;

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
		return 0;

	old = ftrace_call_replace(ip, enable ? stub : addr, 0);
	new = ftrace_call_replace(ip, enable ? addr : stub, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_ftrace_graph_caller(false);
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 */
static unsigned long
__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
	unsigned long return_hooker;
	int bit;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	/* guard against recursive entry from the tracer itself */
	bit = ftrace_test_recursion_trylock(ip, parent);
	if (bit < 0)
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	/* only divert the return if the graph entry was accepted */
	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;

	ftrace_test_recursion_unlock(bit);
out:
	return parent;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/* Graph entry via ftrace_ops: rewrite LR in the saved regs (r1 is the SP) */
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
}
#else
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	return __prepare_ftrace_return(parent, ip, sp);
}
#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_PPC64_ELF_ABI_V1
/* ELF ABI v1 prefixes text symbols with '.'; skip it when matching */
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	if (str[0] == '.' && search[0] != '.')
		return str + 1;
	else
		return str;
}
#endif /* CONFIG_PPC64_ELF_ABI_V1 */