// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/asm-prototypes.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
#include <asm/inst.h>


#ifdef CONFIG_DYNAMIC_FTRACE

/*
 * We generally only have a single long_branch tramp and at most 2 or 3 plt
 * tramps generated. But, we don't use the plt tramps currently. We also allot
 * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
 * tramps in total. Set aside 8 just to be sure.
 */
#define NUM_FTRACE_TRAMPS	8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];

/*
 * Build the branch instruction that will live at @ip and target @addr:
 * a 'bl' when @link is set, a plain 'b' otherwise. @addr is first resolved
 * to the function's local entry point via ppc_function_entry().
 */
static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	ppc_inst_t op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);

	return op;
}

/*
 * Replace the instruction at @ip with @new, but only after verifying that
 * what is currently there matches @old. Returns 0 on success, -EFAULT if
 * the text cannot be read, -EINVAL on a mismatch, -EPERM if patching fails.
 */
static int
ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
	ppc_inst_t replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug was to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (!ppc_inst_equal(replaced, old)) {
		pr_err("%p: replaced (%s) != old (%s)",
			(void *)ip, ppc_inst_as_str(replaced), ppc_inst_as_str(old));
		return -EINVAL;
	}

	/* replace the text with the new text */
	if (patch_instruction((u32 *)ip, new))
		return -EPERM;

	return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */

/* Can @addr be reached from @ip with a single 24-bit relative branch? */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	ppc_inst_t op;
	addr = ppc_function_entry((void *)addr);

	/* use the create_branch to verify that this offset can be branched */
	return create_branch(&op, (u32 *)ip, addr, 0) == 0;
}

/* Is @op a 'bl' (I-form branch, opcode 18, AA=0, LK=1)? */
static int is_bl_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
}

/* Is @op a plain 'b' (I-form branch, opcode 18, AA=0, LK=0)? */
static int is_b_op(ppc_inst_t op)
{
	return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
}

/*
 * Decode the LI displacement of the b/bl instruction @op located at @ip
 * and return the absolute branch target address.
 */
static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
	int offset;

	offset = (ppc_inst_val(op) & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
/*
 * Turn the 'bl' at a module call site (rec->ip) back into a "nop":
 * verify the branch targets a module trampoline that resolves to @addr,
 * then patch in either a real nop (-mprofile-kernel) or a 'b +8' that
 * skips the TOC-restore load (ELFv1/-pg).
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	ppc_inst_t op, pop;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

#ifdef CONFIG_MPROFILE_KERNEL
	/* When using -mprofile-kernel there is no load to jump over */
	pop = ppc_inst(PPC_RAW_NOP());

	/* Sanity-check the instruction preceding the bl as well */
	if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
		return -EFAULT;
	}

	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
	    !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
		pr_err("Unexpected instruction %s around bl _mcount\n",
		       ppc_inst_as_str(op));
		return -EINVAL;
	}
#else
	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */

	pop = ppc_inst(PPC_INST_BRANCH | 8);	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1), but
	 * on first pass after boot we will see mflr r0.
	 */
	if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
		pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
		return -EINVAL;
	}
#endif /* CONFIG_MPROFILE_KERNEL */

	if (patch_instruction((u32 *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}

#else /* !PPC64 */
/*
 * PPC32 variant: verify the 'bl' targets a module trampoline resolving to
 * @addr, then replace it with a real nop (no TOC to preserve on 32-bit).
 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long tramp, ptr;

	if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/* Find where the trampoline jumps to */
	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	if (ptr != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = ppc_inst(PPC_RAW_NOP());

	if (patch_instruction((u32 *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

/*
 * Return a registered ftrace trampoline reachable from @ip with a 24-bit
 * branch, or 0 if none is in range.
 */
static unsigned long find_ftrace_tramp(unsigned long ip)
{
	int i;
	ppc_inst_t instr;

	/*
	 * We have the compiler generated long_branch tramps at the end
	 * and we prefer those
	 */
	for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
		if (!ftrace_tramps[i])
			continue;
		else if (create_branch(&instr, (void *)ip,
				       ftrace_tramps[i], 0) == 0)
			return ftrace_tramps[i];

	return 0;
}

/* Record @tramp in the first free slot; -1 if the table is full. */
static int add_ftrace_tramp(unsigned long tramp)
{
	int i;

	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i]) {
			ftrace_tramps[i] = tramp;
			return 0;
		}

	return -1;
}

/*
 * If this is a compiler generated long_branch trampoline (essentially, a
 * trampoline that has a branch to _mcount()), we re-write the branch to
 * instead go to ftrace_[regs_]caller() and note down the location of this
 * trampoline.
 */
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
	int i;
	ppc_inst_t op;
	unsigned long ptr;
	ppc_inst_t instr;
	static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];

	/* Is this a known long jump tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_tramps[i])
			break;
		else if (ftrace_tramps[i] == tramp)
			return 0;

	/* Is this a known plt tramp? */
	for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
		if (!ftrace_plt_tramps[i])
			break;
		else if (ftrace_plt_tramps[i] == tramp)
			return -1;

	/* New trampoline -- read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
		pr_debug("Fetching opcode failed.\n");
		return -1;
	}

	/* Is this a 24 bit branch? */
	if (!is_b_op(op)) {
		pr_debug("Trampoline is not a long branch tramp.\n");
		return -1;
	}

	/* lets find where the pointer goes */
	ptr = find_bl_target(tramp, op);

	if (ptr != ppc_global_function_entry((void *)_mcount)) {
		pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
		return -1;
	}

	/* Let's re-write the tramp to go to ftrace_[regs_]caller */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	ptr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	if (create_branch(&instr, (void *)tramp, ptr, 0)) {
		pr_debug("%ps is not reachable from existing mcount tramp\n",
			 (void *)ptr);
		return -1;
	}

	if (patch_branch((u32 *)tramp, ptr, 0)) {
		pr_debug("REL24 out of range!\n");
		return -1;
	}

	if (add_ftrace_tramp(tramp)) {
		pr_debug("No tramp locations left\n");
		return -1;
	}

	return 0;
}

/*
 * Nop out a call site in core kernel text. The branch there targets a
 * compiler-generated long_branch trampoline; try to claim it via
 * setup_mcount_compiler_tramp(), else make sure some other registered
 * trampoline remains reachable before patching the nop.
 */
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long tramp, ip = rec->ip;
	ppc_inst_t op;

	/* Read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* Let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (setup_mcount_compiler_tramp(tramp)) {
		/* Are other trampolines reachable? */
		if (!find_ftrace_tramp(ip)) {
			pr_err("No ftrace trampolines reachable from %ps\n",
			       (void *)ip);
			return -EINVAL;
		}
	}

	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}

/*
 * ftrace entry point for disabling tracing at a call site: replace the
 * call at rec->ip with a nop. Dispatches on whether the site is within
 * 24-bit branch range, in core kernel text, or in a module.
 */
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = ppc_inst(PPC_RAW_NOP());
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_nop_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 */
#ifndef CONFIG_MPROFILE_KERNEL
static int
expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
	/*
	 * We expect to see:
	 *
	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare.
	 */
	if (!ppc_inst_equal(op0, ppc_inst(0x48000008)) ||
	    (ppc_inst_val(op1) & 0xffff0000) != 0xe8410000)
		return 0;
	return 1;
}
#else
static int
expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
	if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
		return 0;
	return 1;
}
#endif

/*
 * Enable tracing at a module call site on PPC64: verify the site holds the
 * expected nop sequence, pick the module's (regs-capable, if requested)
 * trampoline that resolves to @addr, and patch in a 'bl' to it.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op[2];
	ppc_inst_t instr;
	void *ip = (void *)rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(op, ip))
		return -EFAULT;

	if (copy_inst_from_kernel_nofault(op + 1, ip + 4))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %s %s\n",
		       ip, ppc_inst_as_str(op[0]), ppc_inst_as_str(op[1]));
		return -EINVAL;
	}

	/* If we never set up ftrace trampoline(s), then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (create_branch(&instr, ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}

#else /* !CONFIG_PPC64: */
/*
 * Enable tracing at a module call site on PPC32: the site must hold a
 * single nop; patch in a 'bl' to the module's trampoline.
 */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int err;
	ppc_inst_t op;
	u32 *ip = (u32 *)rec->ip;
	struct module *mod = rec->arch.mod;
	unsigned long tramp;

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, ip))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
#else
	if (!mod->arch.tramp) {
#endif
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
#endif
		tramp = mod->arch.tramp;
	/* create the branch to the trampoline */
	err = create_branch(&op, ip, tramp, BRANCH_SET_LINK);
	if (err) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction(ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

/*
 * Enable tracing at a core-kernel call site that is out of direct branch
 * range: validate that @addr is a known ftrace caller, confirm the site is
 * a nop, then branch-and-link to a reachable registered trampoline.
 */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
	ppc_inst_t op;
	void *ip = (void *)rec->ip;
	unsigned long tramp, entry, ptr;

	/* Make sure we're being asked to patch branch to a known ftrace addr */
	entry = ppc_global_function_entry((void *)ftrace_caller);
	ptr = ppc_global_function_entry((void *)addr);

	if (ptr != entry) {
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		entry = ppc_global_function_entry((void *)ftrace_regs_caller);
		if (ptr != entry) {
#endif
			pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
			return -EINVAL;
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		}
#endif
	}

	/* Make sure we have a nop */
	if (copy_inst_from_kernel_nofault(&op, ip)) {
		pr_err("Unable to read ftrace location %p\n", ip);
		return -EFAULT;
	}

	if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
		pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
		return -EINVAL;
	}

	tramp = find_ftrace_tramp((unsigned long)ip);
	if (!tramp) {
		pr_err("No ftrace trampolines reachable from %ps\n", ip);
		return -EINVAL;
	}

	if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Error patching branch to ftrace tramp!\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * ftrace entry point for enabling tracing at a call site: replace the nop
 * at rec->ip with a call to @addr. Dispatches on branch range, core kernel
 * text, or module text.
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ppc_inst(PPC_RAW_NOP());
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip))
		return __ftrace_make_call_kernel(rec, addr);

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
#ifdef CONFIG_MODULES
/*
 * Redirect an existing module call site from @old_addr to @addr: verify the
 * current 'bl' (directly or via trampoline) targets @old_addr, then patch a
 * direct branch if @addr is in range, else a branch to the appropriate
 * module trampoline.
 */
static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		     unsigned long addr)
{
	ppc_inst_t op;
	unsigned long ip = rec->ip;
	unsigned long entry, ptr, tramp;
	struct module *mod = rec->arch.mod;

	/* If we never set up ftrace trampolines, then bail */
	if (!mod->arch.tramp || !mod->arch.tramp_regs) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* read where this goes */
	if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %s\n", ppc_inst_as_str(op));
		return -EINVAL;
	}

	/* lets find where the pointer goes */
	tramp = find_bl_target(ip, op);
	entry = ppc_global_function_entry((void *)old_addr);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (tramp != entry) {
		/* old_addr is not within range, so we must have used a trampoline */
		if (module_trampoline_target(mod, tramp, &ptr)) {
			pr_err("Failed to get trampoline target\n");
			return -EFAULT;
		}

		pr_devel("trampoline target %lx", ptr);

		/* This should match what was called */
		if (ptr != entry) {
			pr_err("addr %lx does not match expected %lx\n", ptr, entry);
			return -EINVAL;
		}
	}

	/* The new target may be within range */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
			pr_err("REL24 out of range!\n");
			return -EINVAL;
		}

		return 0;
	}

	if (rec->flags & FTRACE_FL_REGS)
		tramp = mod->arch.tramp_regs;
	else
		tramp = mod->arch.tramp;

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (create_branch(&op, (u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}
#endif

/*
 * ftrace entry point for retargeting an existing call site from @old_addr
 * to @addr (e.g. switching between ftrace_caller variants).
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long ip = rec->ip;
	ppc_inst_t old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr) && test_24bit_addr(ip, old_addr)) {
		/* within range */
		old = ftrace_call_replace(ip, old_addr, 1);
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	} else if (core_kernel_text(ip)) {
		/*
		 * We always patch out of range locations to go to the regs
		 * variant, so there is nothing to do here
		 */
		return 0;
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_modify_call(rec, old_addr, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}
#endif

/*
 * Point the ftrace_call (and, with REGS support, ftrace_regs_call) patch
 * site at the currently registered tracer function @func.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	ppc_inst_t old, new;
	int ret;

	old = ppc_inst_read((u32 *)&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/* Also update the regs callback function */
	if (!ret) {
		ip = (unsigned long)(&ftrace_regs_call);
		old = ppc_inst_read((u32 *)&ftrace_regs_call);
		new = ftrace_call_replace(ip, (unsigned long)func, 1);
		ret = ftrace_modify_code(ip, old, new);
	}
#endif

	return ret;
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

#ifdef CONFIG_PPC64
#define PACATOC offsetof(struct paca_struct, kernel_toc)

extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];

/*
 * Populate the two reserved trampoline slots (after .text and .init.text)
 * with a stub that loads the kernel TOC from the paca and mtctr/bctr's to
 * ftrace_[regs_]caller, then register them in ftrace_tramps[].
 */
int __init ftrace_dyn_arch_init(void)
{
	int i;
	unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
	u32 stub_insns[] = {
		0xe98d0000 | PACATOC,	/* ld      r12,PACATOC(r13)	*/
		0x3d8c0000,		/* addis   r12,r12,<high>	*/
		0x398c0000,		/* addi    r12,r12,<low>	*/
		0x7d8903a6,		/* mtctr   r12			*/
		0x4e800420,		/* bctr				*/
	};
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
#else
	unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
#endif
	long reladdr = addr - kernel_toc_addr();

	/* The stub materializes addr as a 32-bit signed offset from the TOC */
	if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
		pr_err("Address of %ps out of range of kernel_toc.\n",
		       (void *)addr);
		return -1;
	}

	for (i = 0; i < 2; i++) {
		memcpy(tramp[i], stub_insns, sizeof(stub_insns));
		tramp[i][1] |= PPC_HA(reladdr);
		tramp[i][2] |= PPC_LO(reladdr);
		add_ftrace_tramp((unsigned long)tramp[i]);
	}

	return 0;
}
#else
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

/*
 * Retarget the ftrace_graph_call patch site from the stub to the real
 * graph caller (unconditional 'b', no link).
 */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	ppc_inst_t old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

/* Inverse of ftrace_enable_ftrace_graph_caller(): branch back to the stub. */
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	ppc_inst_t old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp)
{
	unsigned long return_hooker;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
		parent = return_hooker;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef PPC64_ELF_ABI_v1
/*
 * ELFv1 function symbols carry a leading dot (the text entry point);
 * skip it so ftrace name matching works against the undotted name.
 */
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	if (str[0] == '.' && search[0] != '.')
		return str + 1;
	else
		return str;
}
#endif /* PPC64_ELF_ABI_v1 */