// SPDX-License-Identifier: GPL-2.0
/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/asm-prototypes.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}

static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen,
	 * it could cause us to read or write to someplace that could cause
	 * harm. Carefully read and modify the code with probe_kernel_*(),
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old) {
		pr_err("%p: replaced (%#x) != old (%#x)",
		       (void *)ip, replaced, old);
		return -EINVAL;
	}

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
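/*
 * A relative branch (b/bl) has a 24-bit LI field that is shifted left
 * by two and sign-extended, giving a reach of +/- 32MB from the call
 * site. create_branch() returns 0 when the target is not reachable,
 * which is what the test below relies on.
 */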
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
	addr = ppc_function_entry((void *)addr);

	/* use create_branch() to verify that this offset is branchable */
	return create_branch((unsigned int *)ip, addr, 0);
}

#ifdef CONFIG_MODULES

static int is_bl_op(unsigned int op)
{
	return (op & 0xfc000003) == 0x48000001;
}

static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
	int offset;

	offset = (op & 0x03fffffc);
	/* make it signed */
	if (offset & 0x02000000)
		offset |= 0xfe000000;

	return ip + (long)offset;
}

#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long entry, ptr, tramp;
	unsigned long ip = rec->ip;
	unsigned int op, pop;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
		pr_err("Fetching opcode failed.\n");
		return -EFAULT;
	}

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	if (module_trampoline_target(mod, tramp, &ptr)) {
		pr_err("Failed to get trampoline target\n");
		return -EFAULT;
	}

	pr_devel("trampoline target %lx", ptr);

	entry = ppc_global_function_entry((void *)addr);
	/* This should match what was called */
	if (ptr != entry) {
		pr_err("addr %lx does not match expected %lx\n", ptr, entry);
		return -EINVAL;
	}

#ifdef CC_USING_MPROFILE_KERNEL
	/* When using -mprofile-kernel there is no load to jump over */
	pop = PPC_INST_NOP;

	if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
		pr_err("Fetching instruction at %lx failed.\n", ip - 4);
		return -EFAULT;
	}

	/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
	if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
		pr_err("Unexpected instruction %08x around bl _mcount\n", op);
		return -EINVAL;
	}
#else
	/*
	 * Our original call site looks like:
	 *
	 * bl <tramp>
	 * ld r2,XX(r1)
	 *
	 * Milton Miller pointed out that we can not simply nop the branch.
	 * If a task was preempted when calling a trace function, the nops
	 * will remove the way to restore the TOC in r2 and the r2 TOC will
	 * get corrupted.
	 *
	 * Use a b +8 to jump over the load.
	 */

	pop = PPC_INST_BRANCH | 8;	/* b +8 */

	/*
	 * Check what is in the next instruction. We can see ld r2,40(r1),
	 * but on first pass after boot we will see mflr r0.
	 */
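	/*
	 * Note: PPC_INST_LD_TOC is the TOC restore "ld r2,XX(r1)"; the
	 * offset XX is 40 under ELFv1 and 24 under ELFv2.
	 */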
	if (probe_kernel_read(&op, (void *)(ip + 4), MCOUNT_INSN_SIZE)) {
		pr_err("Fetching op failed.\n");
		return -EFAULT;
	}

	if (op != PPC_INST_LD_TOC) {
		pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
		return -EINVAL;
	}
#endif /* CC_USING_MPROFILE_KERNEL */

	if (patch_instruction((unsigned int *)ip, pop)) {
		pr_err("Patching NOP failed.\n");
		return -EPERM;
	}

	return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
		  struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned int jmp[4];
	unsigned long ip = rec->ip;
	unsigned long tramp;

	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure that this is still a 24bit jump */
	if (!is_bl_op(op)) {
		pr_err("Not expected bl: opcode is %x\n", op);
		return -EINVAL;
	}

	/* let's find where the pointer goes */
	tramp = find_bl_target(ip, op);

	/*
	 * On PPC32 the trampoline looks like:
	 *  0x3d, 0x80, 0x00, 0x00  lis   r12,sym@ha
	 *  0x39, 0x8c, 0x00, 0x00  addi  r12,r12,sym@l
	 *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
	 *  0x4e, 0x80, 0x04, 0x20  bctr
	 */

	pr_devel("ip:%lx jumps to %lx", ip, tramp);

	/* Find where the trampoline jumps to */
	if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
		pr_err("Failed to read %lx\n", tramp);
		return -EFAULT;
	}

	pr_devel(" %08x %08x ", jmp[0], jmp[1]);

	/* verify that this is what we expect it to be */
	if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
	    ((jmp[1] & 0xffff0000) != 0x398c0000) ||
	    (jmp[2] != 0x7d8903a6) ||
	    (jmp[3] != 0x4e800420)) {
		pr_err("Not a trampoline\n");
		return -EINVAL;
	}

	tramp = (jmp[1] & 0xffff) |
		((jmp[0] & 0xffff) << 16);
	if (tramp & 0x8000)
		tramp -= 0x10000;

	pr_devel(" %lx ", tramp);

	if (tramp != addr) {
		pr_err("Trampoline location %08lx does not match addr\n",
		       tramp);
		return -EINVAL;
	}

	op = PPC_INST_NOP;

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = ftrace_call_replace(ip, addr, 1);
		new = PPC_INST_NOP;
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * We should either already have a pointer to the module
	 * or it has been passed in.
	 */
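	/*
	 * rec->arch.mod is cached on the first call here (at module load,
	 * when mod is passed in) and reused on later calls where mod is
	 * NULL.
	 */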
	if (!rec->arch.mod) {
		if (!mod) {
			pr_err("No module loaded addr=%lx\n", addr);
			return -EFAULT;
		}
		rec->arch.mod = mod;
	} else if (mod) {
		if (mod != rec->arch.mod) {
			pr_err("Record mod %p not equal to passed in mod %p\n",
			       rec->arch.mod, mod);
			return -EINVAL;
		}
		/* nothing to do if mod == rec->arch.mod */
	} else
		mod = rec->arch.mod;

	return __ftrace_make_nop(mod, rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
/*
 * Examine the existing instructions for __ftrace_make_call.
 * They should effectively be a NOP, and follow formal constraints,
 * depending on the ABI. Return false if they don't.
 */
#ifndef CC_USING_MPROFILE_KERNEL
static int
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
{
	/*
	 * We expect to see:
	 *
	 * b +8
	 * ld r2,XX(r1)
	 *
	 * The load offset is different depending on the ABI. For simplicity
	 * just mask it out when doing the compare.
	 */
	if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
		return 0;
	return 1;
}
#else
static int
expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
{
	/* look for patched "NOP" on ppc64 with -mprofile-kernel */
	if (op0 != PPC_INST_NOP)
		return 0;
	return 1;
}
#endif

static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op[2];
	void *ip = (void *)rec->ip;

	/* read where this goes */
	if (probe_kernel_read(op, ip, sizeof(op)))
		return -EFAULT;

	if (!expected_nop_sequence(ip, op[0], op[1])) {
		pr_err("Unexpected call sequence at %p: %x %x\n",
		       ip, op[0], op[1]);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* Ensure branch is within 24 bits */
	if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("Branch out of range\n");
		return -EINVAL;
	}

	if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	return 0;
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return ftrace_make_call(rec, addr);
}
#endif

#else /* !CONFIG_PPC64: */
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned int op;
	unsigned long ip = rec->ip;

	/* read where this goes */
	if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* It should be pointing to a nop */
	if (op != PPC_INST_NOP) {
		pr_err("Expected NOP but have %x\n", op);
		return -EINVAL;
	}

	/* If we never set up a trampoline to ftrace_caller, then bail */
	if (!rec->arch.mod->arch.tramp) {
		pr_err("No ftrace trampoline\n");
		return -EINVAL;
	}

	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);
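	/* patch_instruction() also handles flushing the icache for ip */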
	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
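/*
 * This is safe without stop_machine() because each update is a single
 * aligned 4-byte store done through patch_instruction(): another CPU
 * fetching the instruction sees either the old or the new word, never
 * a torn one.
 */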
void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, stub, 0);
	new = ftrace_call_replace(ip, addr, 0);

	return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);
	unsigned long addr = (unsigned long)(&ftrace_graph_caller);
	unsigned long stub = (unsigned long)(&ftrace_graph_stub);
	unsigned int old, new;

	old = ftrace_call_replace(ip, addr, 0);
	new = ftrace_call_replace(ip, stub, 0);

	return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;
	unsigned long return_hooker;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;

	return_hooker = ppc_function_entry(return_to_handler);

	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out;

	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
				     NULL) == -EBUSY)
		goto out;

	parent = return_hooker;
out:
	return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
unsigned long __init arch_syscall_addr(int nr)
{
	return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */

#ifdef PPC64_ELF_ABI_v1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
	if (str[0] == '.' && search[0] != '.')
		return str + 1;
	else
		return str;
}
#endif /* PPC64_ELF_ABI_v1 */