/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/asm-prototypes.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>


#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
	unsigned int op;

	addr = ppc_function_entry((void *)addr);

	/* if (link) set op to 'bl' else 'b' */
	op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

	return op;
}

static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
	unsigned int replaced;

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug was to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with probe_kernel_*(), and make
	 * sure what we read is what we expected it to be before modifying it.
	 */

	/* read the text we want to modify */
	if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure it is what we expect it to be */
	if (replaced != old) {
		pr_err("%p: replaced (%#x) != old (%#x)\n",
		       (void *)ip, replaced, old);
		return -EINVAL;
	}

	/* replace the text with the new text */
	if (patch_instruction((unsigned int *)ip, new))
		return -EPERM;

	return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
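/*
 * A relative "b"/"bl" encodes a signed 24-bit word offset (the LI
 * field, shifted left two bits), so a call site can only reach targets
 * within +/- 32 MB of itself. Anything further away (typically module
 * code) has to be reached through a trampoline.
 */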
78 */ 79 static int test_24bit_addr(unsigned long ip, unsigned long addr) 80 { 81 addr = ppc_function_entry((void *)addr); 82 83 /* use the create_branch to verify that this offset can be branched */ 84 return create_branch((unsigned int *)ip, addr, 0); 85 } 86 87 #ifdef CONFIG_MODULES 88 89 static int is_bl_op(unsigned int op) 90 { 91 return (op & 0xfc000003) == 0x48000001; 92 } 93 94 static unsigned long find_bl_target(unsigned long ip, unsigned int op) 95 { 96 static int offset; 97 98 offset = (op & 0x03fffffc); 99 /* make it signed */ 100 if (offset & 0x02000000) 101 offset |= 0xfe000000; 102 103 return ip + (long)offset; 104 } 105 106 #ifdef CONFIG_PPC64 107 static int 108 __ftrace_make_nop(struct module *mod, 109 struct dyn_ftrace *rec, unsigned long addr) 110 { 111 unsigned long entry, ptr, tramp; 112 unsigned long ip = rec->ip; 113 unsigned int op, pop; 114 115 /* read where this goes */ 116 if (probe_kernel_read(&op, (void *)ip, sizeof(int))) { 117 pr_err("Fetching opcode failed.\n"); 118 return -EFAULT; 119 } 120 121 /* Make sure that that this is still a 24bit jump */ 122 if (!is_bl_op(op)) { 123 pr_err("Not expected bl: opcode is %x\n", op); 124 return -EINVAL; 125 } 126 127 /* lets find where the pointer goes */ 128 tramp = find_bl_target(ip, op); 129 130 pr_devel("ip:%lx jumps to %lx", ip, tramp); 131 132 if (module_trampoline_target(mod, tramp, &ptr)) { 133 pr_err("Failed to get trampoline target\n"); 134 return -EFAULT; 135 } 136 137 pr_devel("trampoline target %lx", ptr); 138 139 entry = ppc_global_function_entry((void *)addr); 140 /* This should match what was called */ 141 if (ptr != entry) { 142 pr_err("addr %lx does not match expected %lx\n", ptr, entry); 143 return -EINVAL; 144 } 145 146 #ifdef CC_USING_MPROFILE_KERNEL 147 /* When using -mkernel_profile there is no load to jump over */ 148 pop = PPC_INST_NOP; 149 150 if (probe_kernel_read(&op, (void *)(ip - 4), 4)) { 151 pr_err("Fetching instruction at %lx failed.\n", ip - 4); 152 return -EFAULT; 153 } 154 155 /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */ 156 if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) { 157 pr_err("Unexpected instruction %08x around bl _mcount\n", op); 158 return -EINVAL; 159 } 160 #else 161 /* 162 * Our original call site looks like: 163 * 164 * bl <tramp> 165 * ld r2,XX(r1) 166 * 167 * Milton Miller pointed out that we can not simply nop the branch. 168 * If a task was preempted when calling a trace function, the nops 169 * will remove the way to restore the TOC in r2 and the r2 TOC will 170 * get corrupted. 171 * 172 * Use a b +8 to jump over the load. 173 */ 174 175 pop = PPC_INST_BRANCH | 8; /* b +8 */ 176 177 /* 178 * Check what is in the next instruction. We can see ld r2,40(r1), but 179 * on first pass after boot we will see mflr r0. 
180 */ 181 if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) { 182 pr_err("Fetching op failed.\n"); 183 return -EFAULT; 184 } 185 186 if (op != PPC_INST_LD_TOC) { 187 pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op); 188 return -EINVAL; 189 } 190 #endif /* CC_USING_MPROFILE_KERNEL */ 191 192 if (patch_instruction((unsigned int *)ip, pop)) { 193 pr_err("Patching NOP failed.\n"); 194 return -EPERM; 195 } 196 197 return 0; 198 } 199 200 #else /* !PPC64 */ 201 static int 202 __ftrace_make_nop(struct module *mod, 203 struct dyn_ftrace *rec, unsigned long addr) 204 { 205 unsigned int op; 206 unsigned int jmp[4]; 207 unsigned long ip = rec->ip; 208 unsigned long tramp; 209 210 if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) 211 return -EFAULT; 212 213 /* Make sure that that this is still a 24bit jump */ 214 if (!is_bl_op(op)) { 215 pr_err("Not expected bl: opcode is %x\n", op); 216 return -EINVAL; 217 } 218 219 /* lets find where the pointer goes */ 220 tramp = find_bl_target(ip, op); 221 222 /* 223 * On PPC32 the trampoline looks like: 224 * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha 225 * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l 226 * 0x7d, 0x89, 0x03, 0xa6 mtctr r12 227 * 0x4e, 0x80, 0x04, 0x20 bctr 228 */ 229 230 pr_devel("ip:%lx jumps to %lx", ip, tramp); 231 232 /* Find where the trampoline jumps to */ 233 if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { 234 pr_err("Failed to read %lx\n", tramp); 235 return -EFAULT; 236 } 237 238 pr_devel(" %08x %08x ", jmp[0], jmp[1]); 239 240 /* verify that this is what we expect it to be */ 241 if (((jmp[0] & 0xffff0000) != 0x3d800000) || 242 ((jmp[1] & 0xffff0000) != 0x398c0000) || 243 (jmp[2] != 0x7d8903a6) || 244 (jmp[3] != 0x4e800420)) { 245 pr_err("Not a trampoline\n"); 246 return -EINVAL; 247 } 248 249 tramp = (jmp[1] & 0xffff) | 250 ((jmp[0] & 0xffff) << 16); 251 if (tramp & 0x8000) 252 tramp -= 0x10000; 253 254 pr_devel(" %lx ", tramp); 255 256 if (tramp != addr) { 257 pr_err("Trampoline location %08lx does not match addr\n", 258 tramp); 259 return -EINVAL; 260 } 261 262 op = PPC_INST_NOP; 263 264 if (patch_instruction((unsigned int *)ip, op)) 265 return -EPERM; 266 267 return 0; 268 } 269 #endif /* PPC64 */ 270 #endif /* CONFIG_MODULES */ 271 272 int ftrace_make_nop(struct module *mod, 273 struct dyn_ftrace *rec, unsigned long addr) 274 { 275 unsigned long ip = rec->ip; 276 unsigned int old, new; 277 278 /* 279 * If the calling address is more that 24 bits away, 280 * then we had to use a trampoline to make the call. 281 * Otherwise just update the call site. 282 */ 283 if (test_24bit_addr(ip, addr)) { 284 /* within range */ 285 old = ftrace_call_replace(ip, addr, 1); 286 new = PPC_INST_NOP; 287 return ftrace_modify_code(ip, old, new); 288 } 289 290 #ifdef CONFIG_MODULES 291 /* 292 * Out of range jumps are called from modules. 293 * We should either already have a pointer to the module 294 * or it has been passed in. 
295 */ 296 if (!rec->arch.mod) { 297 if (!mod) { 298 pr_err("No module loaded addr=%lx\n", addr); 299 return -EFAULT; 300 } 301 rec->arch.mod = mod; 302 } else if (mod) { 303 if (mod != rec->arch.mod) { 304 pr_err("Record mod %p not equal to passed in mod %p\n", 305 rec->arch.mod, mod); 306 return -EINVAL; 307 } 308 /* nothing to do if mod == rec->arch.mod */ 309 } else 310 mod = rec->arch.mod; 311 312 return __ftrace_make_nop(mod, rec, addr); 313 #else 314 /* We should not get here without modules */ 315 return -EINVAL; 316 #endif /* CONFIG_MODULES */ 317 } 318 319 #ifdef CONFIG_MODULES 320 #ifdef CONFIG_PPC64 321 /* 322 * Examine the existing instructions for __ftrace_make_call. 323 * They should effectively be a NOP, and follow formal constraints, 324 * depending on the ABI. Return false if they don't. 325 */ 326 #ifndef CC_USING_MPROFILE_KERNEL 327 static int 328 expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) 329 { 330 /* 331 * We expect to see: 332 * 333 * b +8 334 * ld r2,XX(r1) 335 * 336 * The load offset is different depending on the ABI. For simplicity 337 * just mask it out when doing the compare. 338 */ 339 if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000)) 340 return 0; 341 return 1; 342 } 343 #else 344 static int 345 expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1) 346 { 347 /* look for patched "NOP" on ppc64 with -mprofile-kernel */ 348 if (op0 != PPC_INST_NOP) 349 return 0; 350 return 1; 351 } 352 #endif 353 354 static int 355 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 356 { 357 unsigned int op[2]; 358 void *ip = (void *)rec->ip; 359 360 /* read where this goes */ 361 if (probe_kernel_read(op, ip, sizeof(op))) 362 return -EFAULT; 363 364 if (!expected_nop_sequence(ip, op[0], op[1])) { 365 pr_err("Unexpected call sequence at %p: %x %x\n", 366 ip, op[0], op[1]); 367 return -EINVAL; 368 } 369 370 /* If we never set up a trampoline to ftrace_caller, then bail */ 371 if (!rec->arch.mod->arch.tramp) { 372 pr_err("No ftrace trampoline\n"); 373 return -EINVAL; 374 } 375 376 /* Ensure branch is within 24 bits */ 377 if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { 378 pr_err("Branch out of range\n"); 379 return -EINVAL; 380 } 381 382 if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { 383 pr_err("REL24 out of range!\n"); 384 return -EINVAL; 385 } 386 387 return 0; 388 } 389 390 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS 391 int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 392 unsigned long addr) 393 { 394 return ftrace_make_call(rec, addr); 395 } 396 #endif 397 398 #else /* !CONFIG_PPC64: */ 399 static int 400 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 401 { 402 unsigned int op; 403 unsigned long ip = rec->ip; 404 405 /* read where this goes */ 406 if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE)) 407 return -EFAULT; 408 409 /* It should be pointing to a nop */ 410 if (op != PPC_INST_NOP) { 411 pr_err("Expected NOP but have %x\n", op); 412 return -EINVAL; 413 } 414 415 /* If we never set up a trampoline to ftrace_caller, then bail */ 416 if (!rec->arch.mod->arch.tramp) { 417 pr_err("No ftrace trampoline\n"); 418 return -EINVAL; 419 } 420 421 /* create the branch to the trampoline */ 422 op = create_branch((unsigned int *)ip, 423 rec->arch.mod->arch.tramp, BRANCH_SET_LINK); 424 if (!op) { 425 pr_err("REL24 out of range!\n"); 426 return -EINVAL; 427 } 428 429 pr_devel("write to %lx\n", rec->ip); 430 431 if 
	/* create the branch to the trampoline */
	op = create_branch((unsigned int *)ip,
			   rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
	if (!op) {
		pr_err("REL24 out of range!\n");
		return -EINVAL;
	}

	pr_devel("write to %lx\n", rec->ip);

	if (patch_instruction((unsigned int *)ip, op))
		return -EPERM;

	return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned int old, new;

	/*
	 * If the calling address is more than 24 bits away,
	 * then we had to use a trampoline to make the call.
	 * Otherwise just update the call site.
	 */
	if (test_24bit_addr(ip, addr)) {
		/* within range */
		old = PPC_INST_NOP;
		new = ftrace_call_replace(ip, addr, 1);
		return ftrace_modify_code(ip, old, new);
	}

#ifdef CONFIG_MODULES
	/*
	 * Out of range jumps are called from modules.
	 * Being that we are converting from nop, it had better
	 * already have a module defined.
	 */
	if (!rec->arch.mod) {
		pr_err("No module loaded\n");
		return -EINVAL;
	}

	return __ftrace_make_call(rec, addr);
#else
	/* We should not get here without modules */
	return -EINVAL;
#endif /* CONFIG_MODULES */
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int old, new;
	int ret;

	old = *(unsigned int *)&ftrace_call;
	new = ftrace_call_replace(ip, (unsigned long)func, 1);
	ret = ftrace_modify_code(ip, old, new);

	return ret;
}

static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
	int ret;

	ret = ftrace_update_record(rec, enable);

	switch (ret) {
	case FTRACE_UPDATE_IGNORE:
		return 0;
	case FTRACE_UPDATE_MAKE_CALL:
		return ftrace_make_call(rec, ftrace_addr);
	case FTRACE_UPDATE_MAKE_NOP:
		return ftrace_make_nop(NULL, rec, ftrace_addr);
	}

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	int ret;

	for (iter = ftrace_rec_iter_start(); iter;
	     iter = ftrace_rec_iter_next(iter)) {
		rec = ftrace_rec_iter_record(iter);
		ret = __ftrace_replace_code(rec, enable);
		if (ret) {
			ftrace_bug(ret, rec);
			return;
		}
	}
}

/*
 * Use the default ftrace_modify_all_code, but without
 * stop_machine().
 */
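/*
 * patch_instruction() rewrites a single naturally aligned word and
 * flushes the icache for it. Aligned 4-byte stores are atomic on
 * powerpc, so other CPUs fetch either the old or the new instruction,
 * and we can patch live call sites without serialising the machine.
 */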
526 */ 527 void arch_ftrace_update_code(int command) 528 { 529 ftrace_modify_all_code(command); 530 } 531 532 int __init ftrace_dyn_arch_init(void) 533 { 534 return 0; 535 } 536 #endif /* CONFIG_DYNAMIC_FTRACE */ 537 538 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 539 540 #ifdef CONFIG_DYNAMIC_FTRACE 541 extern void ftrace_graph_call(void); 542 extern void ftrace_graph_stub(void); 543 544 int ftrace_enable_ftrace_graph_caller(void) 545 { 546 unsigned long ip = (unsigned long)(&ftrace_graph_call); 547 unsigned long addr = (unsigned long)(&ftrace_graph_caller); 548 unsigned long stub = (unsigned long)(&ftrace_graph_stub); 549 unsigned int old, new; 550 551 old = ftrace_call_replace(ip, stub, 0); 552 new = ftrace_call_replace(ip, addr, 0); 553 554 return ftrace_modify_code(ip, old, new); 555 } 556 557 int ftrace_disable_ftrace_graph_caller(void) 558 { 559 unsigned long ip = (unsigned long)(&ftrace_graph_call); 560 unsigned long addr = (unsigned long)(&ftrace_graph_caller); 561 unsigned long stub = (unsigned long)(&ftrace_graph_stub); 562 unsigned int old, new; 563 564 old = ftrace_call_replace(ip, addr, 0); 565 new = ftrace_call_replace(ip, stub, 0); 566 567 return ftrace_modify_code(ip, old, new); 568 } 569 #endif /* CONFIG_DYNAMIC_FTRACE */ 570 571 /* 572 * Hook the return address and push it in the stack of return addrs 573 * in current thread info. Return the address we want to divert to. 574 */ 575 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) 576 { 577 struct ftrace_graph_ent trace; 578 unsigned long return_hooker; 579 580 if (unlikely(ftrace_graph_is_dead())) 581 goto out; 582 583 if (unlikely(atomic_read(¤t->tracing_graph_pause))) 584 goto out; 585 586 return_hooker = ppc_function_entry(return_to_handler); 587 588 trace.func = ip; 589 trace.depth = current->curr_ret_stack + 1; 590 591 /* Only trace if the calling function expects to */ 592 if (!ftrace_graph_entry(&trace)) 593 goto out; 594 595 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0, 596 NULL) == -EBUSY) 597 goto out; 598 599 parent = return_hooker; 600 out: 601 return parent; 602 } 603 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 604 605 #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) 606 unsigned long __init arch_syscall_addr(int nr) 607 { 608 return sys_call_table[nr*2]; 609 } 610 #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ 611 612 #ifdef PPC64_ELF_ABI_v1 613 char *arch_ftrace_match_adjust(char *str, const char *search) 614 { 615 if (str[0] == '.' && search[0] != '.') 616 return str + 1; 617 else 618 return str; 619 } 620 #endif /* PPC64_ELF_ABI_v1 */ 621