1 /* 2 * Kernel Probes (KProbes) 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License as published by 6 * the Free Software Foundation; either version 2 of the License, or 7 * (at your option) any later version. 8 * 9 * This program is distributed in the hope that it will be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write to the Free Software 16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 17 * 18 * Copyright (C) IBM Corporation, 2002, 2004 19 * 20 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel 21 * Probes initial implementation ( includes contributions from 22 * Rusty Russell). 23 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes 24 * interface to access function arguments. 25 * 2004-Nov Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port 26 * for PPC64 27 */ 28 29 #include <linux/kprobes.h> 30 #include <linux/ptrace.h> 31 #include <linux/preempt.h> 32 #include <linux/extable.h> 33 #include <linux/kdebug.h> 34 #include <linux/slab.h> 35 #include <asm/code-patching.h> 36 #include <asm/cacheflush.h> 37 #include <asm/sstep.h> 38 #include <asm/sections.h> 39 #include <linux/uaccess.h> 40 41 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 42 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 43 44 struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}}; 45 46 bool arch_within_kprobe_blacklist(unsigned long addr) 47 { 48 return (addr >= (unsigned long)__kprobes_text_start && 49 addr < (unsigned long)__kprobes_text_end) || 50 (addr >= (unsigned long)_stext && 51 addr < (unsigned long)__head_end); 52 } 53 54 kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset) 55 { 56 kprobe_opcode_t *addr = NULL; 57 58 #ifdef PPC64_ELF_ABI_v2 59 /* PPC64 ABIv2 needs local entry point */ 60 addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); 61 if (addr && !offset) { 62 #ifdef CONFIG_KPROBES_ON_FTRACE 63 unsigned long faddr; 64 /* 65 * Per livepatch.h, ftrace location is always within the first 66 * 16 bytes of a function on powerpc with -mprofile-kernel. 67 */ 68 faddr = ftrace_location_range((unsigned long)addr, 69 (unsigned long)addr + 16); 70 if (faddr) 71 addr = (kprobe_opcode_t *)faddr; 72 else 73 #endif 74 addr = (kprobe_opcode_t *)ppc_function_entry(addr); 75 } 76 #elif defined(PPC64_ELF_ABI_v1) 77 /* 78 * 64bit powerpc ABIv1 uses function descriptors: 79 * - Check for the dot variant of the symbol first. 80 * - If that fails, try looking up the symbol provided. 81 * 82 * This ensures we always get to the actual symbol and not 83 * the descriptor. 84 * 85 * Also handle <module:symbol> format. 86 */ 87 char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN]; 88 bool dot_appended = false; 89 const char *c; 90 ssize_t ret = 0; 91 int len = 0; 92 93 if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) { 94 c++; 95 len = c - name; 96 memcpy(dot_name, name, len); 97 } else 98 c = name; 99 100 if (*c != '\0' && *c != '.') { 101 dot_name[len++] = '.'; 102 dot_appended = true; 103 } 104 ret = strscpy(dot_name + len, c, KSYM_NAME_LEN); 105 if (ret > 0) 106 addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name); 107 108 /* Fallback to the original non-dot symbol lookup */ 109 if (!addr && dot_appended) 110 addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); 111 #else 112 addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); 113 #endif 114 115 return addr; 116 } 117 118 int arch_prepare_kprobe(struct kprobe *p) 119 { 120 int ret = 0; 121 kprobe_opcode_t insn = *p->addr; 122 123 if ((unsigned long)p->addr & 0x03) { 124 printk("Attempt to register kprobe at an unaligned address\n"); 125 ret = -EINVAL; 126 } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) { 127 printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n"); 128 ret = -EINVAL; 129 } 130 131 /* insn must be on a special executable page on ppc64. This is 132 * not explicitly required on ppc32 (right now), but it doesn't hurt */ 133 if (!ret) { 134 p->ainsn.insn = get_insn_slot(); 135 if (!p->ainsn.insn) 136 ret = -ENOMEM; 137 } 138 139 if (!ret) { 140 memcpy(p->ainsn.insn, p->addr, 141 MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); 142 p->opcode = *p->addr; 143 flush_icache_range((unsigned long)p->ainsn.insn, 144 (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t)); 145 } 146 147 p->ainsn.boostable = 0; 148 return ret; 149 } 150 NOKPROBE_SYMBOL(arch_prepare_kprobe); 151 152 void arch_arm_kprobe(struct kprobe *p) 153 { 154 patch_instruction(p->addr, BREAKPOINT_INSTRUCTION); 155 } 156 NOKPROBE_SYMBOL(arch_arm_kprobe); 157 158 void arch_disarm_kprobe(struct kprobe *p) 159 { 160 patch_instruction(p->addr, p->opcode); 161 } 162 NOKPROBE_SYMBOL(arch_disarm_kprobe); 163 164 void arch_remove_kprobe(struct kprobe *p) 165 { 166 if (p->ainsn.insn) { 167 free_insn_slot(p->ainsn.insn, 0); 168 p->ainsn.insn = NULL; 169 } 170 } 171 NOKPROBE_SYMBOL(arch_remove_kprobe); 172 173 static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) 174 { 175 enable_single_step(regs); 176 177 /* 178 * On powerpc we should single step on the original 179 * instruction even if the probed insn is a trap 180 * variant as values in regs could play a part in 181 * if the trap is taken or not 182 */ 183 regs->nip = (unsigned long)p->ainsn.insn; 184 } 185 186 static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) 187 { 188 kcb->prev_kprobe.kp = kprobe_running(); 189 kcb->prev_kprobe.status = kcb->kprobe_status; 190 kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr; 191 } 192 193 static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb) 194 { 195 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); 196 kcb->kprobe_status = kcb->prev_kprobe.status; 197 kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; 198 } 199 200 static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, 201 struct kprobe_ctlblk *kcb) 202 { 203 __this_cpu_write(current_kprobe, p); 204 kcb->kprobe_saved_msr = regs->msr; 205 } 206 207 bool arch_kprobe_on_func_entry(unsigned long offset) 208 { 209 #ifdef PPC64_ELF_ABI_v2 210 #ifdef CONFIG_KPROBES_ON_FTRACE 211 return offset <= 16; 212 #else 213 return offset <= 8; 214 #endif 215 #else 216 return !offset; 217 #endif 218 } 219 220 void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) 221 { 222 ri->ret_addr = (kprobe_opcode_t *)regs->link; 223 224 /* Replace the return addr with trampoline addr */ 225 regs->link = (unsigned long)kretprobe_trampoline; 226 } 227 NOKPROBE_SYMBOL(arch_prepare_kretprobe); 228 229 static int try_to_emulate(struct kprobe *p, struct pt_regs *regs) 230 { 231 int ret; 232 unsigned int insn = *p->ainsn.insn; 233 234 /* regs->nip is also adjusted if emulate_step returns 1 */ 235 ret = emulate_step(regs, insn); 236 if (ret > 0) { 237 /* 238 * Once this instruction has been boosted 239 * successfully, set the boostable flag 240 */ 241 if (unlikely(p->ainsn.boostable == 0)) 242 p->ainsn.boostable = 1; 243 } else if (ret < 0) { 244 /* 245 * We don't allow kprobes on mtmsr(d)/rfi(d), etc. 246 * So, we should never get here... but, its still 247 * good to catch them, just in case... 248 */ 249 printk("Can't step on instruction %x\n", insn); 250 BUG(); 251 } else { 252 /* 253 * If we haven't previously emulated this instruction, then it 254 * can't be boosted. Note it down so we don't try to do so again. 255 * 256 * If, however, we had emulated this instruction in the past, 257 * then this is just an error with the current run (for 258 * instance, exceptions due to a load/store). We return 0 so 259 * that this is now single-stepped, but continue to try 260 * emulating it in subsequent probe hits. 261 */ 262 if (unlikely(p->ainsn.boostable != 1)) 263 p->ainsn.boostable = -1; 264 } 265 266 return ret; 267 } 268 NOKPROBE_SYMBOL(try_to_emulate); 269 270 int kprobe_handler(struct pt_regs *regs) 271 { 272 struct kprobe *p; 273 int ret = 0; 274 unsigned int *addr = (unsigned int *)regs->nip; 275 struct kprobe_ctlblk *kcb; 276 277 if (user_mode(regs)) 278 return 0; 279 280 /* 281 * We don't want to be preempted for the entire 282 * duration of kprobe processing 283 */ 284 preempt_disable(); 285 kcb = get_kprobe_ctlblk(); 286 287 /* Check we're not actually recursing */ 288 if (kprobe_running()) { 289 p = get_kprobe(addr); 290 if (p) { 291 kprobe_opcode_t insn = *p->ainsn.insn; 292 if (kcb->kprobe_status == KPROBE_HIT_SS && 293 is_trap(insn)) { 294 /* Turn off 'trace' bits */ 295 regs->msr &= ~MSR_SINGLESTEP; 296 regs->msr |= kcb->kprobe_saved_msr; 297 goto no_kprobe; 298 } 299 /* We have reentered the kprobe_handler(), since 300 * another probe was hit while within the handler. 301 * We here save the original kprobes variables and 302 * just single step on the instruction of the new probe 303 * without calling any user handlers. 304 */ 305 save_previous_kprobe(kcb); 306 set_current_kprobe(p, regs, kcb); 307 kprobes_inc_nmissed_count(p); 308 kcb->kprobe_status = KPROBE_REENTER; 309 if (p->ainsn.boostable >= 0) { 310 ret = try_to_emulate(p, regs); 311 312 if (ret > 0) { 313 restore_previous_kprobe(kcb); 314 preempt_enable_no_resched(); 315 return 1; 316 } 317 } 318 prepare_singlestep(p, regs); 319 return 1; 320 } else { 321 if (*addr != BREAKPOINT_INSTRUCTION) { 322 /* If trap variant, then it belongs not to us */ 323 kprobe_opcode_t cur_insn = *addr; 324 if (is_trap(cur_insn)) 325 goto no_kprobe; 326 /* The breakpoint instruction was removed by 327 * another cpu right after we hit, no further 328 * handling of this interrupt is appropriate 329 */ 330 ret = 1; 331 goto no_kprobe; 332 } 333 p = __this_cpu_read(current_kprobe); 334 if (p->break_handler && p->break_handler(p, regs)) { 335 if (!skip_singlestep(p, regs, kcb)) 336 goto ss_probe; 337 ret = 1; 338 } 339 } 340 goto no_kprobe; 341 } 342 343 p = get_kprobe(addr); 344 if (!p) { 345 if (*addr != BREAKPOINT_INSTRUCTION) { 346 /* 347 * PowerPC has multiple variants of the "trap" 348 * instruction. If the current instruction is a 349 * trap variant, it could belong to someone else 350 */ 351 kprobe_opcode_t cur_insn = *addr; 352 if (is_trap(cur_insn)) 353 goto no_kprobe; 354 /* 355 * The breakpoint instruction was removed right 356 * after we hit it. Another cpu has removed 357 * either a probepoint or a debugger breakpoint 358 * at this address. In either case, no further 359 * handling of this interrupt is appropriate. 360 */ 361 ret = 1; 362 } 363 /* Not one of ours: let kernel handle it */ 364 goto no_kprobe; 365 } 366 367 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 368 set_current_kprobe(p, regs, kcb); 369 if (p->pre_handler && p->pre_handler(p, regs)) 370 /* handler has already set things up, so skip ss setup */ 371 return 1; 372 373 ss_probe: 374 if (p->ainsn.boostable >= 0) { 375 ret = try_to_emulate(p, regs); 376 377 if (ret > 0) { 378 if (p->post_handler) 379 p->post_handler(p, regs, 0); 380 381 kcb->kprobe_status = KPROBE_HIT_SSDONE; 382 reset_current_kprobe(); 383 preempt_enable_no_resched(); 384 return 1; 385 } 386 } 387 prepare_singlestep(p, regs); 388 kcb->kprobe_status = KPROBE_HIT_SS; 389 return 1; 390 391 no_kprobe: 392 preempt_enable_no_resched(); 393 return ret; 394 } 395 NOKPROBE_SYMBOL(kprobe_handler); 396 397 /* 398 * Function return probe trampoline: 399 * - init_kprobes() establishes a probepoint here 400 * - When the probed function returns, this probe 401 * causes the handlers to fire 402 */ 403 asm(".global kretprobe_trampoline\n" 404 ".type kretprobe_trampoline, @function\n" 405 "kretprobe_trampoline:\n" 406 "nop\n" 407 "blr\n" 408 ".size kretprobe_trampoline, .-kretprobe_trampoline\n"); 409 410 /* 411 * Called when the probe at kretprobe trampoline is hit 412 */ 413 static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) 414 { 415 struct kretprobe_instance *ri = NULL; 416 struct hlist_head *head, empty_rp; 417 struct hlist_node *tmp; 418 unsigned long flags, orig_ret_address = 0; 419 unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; 420 421 INIT_HLIST_HEAD(&empty_rp); 422 kretprobe_hash_lock(current, &head, &flags); 423 424 /* 425 * It is possible to have multiple instances associated with a given 426 * task either because an multiple functions in the call path 427 * have a return probe installed on them, and/or more than one return 428 * return probe was registered for a target function. 429 * 430 * We can handle this because: 431 * - instances are always inserted at the head of the list 432 * - when multiple return probes are registered for the same 433 * function, the first instance's ret_addr will point to the 434 * real return address, and all the rest will point to 435 * kretprobe_trampoline 436 */ 437 hlist_for_each_entry_safe(ri, tmp, head, hlist) { 438 if (ri->task != current) 439 /* another task is sharing our hash bucket */ 440 continue; 441 442 if (ri->rp && ri->rp->handler) 443 ri->rp->handler(ri, regs); 444 445 orig_ret_address = (unsigned long)ri->ret_addr; 446 recycle_rp_inst(ri, &empty_rp); 447 448 if (orig_ret_address != trampoline_address) 449 /* 450 * This is the real return address. Any other 451 * instances associated with this task are for 452 * other calls deeper on the call stack 453 */ 454 break; 455 } 456 457 kretprobe_assert(ri, orig_ret_address, trampoline_address); 458 459 /* 460 * We get here through one of two paths: 461 * 1. by taking a trap -> kprobe_handler() -> here 462 * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here 463 * 464 * When going back through (1), we need regs->nip to be setup properly 465 * as it is used to determine the return address from the trap. 466 * For (2), since nip is not honoured with optprobes, we instead setup 467 * the link register properly so that the subsequent 'blr' in 468 * kretprobe_trampoline jumps back to the right instruction. 469 * 470 * For nip, we should set the address to the previous instruction since 471 * we end up emulating it in kprobe_handler(), which increments the nip 472 * again. 473 */ 474 regs->nip = orig_ret_address - 4; 475 regs->link = orig_ret_address; 476 477 kretprobe_hash_unlock(current, &flags); 478 479 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { 480 hlist_del(&ri->hlist); 481 kfree(ri); 482 } 483 484 return 0; 485 } 486 NOKPROBE_SYMBOL(trampoline_probe_handler); 487 488 /* 489 * Called after single-stepping. p->addr is the address of the 490 * instruction whose first byte has been replaced by the "breakpoint" 491 * instruction. To avoid the SMP problems that can occur when we 492 * temporarily put back the original opcode to single-step, we 493 * single-stepped a copy of the instruction. The address of this 494 * copy is p->ainsn.insn. 495 */ 496 int kprobe_post_handler(struct pt_regs *regs) 497 { 498 struct kprobe *cur = kprobe_running(); 499 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 500 501 if (!cur || user_mode(regs)) 502 return 0; 503 504 /* make sure we got here for instruction we have a kprobe on */ 505 if (((unsigned long)cur->ainsn.insn + 4) != regs->nip) 506 return 0; 507 508 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { 509 kcb->kprobe_status = KPROBE_HIT_SSDONE; 510 cur->post_handler(cur, regs, 0); 511 } 512 513 /* Adjust nip to after the single-stepped instruction */ 514 regs->nip = (unsigned long)cur->addr + 4; 515 regs->msr |= kcb->kprobe_saved_msr; 516 517 /*Restore back the original saved kprobes variables and continue. */ 518 if (kcb->kprobe_status == KPROBE_REENTER) { 519 restore_previous_kprobe(kcb); 520 goto out; 521 } 522 reset_current_kprobe(); 523 out: 524 preempt_enable_no_resched(); 525 526 /* 527 * if somebody else is singlestepping across a probe point, msr 528 * will have DE/SE set, in which case, continue the remaining processing 529 * of do_debug, as if this is not a probe hit. 530 */ 531 if (regs->msr & MSR_SINGLESTEP) 532 return 0; 533 534 return 1; 535 } 536 NOKPROBE_SYMBOL(kprobe_post_handler); 537 538 int kprobe_fault_handler(struct pt_regs *regs, int trapnr) 539 { 540 struct kprobe *cur = kprobe_running(); 541 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 542 const struct exception_table_entry *entry; 543 544 switch(kcb->kprobe_status) { 545 case KPROBE_HIT_SS: 546 case KPROBE_REENTER: 547 /* 548 * We are here because the instruction being single 549 * stepped caused a page fault. We reset the current 550 * kprobe and the nip points back to the probe address 551 * and allow the page fault handler to continue as a 552 * normal page fault. 553 */ 554 regs->nip = (unsigned long)cur->addr; 555 regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */ 556 regs->msr |= kcb->kprobe_saved_msr; 557 if (kcb->kprobe_status == KPROBE_REENTER) 558 restore_previous_kprobe(kcb); 559 else 560 reset_current_kprobe(); 561 preempt_enable_no_resched(); 562 break; 563 case KPROBE_HIT_ACTIVE: 564 case KPROBE_HIT_SSDONE: 565 /* 566 * We increment the nmissed count for accounting, 567 * we can also use npre/npostfault count for accounting 568 * these specific fault cases. 569 */ 570 kprobes_inc_nmissed_count(cur); 571 572 /* 573 * We come here because instructions in the pre/post 574 * handler caused the page_fault, this could happen 575 * if handler tries to access user space by 576 * copy_from_user(), get_user() etc. Let the 577 * user-specified handler try to fix it first. 578 */ 579 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) 580 return 1; 581 582 /* 583 * In case the user-specified fault handler returned 584 * zero, try to fix up. 585 */ 586 if ((entry = search_exception_tables(regs->nip)) != NULL) { 587 regs->nip = extable_fixup(entry); 588 return 1; 589 } 590 591 /* 592 * fixup_exception() could not handle it, 593 * Let do_page_fault() fix it. 594 */ 595 break; 596 default: 597 break; 598 } 599 return 0; 600 } 601 NOKPROBE_SYMBOL(kprobe_fault_handler); 602 603 unsigned long arch_deref_entry_point(void *entry) 604 { 605 #ifdef PPC64_ELF_ABI_v1 606 if (!kernel_text_address((unsigned long)entry)) 607 return ppc_global_function_entry(entry); 608 else 609 #endif 610 return (unsigned long)entry; 611 } 612 NOKPROBE_SYMBOL(arch_deref_entry_point); 613 614 int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 615 { 616 struct jprobe *jp = container_of(p, struct jprobe, kp); 617 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 618 619 memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); 620 621 /* setup return addr to the jprobe handler routine */ 622 regs->nip = arch_deref_entry_point(jp->entry); 623 #ifdef PPC64_ELF_ABI_v2 624 regs->gpr[12] = (unsigned long)jp->entry; 625 #elif defined(PPC64_ELF_ABI_v1) 626 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); 627 #endif 628 629 /* 630 * jprobes use jprobe_return() which skips the normal return 631 * path of the function, and this messes up the accounting of the 632 * function graph tracer. 633 * 634 * Pause function graph tracing while performing the jprobe function. 635 */ 636 pause_graph_tracing(); 637 638 return 1; 639 } 640 NOKPROBE_SYMBOL(setjmp_pre_handler); 641 642 void __used jprobe_return(void) 643 { 644 asm volatile("jprobe_return_trap:\n" 645 "trap\n" 646 ::: "memory"); 647 } 648 NOKPROBE_SYMBOL(jprobe_return); 649 650 int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 651 { 652 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 653 654 if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) { 655 pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n", 656 regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap")); 657 return 0; 658 } 659 660 memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs)); 661 /* It's OK to start function graph tracing again */ 662 unpause_graph_tracing(); 663 preempt_enable_no_resched(); 664 return 1; 665 } 666 NOKPROBE_SYMBOL(longjmp_break_handler); 667 668 static struct kprobe trampoline_p = { 669 .addr = (kprobe_opcode_t *) &kretprobe_trampoline, 670 .pre_handler = trampoline_probe_handler 671 }; 672 673 int __init arch_init_kprobes(void) 674 { 675 return register_kprobe(&trampoline_p); 676 } 677 678 int arch_trampoline_kprobe(struct kprobe *p) 679 { 680 if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) 681 return 1; 682 683 return 0; 684 } 685 NOKPROBE_SYMBOL(arch_trampoline_kprobe); 686