// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation ( includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/moduleloader.h>
#include <linux/set_memory.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return (addr >= (unsigned long)__kprobes_text_start &&
		addr < (unsigned long)__kprobes_text_end) ||
	       (addr >= (unsigned long)_stext &&
		addr < (unsigned long)__head_end);
}

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	kprobe_opcode_t *addr = NULL;

#ifdef CONFIG_PPC64_ELF_ABI_V2
	/* PPC64 ABIv2 needs local entry point */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		unsigned long faddr;
		/*
		 * Per livepatch.h, ftrace location is always within the first
		 * 16 bytes of a function on powerpc with -mprofile-kernel.
		 */
		faddr = ftrace_location_range((unsigned long)addr,
					      (unsigned long)addr + 16);
		if (faddr)
			addr = (kprobe_opcode_t *)faddr;
		else
#endif
			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
	}
#elif defined(CONFIG_PPC64_ELF_ABI_V1)
	/*
	 * 64bit powerpc ABIv1 uses function descriptors:
	 * - Check for the dot variant of the symbol first.
	 * - If that fails, try looking up the symbol provided.
	 *
	 * This ensures we always get to the actual symbol and not
	 * the descriptor.
	 *
	 * Also handle <module:symbol> format.
	 */
	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
	bool dot_appended = false;
	const char *c;
	ssize_t ret = 0;
	int len = 0;

	if ((c = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
		c++;
		len = c - name;
		memcpy(dot_name, name, len);
	} else
		c = name;

	if (*c != '\0' && *c != '.') {
		dot_name[len++] = '.';
		dot_appended = true;
	}
	ret = strscpy(dot_name + len, c, KSYM_NAME_LEN);
	if (ret > 0)
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);

	/* Fallback to the original non-dot symbol lookup */
	if (!addr && dot_appended)
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#else
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

	return addr;
}
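
/*
 * A probe offset is treated as "on function entry" if it still lands at or
 * before the point where the function body really starts: on ELFv2 the code
 * below assumes the local entry point sits at most two instructions (8 bytes)
 * of TOC setup past the global entry point, and with KPROBES_ON_FTRACE the
 * ftrace location may be up to 16 bytes in (see the -mprofile-kernel note in
 * kprobe_lookup_name() above). On other ABIs only offset 0 qualifies.
 */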
static bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef CONFIG_PPC64_ELF_ABI_V2
#ifdef CONFIG_KPROBES_ON_FTRACE
	return offset <= 16;
#else
	return offset <= 8;
#endif
#else
	return !offset;
#endif
}

/* XXX try and fold the magic of kprobe_lookup_name() in this */
kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
					 bool *on_func_entry)
{
	*on_func_entry = arch_kprobe_on_func_entry(offset);
	return (kprobe_opcode_t *)(addr + offset);
}

void *alloc_insn_page(void)
{
	void *page;

	page = module_alloc(PAGE_SIZE);
	if (!page)
		return NULL;

	if (strict_module_rwx_enabled())
		set_memory_rox((unsigned long)page, 1);

	return page;
}
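
/*
 * Validate the probe address (it must be word aligned, single-steppable and
 * not the second word of a prefixed instruction) and copy the probed
 * instruction into an insn slot so it can be single stepped out of line.
 */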
int arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *prev;
	ppc_inst_t insn = ppc_inst_read(p->addr);

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (!can_single_step(ppc_inst_val(insn))) {
		printk("Cannot register a kprobe on instructions that can't be single stepped\n");
		ret = -EINVAL;
	} else if ((unsigned long)p->addr & ~PAGE_MASK &&
		   ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
		printk("Cannot register a kprobe on the second word of prefixed instruction\n");
		ret = -EINVAL;
	}
	prev = get_kprobe(p->addr - 1);

	/*
	 * When prev is a ftrace-based kprobe, we don't have an insn, and it
	 * doesn't probe for prefixed instruction.
	 */
	if (prev && !kprobe_ftrace(prev) &&
	    ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
		printk("Cannot register a kprobe on the second word of prefixed instruction\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64.  This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		patch_instruction(p->ainsn.insn, insn);
		p->opcode = ppc_inst_val(insn);
	}

	p->ainsn.boostable = 0;
	return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

void arch_arm_kprobe(struct kprobe *p)
{
	WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
	WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(p->opcode)));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant as values in regs could play a part in
	 * whether the trap is taken or not
	 */
	regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
}

static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					       struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_msr = regs->msr;
}

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)__kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
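
/*
 * Try to emulate the probed instruction with emulate_step(): a return of 1
 * means it was emulated (and regs->nip has already been advanced), 0 means
 * it could not be emulated this time and must be single stepped out of line,
 * and a negative value means the instruction cannot be stepped at all, which
 * should have been rejected at registration time by can_single_step().
 */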
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	ppc_inst_t insn = ppc_inst_read(p->ainsn.insn);

	/* regs->nip is also adjusted if emulate_step returns 1 */
	ret = emulate_step(regs, insn);
	if (ret > 0) {
		/*
		 * Once this instruction has been boosted
		 * successfully, set the boostable flag
		 */
		if (unlikely(p->ainsn.boostable == 0))
			p->ainsn.boostable = 1;
	} else if (ret < 0) {
		/*
		 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
		 * So, we should never get here... but, it's still
		 * good to catch them, just in case...
		 */
		printk("Can't step on instruction %08lx\n", ppc_inst_as_ulong(insn));
		BUG();
	} else {
		/*
		 * If we haven't previously emulated this instruction, then it
		 * can't be boosted. Note it down so we don't try to do so again.
		 *
		 * If, however, we had emulated this instruction in the past,
		 * then this is just an error with the current run (for
		 * instance, exceptions due to a load/store). We return 0 so
		 * that this is now single-stepped, but continue to try
		 * emulating it in subsequent probe hits.
		 */
		if (unlikely(p->ainsn.boostable != 1))
			p->ainsn.boostable = -1;
	}

	return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);

int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	if (!IS_ENABLED(CONFIG_BOOKE) &&
	    (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
		return 0;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	p = get_kprobe(addr);
	if (!p) {
		unsigned int instr;

		if (get_kernel_nofault(instr, addr))
			goto no_kprobe;

		if (instr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			if (is_trap(instr))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		kprobe_opcode_t insn = *p->ainsn.insn;
		if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
			/* Turn off 'trace' bits */
			regs_set_return_msr(regs,
					    (regs->msr & ~MSR_SINGLESTEP) |
					    kcb->kprobe_saved_msr);
			goto no_kprobe;
		}

		/*
		 * We have reentered the kprobe_handler(), since another probe
		 * was hit while within the handler. We here save the original
		 * kprobes variables and just single step on the instruction of
		 * the new probe without calling any user handlers.
		 */
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kprobes_inc_nmissed_count(p);
		kcb->kprobe_status = KPROBE_REENTER;
		if (p->ainsn.boostable >= 0) {
			ret = try_to_emulate(p, regs);

			if (ret > 0) {
				restore_previous_kprobe(kcb);
				preempt_enable();
				return 1;
			}
		}
		prepare_singlestep(p, regs);
		return 1;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler changed execution path, so skip ss setup */
		reset_current_kprobe();
		preempt_enable();
		return 1;
	}

	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);

		if (ret > 0) {
			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable();
			return 1;
		}
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable();
	return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
asm(".global __kretprobe_trampoline\n"
	".type __kretprobe_trampoline, @function\n"
	"__kretprobe_trampoline:\n"
	"nop\n"
	"blr\n"
	".size __kretprobe_trampoline, .-__kretprobe_trampoline\n");
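
/*
 * The trampoline's leading nop is turned into a trap when arch_init_kprobes()
 * registers trampoline_p below. A probed function that returns through the
 * hijacked link register therefore lands on the trampoline, takes the trap,
 * and reaches trampoline_probe_handler() via kprobe_handler(), which recovers
 * the original return address.
 */
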
/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long orig_ret_address;

	orig_ret_address = __kretprobe_trampoline_handler(regs, NULL);
	/*
	 * We get here through one of two paths:
	 * 1. by taking a trap -> kprobe_handler() -> here
	 * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
	 *
	 * When going back through (1), we need regs->nip to be setup properly
	 * as it is used to determine the return address from the trap.
	 * For (2), since nip is not honoured with optprobes, we instead setup
	 * the link register properly so that the subsequent 'blr' in
	 * __kretprobe_trampoline jumps back to the right instruction.
	 *
	 * For nip, we should set the address to the previous instruction since
	 * we end up emulating it in kprobe_handler(), which increments the nip
	 * again.
	 */
	regs_set_return_ip(regs, orig_ret_address - 4);
	regs->link = orig_ret_address;

	return 0;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
	int len;
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur || user_mode(regs))
		return 0;

	len = ppc_inst_len(ppc_inst_read(cur->ainsn.insn));
	/* make sure we got here for instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Adjust nip to after the single-stepped instruction */
	regs_set_return_ip(regs, (unsigned long)cur->addr + len);
	regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs_set_return_ip(regs, (unsigned long)cur->addr);
		/* Turn off 'trace' bits */
		regs_set_return_msr(regs,
				    (regs->msr & ~MSR_SINGLESTEP) |
				    kcb->kprobe_saved_msr);
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs_set_return_ip(regs, extable_fixup(entry));
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);