/*
 * Kernel probes (kprobes) for SuperH
 *
 * Copyright (C) 2007 Chris Smith <chris.smith@st.com>
 * Copyright (C) 2006 Lineo Solutions, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static struct kprobe saved_current_opcode;
static struct kprobe saved_next_opcode;
static struct kprobe saved_next_opcode2;

#define OPCODE_JMP(x)	(((x) & 0xF0FF) == 0x402b)
#define OPCODE_JSR(x)	(((x) & 0xF0FF) == 0x400b)
#define OPCODE_BRA(x)	(((x) & 0xF000) == 0xa000)
#define OPCODE_BRAF(x)	(((x) & 0xF0FF) == 0x0023)
#define OPCODE_BSR(x)	(((x) & 0xF000) == 0xb000)
#define OPCODE_BSRF(x)	(((x) & 0xF0FF) == 0x0003)

#define OPCODE_BF_S(x)	(((x) & 0xFF00) == 0x8f00)
#define OPCODE_BT_S(x)	(((x) & 0xFF00) == 0x8d00)

#define OPCODE_BF(x)	(((x) & 0xFF00) == 0x8b00)
#define OPCODE_BT(x)	(((x) & 0xFF00) == 0x8900)

#define OPCODE_RTS(x)	(((x) & 0x000F) == 0x000b)
#define OPCODE_RTE(x)	(((x) & 0xFFFF) == 0x002b)

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t opcode = *(kprobe_opcode_t *)(p->addr);

	if (OPCODE_RTE(opcode))
		return -EFAULT;	/* Bad breakpoint */

	p->opcode = opcode;

	return 0;
}

void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (*p->addr == BREAKPOINT_INSTRUCTION)
		return 1;

	return 0;
}
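
/*
 * Illustrative sketch, not part of this file: the arch_* hooks above are
 * driven by the generic kprobes core when a client registers a probe.
 * With an arbitrary example symbol, a minimal client looks roughly like:
 *
 *	static int example_pre(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probe hit, pc=0x%08lx\n", regs->pc);
 *		return 0;
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= example_pre,
 *	};
 *
 * register_kprobe(&example_kp) ends up in arch_prepare_kprobe() (which
 * rejects probes on RTE) and arch_arm_kprobe() (which plants the
 * breakpoint); unregister_kprobe() restores the original opcode via
 * arch_disarm_kprobe().
 */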
/**
 * If an illegal slot instruction exception occurs for an address
 * containing a kprobe, remove the probe.
 *
 * Returns 0 if the exception was handled successfully, 1 otherwise.
 */
int __kprobes kprobe_handle_illslot(unsigned long pc)
{
	struct kprobe *p = get_kprobe((kprobe_opcode_t *) pc + 1);

	if (p != NULL) {
		printk(KERN_WARNING
		       "Removing kprobe from delay slot: 0x%.8x\n",
		       (unsigned int)pc + 2);
		unregister_kprobe(p);
		return 0;
	}

	return 1;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (saved_next_opcode.addr != NULL) {
		arch_disarm_kprobe(p);
		arch_disarm_kprobe(&saved_next_opcode);
		saved_next_opcode.addr = NULL;
		saved_next_opcode.opcode = 0;

		if (saved_next_opcode2.addr != NULL) {
			arch_disarm_kprobe(&saved_next_opcode2);
			saved_next_opcode2.addr = NULL;
			saved_next_opcode2.opcode = 0;
		}
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
}

/*
 * Single-stepping is implemented by disarming the current kprobe and
 * arming one on the next instruction, following branches.  Two probes
 * are set if the branch is conditional.
 */
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	saved_current_opcode.addr = (kprobe_opcode_t *)regs->pc;

	if (p != NULL) {
		arch_disarm_kprobe(p);

		if (OPCODE_JSR(p->opcode) || OPCODE_JMP(p->opcode)) {
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			saved_next_opcode.addr =
			    (kprobe_opcode_t *) regs->regs[reg_nr];
		} else if (OPCODE_BRA(p->opcode) || OPCODE_BSR(p->opcode)) {
			/* 12-bit signed displacement, in 2-byte units */
			long disp = (p->opcode & 0x0FFF);
			if (disp & 0x800)
				disp -= 0x1000;
			saved_next_opcode.addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);

		} else if (OPCODE_BRAF(p->opcode) || OPCODE_BSRF(p->opcode)) {
			unsigned int reg_nr = ((p->opcode >> 8) & 0x000F);
			saved_next_opcode.addr =
			    (kprobe_opcode_t *) (regs->pc + 4 +
						 regs->regs[reg_nr]);

		} else if (OPCODE_RTS(p->opcode)) {
			saved_next_opcode.addr = (kprobe_opcode_t *) regs->pr;

		} else if (OPCODE_BF(p->opcode) || OPCODE_BT(p->opcode)) {
			/* 8-bit signed displacement, in 2-byte units */
			long disp = (p->opcode & 0x00FF);
			if (disp & 0x80)
				disp -= 0x100;
			/* case 1: branch not taken, fall through */
			saved_next_opcode.addr = p->addr + 1;
			/* case 2: branch taken */
			saved_next_opcode2.addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
			arch_arm_kprobe(&saved_next_opcode2);

		} else if (OPCODE_BF_S(p->opcode) || OPCODE_BT_S(p->opcode)) {
			/* 8-bit signed displacement, in 2-byte units */
			long disp = (p->opcode & 0x00FF);
			if (disp & 0x80)
				disp -= 0x100;
			/* case 1: branch not taken, skip the delay slot */
			saved_next_opcode.addr = p->addr + 2;
			/* case 2: branch taken */
			saved_next_opcode2.addr =
			    (kprobe_opcode_t *) (regs->pc + 4 + disp * 2);
			saved_next_opcode2.opcode = *(saved_next_opcode2.addr);
			arch_arm_kprobe(&saved_next_opcode2);

		} else {
			saved_next_opcode.addr = p->addr + 1;
		}

		saved_next_opcode.opcode = *(saved_next_opcode.addr);
		arch_arm_kprobe(&saved_next_opcode);
	}
}
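
/*
 * Worked example of the branch decoding above (addresses are assumed,
 * for illustration only): a BRA with opcode 0xa00a probed at
 * pc = 0x8c001000 has disp = 0x00a, so the next probe is armed at
 * 0x8c001000 + 4 + 10 * 2 = 0x8c001018.  A backward branch such as
 * 0xaffe has disp = 0xffe, sign-extended to -2, giving
 * 0x8c001000 + 4 + (-2) * 2 = 0x8c001000, i.e. a branch to itself.
 */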
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->pr;

	/* Replace the return addr with trampoline addr */
	regs->pr = (unsigned long)kretprobe_trampoline;
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr = NULL;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	addr = (kprobe_opcode_t *) (regs->pc);

	/* Check that we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				goto no_kprobe;
			}
			/*
			 * We have re-entered kprobe_handler() because
			 * another probe was hit while within the handler.
			 * Save the original kprobe state and single-step
			 * the new probe's instruction without calling any
			 * user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		/* Not one of ours: let the kernel handle it */
		if (*(kprobe_opcode_t *)addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * For function-return probes, arch_init_kprobes() establishes a probepoint
 * here.  When a retprobed function returns, this probe is hit and
 * trampoline_probe_handler() runs, calling the kretprobe's handler.
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile (".globl kretprobe_trampoline\n"
		      "kretprobe_trampoline:\n\t"
		      "nop\n");
}
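
/*
 * Illustrative kretprobe client sketch (assumed names, not part of this
 * file): on SuperH the probed function's return value is in r0, so a
 * return handler can report it as:
 *
 *	static int example_ret(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "returned %ld\n", regs->regs[0]);
 *		return 0;
 *	}
 *
 *	static struct kretprobe example_rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= example_ret,
 *		.maxactive	= 16,
 *	};
 *
 * register_kretprobe(&example_rp) arms the probe; on each hit,
 * arch_prepare_kretprobe() above redirects pr to kretprobe_trampoline.
 */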
/*
 * Called when we hit the probe point at kretprobe_trampoline
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	regs->pc = orig_ret_address;
	kretprobe_hash_unlock(current, &flags);

	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return orig_ret_address;
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	kprobe_opcode_t *addr = NULL;
	struct kprobe *p = NULL;

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	if (saved_next_opcode.addr != NULL) {
		arch_disarm_kprobe(&saved_next_opcode);
		saved_next_opcode.addr = NULL;
		saved_next_opcode.opcode = 0;

		addr = saved_current_opcode.addr;
		saved_current_opcode.addr = NULL;

		p = get_kprobe(addr);
		arch_arm_kprobe(p);

		if (saved_next_opcode2.addr != NULL) {
			arch_disarm_kprobe(&saved_next_opcode2);
			saved_next_opcode2.addr = NULL;
			saved_next_opcode2.opcode = 0;
		}
	}

	/* Restore the previous kprobe state and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();

	return 1;
}
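
/*
 * Timeline sketch (illustrative) for a probe on a plain, non-branch
 * instruction at address A, tying kprobe_handler() and
 * post_kprobe_handler() together:
 *
 *	1. trap at A: kprobe_handler() runs the pre_handler, then
 *	   prepare_singlestep() disarms A and arms A + 2
 *	   (saved_next_opcode); status = KPROBE_HIT_SS
 *	2. the CPU executes the original instruction at A
 *	3. trap at A + 2: post_kprobe_handler() runs the post_handler,
 *	   disarms A + 2 and re-arms A for the next hit
 */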
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the pc back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->pc = (unsigned long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting; the
		 * npre/npostfault counts could also be used to account
		 * for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault.  This could happen if
		 * the handler tries to access user space via
		 * copy_from_user(), get_user(), etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up via the exception table.
		 */
		if ((entry = search_exception_tables(regs->pc)) != NULL) {
			regs->pc = entry->fixup;
			return 1;
		}

		/*
		 * The exception table could not handle it either;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct kprobe *p = NULL;
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;
	kprobe_opcode_t *addr = NULL;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	addr = (kprobe_opcode_t *) (args->regs->pc);
	if (val == DIE_TRAP) {
		if (!kprobe_running()) {
			if (kprobe_handler(args->regs)) {
				ret = NOTIFY_STOP;
			} else {
				/* Not a kprobe trap */
				ret = NOTIFY_DONE;
			}
		} else {
			p = get_kprobe(addr);
			if ((kcb->kprobe_status == KPROBE_HIT_SS) ||
			    (kcb->kprobe_status == KPROBE_REENTER)) {
				if (post_kprobe_handler(args->regs))
					ret = NOTIFY_STOP;
			} else {
				if (kprobe_handler(args->regs)) {
					ret = NOTIFY_STOP;
				} else {
					p = __get_cpu_var(current_kprobe);
					if (p->break_handler &&
					    p->break_handler(p, args->regs))
						ret = NOTIFY_STOP;
				}
			}
		}
	}

	return ret;
}
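
/*
 * Illustrative jprobe client sketch (assumed names, not part of this
 * file): the entry handler mirrors the probed function's signature,
 * sees its arguments, and must finish with jprobe_return():
 *
 *	static long jexample(unsigned long arg0, unsigned long arg1)
 *	{
 *		printk(KERN_INFO "args: %lu %lu\n", arg0, arg1);
 *		jprobe_return();
 *		return 0;	// never reached
 *	}
 *
 *	static struct jprobe example_jp = {
 *		.entry		= jexample,
 *		.kp.symbol_name	= "target_func",
 *	};
 *
 * register_jprobe(&example_jp) arms the probe; setjmp_pre_handler()
 * below saves the register and stack state before jumping to jexample().
 */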
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_r15 = regs->regs[15];
	addr = kcb->jprobe_saved_r15;

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization.  So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));

	regs->pc = (unsigned long)(jp->entry);

	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile ("trapa #0x3a\n\t"
		      "jprobe_return_end:\n\t"
		      "nop\n\t");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = kcb->jprobe_saved_r15;
	u8 *addr = (u8 *)regs->pc;

	if ((addr >= (u8 *)jprobe_return) &&
	    (addr <= (u8 *)jprobe_return_end)) {
		*regs = kcb->jprobe_saved_regs;

		memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));

		kcb->kprobe_status = KPROBE_HIT_SS;
		preempt_enable_no_resched();
		return 1;
	}

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	saved_next_opcode.addr = NULL;
	saved_next_opcode.opcode = 0;

	saved_current_opcode.addr = NULL;
	saved_current_opcode.opcode = 0;

	saved_next_opcode2.addr = NULL;
	saved_next_opcode2.opcode = 0;

	return register_kprobe(&trampoline_p);
}
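
/*
 * Illustrative call path (sketch, based on this file's DIE_TRAP check
 * and the generic kprobes core): the core registers
 * kprobe_exceptions_notify() on the die notifier chain, so a planted
 * trapa breakpoint reaches this file roughly as:
 *
 *	trap entry -> notify_die(DIE_TRAP, ...)
 *		   -> kprobe_exceptions_notify()
 *		   -> kprobe_handler() or post_kprobe_handler()
 */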