/* arch/sparc64/kernel/kprobes.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>

/* We do not have hardware single-stepping on sparc64.
 * So we implement software single-stepping with breakpoint
 * traps.  The top-level scheme is similar to that used
 * in the x86 kprobes implementation.
 *
 * In the kprobe->ainsn.insn[] array we store the original
 * instruction at index zero and a break instruction at
 * index one.
 *
 * When we hit a kprobe we:
 * - Run the pre-handler
 * - Remember "regs->tnpc" and interrupt level stored in
 *   "regs->tstate" so we can restore them later
 * - Disable PIL interrupts
 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
 * - Mark that we are actively in a kprobe
 *
 * At this point we wait for the second breakpoint at
 * kprobe->ainsn.insn[1] to hit.  When it does we:
 * - Run the post-handler
 * - Set regs->tpc to "remembered" regs->tnpc stored above,
 *   restore the PIL interrupt level in "regs->tstate" as well
 * - Make any adjustments necessary to regs->tnpc in order
 *   to handle relative branches correctly.  See below.
 * - Mark that we are no longer actively in a kprobe.
 */

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x3UL)
                return -EILSEQ;

        p->ainsn.insn[0] = *p->addr;
        flushi(&p->ainsn.insn[0]);

        p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
        flushi(&p->ainsn.insn[1]);

        p->opcode = *p->addr;
        return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flushi(p->addr);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flushi(p->addr);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
        kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
        kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                         struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_orig_tnpc = regs->tnpc;
        kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
                                         struct kprobe_ctlblk *kcb)
{
        regs->tstate |= TSTATE_PIL;

        /* Single step inline if it is a breakpoint instruction. */
        if (p->opcode == BREAKPOINT_INSTRUCTION) {
                regs->tpc = (unsigned long) p->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
        } else {
                regs->tpc = (unsigned long) &p->ainsn.insn[0];
                regs->tnpc = (unsigned long) &p->ainsn.insn[1];
        }
}
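
/* Breakpoint-trap dispatch: called via kprobe_exceptions_notify() when
 * the breakpoint planted at a probe site fires (DIE_DEBUG).  It deals
 * with reentrant hits (a probe hit from within another probe's
 * handlers), with breakpoints that another cpu removed after we
 * trapped, runs the user pre-handler, and finally sets up the
 * out-of-line single step of the original instruction.
 */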

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        void *addr = (void *) regs->tpc;
        int ret = 0;
        struct kprobe_ctlblk *kcb;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS) {
                                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                        kcb->kprobe_orig_tstate_pil);
                                goto no_kprobe;
                        }
                        /* We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * Here we save the original kprobes variables and
                         * just single step on the instruction of the new
                         * probe without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        kcb->kprobe_status = KPROBE_REENTER;
                        prepare_singlestep(p, regs, kcb);
                        return 1;
                } else {
                        if (*(u32 *) addr != BREAKPOINT_INSTRUCTION) {
                                /* The breakpoint instruction was removed by
                                 * another cpu right after we hit it, so no
                                 * further handling of this interrupt is
                                 * appropriate.
                                 */
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*(u32 *) addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let the kernel handle it */
                goto no_kprobe;
        }

        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        if (p->pre_handler && p->pre_handler(p, regs))
                return 1;

ss_probe:
        prepare_singlestep(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}
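
/* Encoding note (added for reference): in the sparc64 instruction set a
 * 'call' has op bits 31:30 == 01, which is what
 * (insn & 0xc0000000) == 0x40000000 matches in relbranch_fixup() below;
 * a branch-with-prediction (BPcc) is op == 00 / op2 == 001 and a
 * traditional Bicc branch is op == 00 / op2 == 010, matched by the two
 * 0xc1c00000 mask tests.
 */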

/* If INSN is a relative control transfer instruction,
 * return the corrected branch destination value.
 *
 * regs->tpc and regs->tnpc still hold the values of the
 * program counters at the time of trap due to the execution
 * of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1]
 */
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
                                               struct pt_regs *regs)
{
        unsigned long real_pc = (unsigned long) p->addr;

        /* Branch not taken, no mods necessary.  */
        if (regs->tnpc == regs->tpc + 0x4UL)
                return real_pc + 0x8UL;

        /* The three cases are call, branch w/prediction,
         * and traditional branch.
         */
        if ((insn & 0xc0000000) == 0x40000000 ||
            (insn & 0xc1c00000) == 0x00400000 ||
            (insn & 0xc1c00000) == 0x00800000) {
                unsigned long ainsn_addr;

                ainsn_addr = (unsigned long) &p->ainsn.insn[0];

                /* The instruction did all the work for us
                 * already, just apply the offset to the correct
                 * instruction location.
                 */
                return (real_pc + (regs->tnpc - ainsn_addr));
        }

        /* It is jmpl or some other absolute PC modification instruction,
         * leave NPC as-is.
         */
        return regs->tnpc;
}

/* If INSN is an instruction which writes its PC location
 * into a destination register, fix that up.
 */
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
                                  unsigned long real_pc)
{
        unsigned long *slot = NULL;

        /* Simplest case is 'call', which always uses %o7 */
        if ((insn & 0xc0000000) == 0x40000000) {
                slot = &regs->u_regs[UREG_I7];
        }

        /* 'jmpl' encodes the register inside of the opcode */
        if ((insn & 0xc1f80000) == 0x81c00000) {
                unsigned long rd = ((insn >> 25) & 0x1f);

                if (rd <= 15) {
                        slot = &regs->u_regs[rd];
                } else {
                        /* Hard case, it goes onto the stack. */
                        flushw_all();

                        rd -= 16;
                        slot = (unsigned long *)
                                (regs->u_regs[UREG_FP] + STACK_BIAS);
                        slot += rd;
                }
        }
        if (slot != NULL)
                *slot = real_pc;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the breakpoint
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is &p->ainsn.insn[0].
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
                struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
        u32 insn = p->ainsn.insn[0];

        regs->tnpc = relbranch_fixup(insn, p, regs);

        /* This assignment must occur after relbranch_fixup() */
        regs->tpc = kcb->kprobe_orig_tnpc;

        retpc_fixup(regs, insn, (unsigned long) p->addr);

        regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                        kcb->kprobe_orig_tstate_pil);
}
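
/* Second-breakpoint handler: runs when the BREAKPOINT_INSTRUCTION_2
 * placed at p->ainsn.insn[1] traps (reported as DIE_DEBUG_2), i.e.
 * after the copied original instruction has been single-stepped.  It
 * invokes the user post-handler, then resume_execution() rewrites
 * tpc/tnpc and restores the saved PIL level so normal execution
 * continues past the probe.
 */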

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault.  We reset the current
                 * kprobe so that tpc points back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->tpc = (unsigned long) cur->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                kcb->kprobe_orig_tstate_pil);
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * the npre/npostfault counts could also be used to
                 * account for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault.  This could happen
                 * if the handler tries to access user space via
                 * copy_from_user(), get_user() etc.  Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */

                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it,
                 * let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }

        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *) data;
        int ret = NOTIFY_DONE;

        if (args->regs && user_mode(args->regs))
                return ret;

        switch (val) {
        case DIE_DEBUG:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_DEBUG_2:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
                                      struct pt_regs *regs)
{
        BUG_ON(trap_level != 0x170 && trap_level != 0x171);

        if (user_mode(regs)) {
                local_irq_enable();
                bad_trap(regs, trap_level);
                return;
        }

        /* trap_level == 0x170 --> ta 0x70
         * trap_level == 0x171 --> ta 0x71
         */
        if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
                       (trap_level == 0x170) ? "debug" : "debug_2",
                       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
                bad_trap(regs, trap_level);
}

/* Jprobes support.  */
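
/* A jprobe redirects execution to jp->entry with the trapped pt_regs
 * left intact, so the jprobe handler sees the probed function's
 * arguments.  The handler finishes by calling jprobe_return(), whose
 * "ta 0x70" below is caught by longjmp_break_handler() (through the
 * break_handler hook in kprobe_handler()), which then restores the
 * register state saved by setjmp_pre_handler().
 */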

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));

        regs->tpc = (unsigned long) jp->entry;
        regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
        regs->tstate |= TSTATE_PIL;

        return 1;
}

void __kprobes jprobe_return(void)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        register unsigned long orig_fp asm("g1");

        orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
        __asm__ __volatile__("\n"
        "1:     cmp             %%sp, %0\n\t"
        "       blu,a,pt        %%xcc, 1b\n\t"
        "        restore\n\t"
        "       .globl          jprobe_return_trap_instruction\n"
        "jprobe_return_trap_instruction:\n\t"
        "       ta              0x70"
        : /* no outputs */
        : "r" (orig_fp));
}

extern void jprobe_return_trap_instruction(void);

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        u32 *addr = (u32 *) regs->tpc;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (addr == (u32 *) jprobe_return_trap_instruction) {
                memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
                preempt_enable_no_resched();
                return 1;
        }
        return 0;
}

/* The value stored in the return address register is actually 2
 * instructions before where the callee will return to.
 * Sequences usually look something like this:
 *
 *              call    some_function   <--- return register points here
 *               nop                    <--- call delay slot
 *              whatever                <--- where callee returns to
 *
 * To keep trampoline_probe_handler logic simpler, we normalize the
 * value kept in ri->ret_addr so we don't need to keep adjusting it
 * back and forth.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);

        /* Replace the return addr with trampoline addr */
        regs->u_regs[UREG_RETPC] =
                ((unsigned long) kretprobe_trampoline) - 8;
}
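
/* Rough sketch of the flow: at the probed function's entry the
 * return-address register (%o7) holds the address of the caller's
 * "call" instruction.  We stash that value + 8 (the real return point,
 * past the delay slot) in ri->ret_addr and substitute
 * kretprobe_trampoline - 8, so the callee's normal return (which jumps
 * to return-address + 8) lands exactly on the trampoline, where
 * trampoline_probe_handler() recovers the real return address.
 */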

/*
 * Called when the probe at kretprobe trampoline is hit
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long) &kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * return probes installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long) ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address.  Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        regs->tpc = orig_ret_address;
        regs->tnpc = orig_ret_address + 4;

        reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}

void kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline:\n"
                     "\tnop\n"
                     "\tnop\n");
}

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
                return 1;

        return 0;
}