// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/kernel/kprobes.c
 *
 * Copyright (C) 2004 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>

/* We do not have hardware single-stepping on sparc64.
 * So we implement software single-stepping with breakpoint
 * traps.  The top-level scheme is similar to that used
 * in the x86 kprobes implementation.
 *
 * In the kprobe->ainsn.insn[] array we store the original
 * instruction at index zero and a break instruction at
 * index one.
 *
 * When we hit a kprobe we:
 * - Run the pre-handler
 * - Remember "regs->tnpc" and the interrupt level stored in
 *   "regs->tstate" so we can restore them later
 * - Disable PIL interrupts
 * - Set regs->tpc to point to kprobe->ainsn.insn[0]
 * - Set regs->tnpc to point to kprobe->ainsn.insn[1]
 * - Mark that we are actively in a kprobe
 *
 * At this point we wait for the second breakpoint at
 * kprobe->ainsn.insn[1] to hit.  When it does we:
 * - Run the post-handler
 * - Set regs->tpc to the "remembered" regs->tnpc stored above,
 *   and restore the PIL interrupt level in "regs->tstate" as well
 * - Make any adjustments necessary to regs->tnpc in order
 *   to handle relative branches correctly.  See below.
 * - Mark that we are no longer actively in a kprobe.
 */

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x3UL)
                return -EILSEQ;

        p->ainsn.insn[0] = *p->addr;
        flushi(&p->ainsn.insn[0]);

        p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
        flushi(&p->ainsn.insn[1]);

        p->opcode = *p->addr;
        return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = BREAKPOINT_INSTRUCTION;
        flushi(p->addr);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flushi(p->addr);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
        kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
        kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                         struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, p);
        kcb->kprobe_orig_tnpc = regs->tnpc;
        kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
                                         struct kprobe_ctlblk *kcb)
{
        regs->tstate |= TSTATE_PIL;

        /* Single step inline if it is a breakpoint instruction. */
        if (p->opcode == BREAKPOINT_INSTRUCTION) {
                regs->tpc = (unsigned long) p->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
        } else {
                regs->tpc = (unsigned long) &p->ainsn.insn[0];
                regs->tnpc = (unsigned long) &p->ainsn.insn[1];
        }
}
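
/* kprobe_handler() runs from the DIE_DEBUG notifier when the breakpoint
 * trap placed at the probed address fires.  It has to deal with three
 * situations: a fresh hit (run the pre-handler, then single-step the
 * saved copy of the displaced instruction), a hit that occurs while
 * another kprobe is already being handled (save the active kprobe state
 * and single-step without calling user handlers), and a spurious trap
 * where the breakpoint has already been removed by another cpu.
 */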

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        void *addr = (void *) regs->tpc;
        int ret = 0;
        struct kprobe_ctlblk *kcb;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS) {
                                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                        kcb->kprobe_orig_tstate_pil);
                                goto no_kprobe;
                        }
                        /* We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * We save the original kprobes variables here and
                         * just single step on the instruction of the new
                         * probe, without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        kcb->kprobe_status = KPROBE_REENTER;
                        prepare_singlestep(p, regs, kcb);
                        return 1;
                } else {
                        if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                                /* The breakpoint instruction was removed by
                                 * another cpu right after we hit it, so no
                                 * further handling of this interrupt is
                                 * appropriate.
                                 */
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __this_cpu_read(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let the kernel handle it. */
                goto no_kprobe;
        }

        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
        if (p->pre_handler && p->pre_handler(p, regs))
                return 1;

ss_probe:
        prepare_singlestep(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/* If INSN is a relative control transfer instruction, return the
 * corrected branch destination value.
 *
 * regs->tpc and regs->tnpc still hold the values of the program
 * counters at the time of the trap, due to the execution of the
 * BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1].
 */
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
                                               struct pt_regs *regs)
{
        unsigned long real_pc = (unsigned long) p->addr;

        /* Branch not taken, no mods necessary. */
        if (regs->tnpc == regs->tpc + 0x4UL)
                return real_pc + 0x8UL;

        /* The three cases are call, branch w/prediction,
         * and traditional branch.
         */
        if ((insn & 0xc0000000) == 0x40000000 ||
            (insn & 0xc1c00000) == 0x00400000 ||
            (insn & 0xc1c00000) == 0x00800000) {
                unsigned long ainsn_addr;

                ainsn_addr = (unsigned long) &p->ainsn.insn[0];

                /* The instruction did all the work for us
                 * already, just apply the offset to the correct
                 * instruction location.
                 */
                return (real_pc + (regs->tnpc - ainsn_addr));
        }

        /* It is jmpl or some other absolute PC modification instruction,
         * leave NPC as-is.
         */
        return regs->tnpc;
}
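
/* A worked example of the fixup above, with illustrative addresses only:
 * assume the probed branch lives at real_pc == 0x1000 and its copy at
 * &p->ainsn.insn[0] == 0x2000.  If the single-stepped copy was taken with
 * a displacement of +0x40, the cpu left regs->tnpc == 0x2040, and the
 * rebasing yields 0x1000 + (0x2040 - 0x2000) == 0x1040, i.e. the target
 * the original instruction would have branched to.
 */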

/* If INSN is an instruction which writes its PC location
 * into a destination register, fix that up.
 */
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
                                  unsigned long real_pc)
{
        unsigned long *slot = NULL;

        /* Simplest case is 'call', which always uses %o7 */
        if ((insn & 0xc0000000) == 0x40000000) {
                slot = &regs->u_regs[UREG_I7];
        }

        /* 'jmpl' encodes the register inside of the opcode */
        if ((insn & 0xc1f80000) == 0x81c00000) {
                unsigned long rd = ((insn >> 25) & 0x1f);

                if (rd <= 15) {
                        slot = &regs->u_regs[rd];
                } else {
                        /* Hard case, it goes onto the stack. */
                        flushw_all();

                        rd -= 16;
                        slot = (unsigned long *)
                                (regs->u_regs[UREG_FP] + STACK_BIAS);
                        slot += rd;
                }
        }
        if (slot != NULL)
                *slot = real_pc;
}
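
/* Why this is needed: a 'call' that is single-stepped out of line writes
 * the address of the copy in ainsn.insn[] into %o7, so the callee would
 * later return relative to the copy rather than to the probed code.
 * Overwriting the destination register with real_pc (the address of the
 * probed instruction itself) restores the expected return path.
 */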

/* Called after single-stepping.  p->addr is the address of the
 * instruction which has been replaced by the breakpoint
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is &p->ainsn.insn[0].
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
                struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
        u32 insn = p->ainsn.insn[0];

        regs->tnpc = relbranch_fixup(insn, p, regs);

        /* This assignment must occur after relbranch_fixup() */
        regs->tpc = kcb->kprobe_orig_tnpc;

        retpc_fixup(regs, insn, (unsigned long) p->addr);

        regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                        kcb->kprobe_orig_tstate_pil);
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        const struct exception_table_entry *entry;

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault.  We reset the current
                 * kprobe so that the tpc points back to the probe
                 * address, and allow the page fault handler to
                 * continue as a normal page fault.
                 */
                regs->tpc = (unsigned long)cur->addr;
                regs->tnpc = kcb->kprobe_orig_tnpc;
                regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
                                kcb->kprobe_orig_tstate_pil);
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * the npre/npostfault counts could also be used to
                 * account for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault.  This could happen
                 * if the handler tries to access user space, e.g. via
                 * copy_from_user() or get_user().  Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                entry = search_exception_tables(regs->tpc);
                if (entry) {
                        regs->tpc = entry->fixup;
                        regs->tnpc = regs->tpc + 4;
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it,
                 * so let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }

        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        if (args->regs && user_mode(args->regs))
                return ret;

        switch (val) {
        case DIE_DEBUG:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_DEBUG_2:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }
        return ret;
}

asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
                                      struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        BUG_ON(trap_level != 0x170 && trap_level != 0x171);

        if (user_mode(regs)) {
                local_irq_enable();
                bad_trap(regs, trap_level);
                goto out;
        }

        /* trap_level == 0x170 --> ta 0x70
         * trap_level == 0x171 --> ta 0x71
         */
        if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
                       (trap_level == 0x170) ? "debug" : "debug_2",
                       regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
                bad_trap(regs, trap_level);
out:
        exception_exit(prev_state);
}

/* Jprobes support.  */
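
/* How the pieces below fit together: setjmp_pre_handler() saves the
 * trapped register state in the per-cpu control block and redirects
 * tpc/tnpc to the jprobe entry point, which runs with the probed
 * function's arguments.  That entry routine must end with
 * jprobe_return(), whose "ta 0x70" trap is recognized by
 * longjmp_break_handler(), which restores the saved registers; the
 * original instruction is then single-stepped and the probed function
 * continues normally.
 */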

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));

        regs->tpc = (unsigned long) jp->entry;
        regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
        regs->tstate |= TSTATE_PIL;

        return 1;
}

void __kprobes jprobe_return(void)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        register unsigned long orig_fp asm("g1");

        orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
        __asm__ __volatile__("\n"
"1:     cmp             %%sp, %0\n\t"
        "blu,a,pt       %%xcc, 1b\n\t"
        " restore\n\t"
        ".globl         jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
        "ta             0x70"
        : /* no outputs */
        : "r" (orig_fp));
}

extern void jprobe_return_trap_instruction(void);

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        u32 *addr = (u32 *) regs->tpc;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (addr == (u32 *) jprobe_return_trap_instruction) {
                memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
                preempt_enable_no_resched();
                return 1;
        }
        return 0;
}

/* The value stored in the return address register is actually 2
 * instructions before where the callee will return to.
 * Sequences usually look something like this:
 *
 *              call    some_function   <--- return register points here
 *               nop                    <--- call delay slot
 *              whatever                <--- where callee returns to
 *
 * To keep trampoline_probe_handler logic simpler, we normalize the
 * value kept in ri->ret_addr so we don't need to keep adjusting it
 * back and forth.
 */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);

        /* Replace the return addr with trampoline addr */
        regs->u_regs[UREG_RETPC] =
                ((unsigned long)kretprobe_trampoline) - 8;
}
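
/* Illustration of the +/- 8 adjustment above (addresses made up): a
 * "call some_function" at address A leaves A in %o7, but the callee's
 * "ret" (jmpl %i7 + 8) really returns to A + 8, past the call and its
 * delay slot.  So the real return address recorded in ri->ret_addr is
 * A + 8, and %o7 is loaded with kretprobe_trampoline - 8 so that the
 * return lands exactly on the trampoline's first instruction.
 */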
533 * 534 * We can handle this because: 535 * - instances are always inserted at the head of the list 536 * - when multiple return probes are registered for the same 537 * function, the first instance's ret_addr will point to the 538 * real return address, and all the rest will point to 539 * kretprobe_trampoline 540 */ 541 hlist_for_each_entry_safe(ri, tmp, head, hlist) { 542 if (ri->task != current) 543 /* another task is sharing our hash bucket */ 544 continue; 545 546 if (ri->rp && ri->rp->handler) 547 ri->rp->handler(ri, regs); 548 549 orig_ret_address = (unsigned long)ri->ret_addr; 550 recycle_rp_inst(ri, &empty_rp); 551 552 if (orig_ret_address != trampoline_address) 553 /* 554 * This is the real return address. Any other 555 * instances associated with this task are for 556 * other calls deeper on the call stack 557 */ 558 break; 559 } 560 561 kretprobe_assert(ri, orig_ret_address, trampoline_address); 562 regs->tpc = orig_ret_address; 563 regs->tnpc = orig_ret_address + 4; 564 565 reset_current_kprobe(); 566 kretprobe_hash_unlock(current, &flags); 567 preempt_enable_no_resched(); 568 569 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { 570 hlist_del(&ri->hlist); 571 kfree(ri); 572 } 573 /* 574 * By returning a non-zero value, we are telling 575 * kprobe_handler() that we don't want the post_handler 576 * to run (and have re-enabled preemption) 577 */ 578 return 1; 579 } 580 581 static void __used kretprobe_trampoline_holder(void) 582 { 583 asm volatile(".global kretprobe_trampoline\n" 584 "kretprobe_trampoline:\n" 585 "\tnop\n" 586 "\tnop\n"); 587 } 588 static struct kprobe trampoline_p = { 589 .addr = (kprobe_opcode_t *) &kretprobe_trampoline, 590 .pre_handler = trampoline_probe_handler 591 }; 592 593 int __init arch_init_kprobes(void) 594 { 595 return register_kprobe(&trampoline_p); 596 } 597 598 int __kprobes arch_trampoline_kprobe(struct kprobe *p) 599 { 600 if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline) 601 return 1; 602 603 return 0; 604 } 605