// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel Probes (KProbes)
 * arch/mips/kernel/kprobes.c
 *
 * Copyright 2006 Sony Corp.
 * Copyright 2010 Cavium Networks
 *
 * Some portions copied from the powerpc version.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 */

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/branch.h>
#include <asm/break.h>

#include "probes-common.h"

static const union mips_instruction breakpoint_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_BP,
		.func = break_op
	}
};

static const union mips_instruction breakpoint2_insn = {
	.b_format = {
		.opcode = spec_op,
		.code = BRK_KPROBE_SSTEPBP,
		.func = break_op
	}
};

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
	return __insn_has_delay_slot(insn);
}

/*
 * insn_has_ll_or_sc() checks whether the instruction is an ll or sc
 * instruction. Putting a breakpoint on top of an atomic ll/sc pair is a
 * bad idea, so we refuse kprobe insertion for such instructions. We
 * cannot do much about a breakpoint in the middle of an ll/sc pair; it
 * is up to the user to avoid those places.
 */
static int __kprobes insn_has_ll_or_sc(union mips_instruction insn)
{
	int ret = 0;

	switch (insn.i_format.opcode) {
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:
		ret = 1;
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	union mips_instruction insn;
	union mips_instruction prev_insn;
	int ret = 0;

	insn = p->addr[0];

	if (insn_has_ll_or_sc(insn)) {
		pr_notice("Kprobes for ll and sc instructions are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if (copy_from_kernel_nofault(&prev_insn, p->addr - 1,
			sizeof(mips_instruction)) == 0 &&
			insn_has_delayslot(prev_insn)) {
		pr_notice("Kprobes for branch delayslot are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	if (__insn_is_compact_branch(insn)) {
		pr_notice("Kprobes for compact branches are not supported\n");
		ret = -EINVAL;
		goto out;
	}

	/* insn: must be on special executable page on mips. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * In the kprobe->ainsn.insn[] array we store the original
	 * instruction at index zero and a break trap instruction at
	 * index one.
	 *
	 * On MIPS, if the instruction at the probed address is a branch
	 * instruction, we need to execute the instruction in its branch
	 * delay slot (BD) at the time of the probe hit. Since MIPS has
	 * no hardware single-stepping support, the BD instruction cannot
	 * be executed in-line; it is executed on the SSOL (single-step
	 * out of line) slot, with a normal breakpoint instruction in the
	 * next slot to trap back.
	 * So, read the instruction and save it for later execution.
	 */
	if (insn_has_delayslot(insn))
		memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
	else
		memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));

	p->ainsn.insn[1] = breakpoint2_insn;
	p->opcode = *p->addr;

out:
	return ret;
}
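/*
 * Example (hypothetical usage, for illustration only): a minimal probe
 * whose registration runs through arch_prepare_kprobe() above. The
 * target symbol "kernel_clone" is an arbitrary example; probing an
 * ll/sc instruction or a compact branch would make register_kprobe()
 * fail with -EINVAL because of the checks above.
 *
 *	static int example_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre_handler: epc = 0x%lx\n", regs->cp0_epc);
 *		return 0;	// let the single-step machinery run
 *	}
 *
 *	static struct kprobe example_kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= example_pre,
 *	};
 *
 *	// module init: ret = register_kprobe(&example_kp);
 *	// module exit: unregister_kprobe(&example_kp);
 */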
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insn_slot(p);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
	kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
	kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
	kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
	kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}

static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
	kcb->kprobe_saved_epc = regs->cp0_epc;
}

/**
 * evaluate_branch_instruction - emulate the branch at the probed address
 *
 * Evaluate the branch instruction at the probed address during the probe
 * hit. The result of the evaluation is the updated epc. The instruction
 * in the delay slot is then single-stepped (using a normal breakpoint)
 * on the SSOL slot.
 *
 * The result is also saved in the kprobe control block for later use,
 * in case we need to execute the delay-slot instruction. The latter will
 * be false for a NOP instruction in the delay slot and for branch-likely
 * instructions when the branch is taken; for those cases we set the
 * SKIP_DELAYSLOT flag in the kprobe control block.
 */
static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	union mips_instruction insn = p->opcode;
	long epc;
	int ret = 0;

	epc = regs->cp0_epc;
	if (epc & 3)
		goto unaligned;

	if (p->ainsn.insn->word == 0)
		kcb->flags |= SKIP_DELAYSLOT;
	else
		kcb->flags &= ~SKIP_DELAYSLOT;

	ret = __compute_return_epc_for_insn(regs, insn);
	if (ret < 0)
		return ret;

	if (ret == BRANCH_LIKELY_TAKEN)
		kcb->flags |= SKIP_DELAYSLOT;

	kcb->target_epc = regs->cp0_epc;

	return 0;

unaligned:
	pr_notice("Failed to emulate branch instruction because of unaligned epc - sending SIGBUS to %s.\n", current->comm);
	force_sig(SIGBUS);
	return -EFAULT;
}
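/*
 * For a probed branch such as "beq a0, a1, target" (a hypothetical
 * example), arch_prepare_kprobe() builds the SSOL slot as
 *
 *	p->ainsn.insn[0]:	the delay-slot instruction
 *	p->ainsn.insn[1]:	break BRK_KPROBE_SSTEPBP
 *
 * prepare_singlestep() below points cp0_epc at insn[0]; the branch
 * itself is emulated by evaluate_branch_instruction(), which records
 * the final epc in kcb->target_epc, and the break in insn[1] traps
 * into post_kprobe_handler() so resume_execution() can install that
 * target.
 */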
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
			       struct kprobe_ctlblk *kcb)
{
	int ret = 0;

	regs->cp0_status &= ~ST0_IE;

	/* single step inline if the instruction is a break */
	if (p->opcode.word == breakpoint_insn.word ||
	    p->opcode.word == breakpoint2_insn.word)
		regs->cp0_epc = (unsigned long)p->addr;
	else if (insn_has_delayslot(p->opcode)) {
		ret = evaluate_branch_instruction(p, regs, kcb);
		if (ret < 0)
			return;
	}
	regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}

/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "break 0"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap. In the case of branch instructions, the target
 * epc is restored.
 */
static void __kprobes resume_execution(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb)
{
	if (insn_has_delayslot(p->opcode))
		regs->cp0_epc = kcb->target_epc;
	else {
		unsigned long orig_epc = kcb->kprobe_saved_epc;
		regs->cp0_epc = orig_epc + 4;
	}
}

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *) regs->cp0_epc;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    p->ainsn.insn->word == breakpoint_insn.word) {
				regs->cp0_status &= ~ST0_IE;
				regs->cp0_status |= kcb->kprobe_saved_SR;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs, kcb);
			kcb->kprobe_status = KPROBE_REENTER;
			if (kcb->flags & SKIP_DELAYSLOT) {
				resume_execution(p, regs, kcb);
				restore_previous_kprobe(kcb);
				preempt_enable_no_resched();
			}
			return 1;
		} else if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (addr->word != breakpoint_insn.word) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler has already set things up, so skip ss setup */
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

	prepare_singlestep(p, regs, kcb);
	if (kcb->flags & SKIP_DELAYSLOT) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		if (p->post_handler)
			p->post_handler(p, regs, 0);
		resume_execution(p, regs, kcb);
		preempt_enable_no_resched();
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;

	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
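/*
 * Probe state machine, as implemented above and in post_kprobe_handler()
 * below: a hit moves kcb->kprobe_status from KPROBE_HIT_ACTIVE to
 * KPROBE_HIT_SS once the SSOL slot is armed, and to KPROBE_HIT_SSDONE
 * when the second break traps back. With SKIP_DELAYSLOT the whole
 * sequence completes inside kprobe_handler() itself, and a probe hit
 * from within a handler runs as KPROBE_REENTER without calling any
 * user handlers.
 */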
static inline int post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);

	regs->cp0_status |= kcb->kprobe_saved_SR;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	return 1;
}

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (kcb->kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs, kcb);
		regs->cp0_status |= kcb->kprobe_old_SR;

		reset_current_kprobe();
		preempt_enable_no_resched();
	}
	return 0;
}
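/*
 * The notifier below is invoked through notify_die(): DIE_BREAK is
 * raised for the BRK_KPROBE_BP break at the probed address,
 * DIE_SSTEPBP for the BRK_KPROBE_SSTEPBP break in the second SSOL
 * slot, and DIE_PAGE_FAULT for a fault taken while a probe is active.
 */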
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BREAK:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEPBP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;

	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();

		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

/*
 * Function return probe trampoline:
 *	- arch_init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe causes the
 *	  handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(
		".set push\n\t"
		/* Keep the assembler from reordering and placing JR here. */
		".set noreorder\n\t"
		"nop\n\t"
		".global __kretprobe_trampoline\n"
		"__kretprobe_trampoline:\n\t"
		"nop\n\t"
		".set pop"
		: : : "memory");
}

void __kretprobe_trampoline(void);

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->regs[31] = (unsigned long)__kretprobe_trampoline;
}

/*
 * Called when the probe at the kretprobe trampoline is hit.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, NULL);
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption).
	 */
	return 1;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline)
		return 1;

	return 0;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)__kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
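/*
 * Example (hypothetical usage, for illustration only): a minimal
 * kretprobe whose return path goes through the trampoline above;
 * "kernel_clone" is an arbitrary example target.
 *
 *	static int example_ret(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("returned, v0 = %lu\n", regs->regs[2]);
 *		return 0;
 *	}
 *
 *	static struct kretprobe example_rp = {
 *		.kp.symbol_name	= "kernel_clone",
 *		.handler	= example_ret,
 *		.maxactive	= 20,
 *	};
 *
 *	// module init: ret = register_kretprobe(&example_rp);
 *	// module exit: unregister_kretprobe(&example_rp);
 */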