// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <asm/break.h>

static const union loongarch_instruction breakpoint_insn = {
	.reg0i15_format = {
		.opcode = break_op,
		.immediate = BRK_KPROBE_BP,
	}
};

static const union loongarch_instruction singlestep_insn = {
	.reg0i15_format = {
		.opcode = break_op,
		.immediate = BRK_KPROBE_SSTEPBP,
	}
};

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

/* Reject instructions that kprobes cannot handle: LL/SC pairs and bceqz/bcnez */
static bool insns_not_supported(union loongarch_instruction insn)
{
	switch (insn.reg2i14_format.opcode) {
	case llw_op:
	case lld_op:
	case scw_op:
	case scd_op:
		pr_notice("kprobe: ll and sc instructions are not supported\n");
		return true;
	}

	switch (insn.reg1i21_format.opcode) {
	case bceqz_op:
		pr_notice("kprobe: bceqz and bcnez instructions are not supported\n");
		return true;
	}

	return false;
}
NOKPROBE_SYMBOL(insns_not_supported);

/* PC-relative and branch instructions are simulated rather than single stepped */
static bool insns_need_simulation(struct kprobe *p)
{
	if (is_pc_ins(&p->opcode))
		return true;

	if (is_branch_ins(&p->opcode))
		return true;

	return false;
}
NOKPROBE_SYMBOL(insns_need_simulation);

static void arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	if (is_pc_ins(&p->opcode))
		simu_pc(regs, p->opcode);
	else if (is_branch_ins(&p->opcode))
		simu_branch(regs, p->opcode);
}
NOKPROBE_SYMBOL(arch_simulate_insn);

/* Copy the original instruction into the slot, followed by the single-step break */
static void arch_prepare_ss_slot(struct kprobe *p)
{
	p->ainsn.insn[0] = *p->addr;
	p->ainsn.insn[1] = singlestep_insn;
	p->ainsn.restore = (unsigned long)p->addr + LOONGARCH_INSN_SIZE;
}
NOKPROBE_SYMBOL(arch_prepare_ss_slot);

static void arch_prepare_simulate(struct kprobe *p)
{
	p->ainsn.restore = 0;
}
NOKPROBE_SYMBOL(arch_prepare_simulate);

int arch_prepare_kprobe(struct kprobe *p)
{
	if ((unsigned long)p->addr & 0x3)
		return -EILSEQ;

	/* copy instruction */
	p->opcode = *p->addr;

	/* decode instruction */
	if (insns_not_supported(p->opcode))
		return -EINVAL;

	if (insns_need_simulation(p)) {
		p->ainsn.insn = NULL;
	} else {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
	}

	/* prepare the instruction */
	if (p->ainsn.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

/* Install breakpoint in text */
void arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = breakpoint_insn;
	flush_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

/* Remove breakpoint from text */
void arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}
NOKPROBE_SYMBOL(save_previous_kprobe);

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(restore_previous_kprobe);

static void set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(set_current_kprobe);

/*
 * Interrupts need to be disabled before single-step mode is set,
 * and not reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could
 * occur between the exception return and the start of the out-of-line
 * single step, which would result in wrongly single stepping into the
 * interrupt handler.
 */
static void save_local_irqflag(struct kprobe_ctlblk *kcb,
			       struct pt_regs *regs)
{
	kcb->saved_status = regs->csr_prmd;
	regs->csr_prmd &= ~CSR_PRMD_PIE;
}
NOKPROBE_SYMBOL(save_local_irqflag);

static void restore_local_irqflag(struct kprobe_ctlblk *kcb,
				  struct pt_regs *regs)
{
	regs->csr_prmd = kcb->saved_status;
}
NOKPROBE_SYMBOL(restore_local_irqflag);

static void post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb,
				struct pt_regs *regs)
{
	/* restore the return address if this was a non-branching insn */
	if (cur->ainsn.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.restore);

	/* restore the previously saved kprobe state and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		preempt_enable_no_resched();
		return;
	}

	/*
	 * Update the kcb status even if cur->post_handler is
	 * not set, because reset_current_kprobe() doesn't update kcb.
	 */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler)
		cur->post_handler(cur, regs, 0);

	reset_current_kprobe();
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(post_kprobe_handler);

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb, int reenter)
{
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.insn) {
		/* IRQs and single stepping do not mix well */
		save_local_irqflag(kcb, regs);
		/* set ip register to prepare for single stepping */
		regs->csr_era = (unsigned long)p->ainsn.insn;
	} else {
		/* simulate single stepping */
		arch_simulate_insn(p, regs);
		/* now go for post processing */
		post_kprobe_handler(p, kcb, regs);
	}
}
NOKPROBE_SYMBOL(setup_singlestep);

static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
			   struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_REENTER:
		pr_warn("Failed to recover from reentered kprobes.\n");
		dump_kprobe(p);
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON(1);
		return false;
	}

	return true;
}
NOKPROBE_SYMBOL(reenter_kprobe);

bool kprobe_breakpoint_handler(struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb;
	struct kprobe *p, *cur_kprobe;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing.
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe(addr);
	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return true;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it has
			 * modified the execution path, so there is no need
			 * for single stepping. Just reset the current kprobe
			 * and exit.
			 *
			 * The pre_handler can hit a breakpoint and single
			 * step through it before returning.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
			} else {
				reset_current_kprobe();
				preempt_enable_no_resched();
			}
			return true;
		}
	}

	if (addr->word != breakpoint_insn.word) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another CPU has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address. In either case, no further
		 * handling of this interrupt is appropriate.
		 * Return to the original instruction and continue.
		 */
		regs->csr_era = (unsigned long)addr;
		preempt_enable_no_resched();
		return true;
	}

	preempt_enable_no_resched();
	return false;
}
NOKPROBE_SYMBOL(kprobe_breakpoint_handler);

bool kprobe_singlestep_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long addr = instruction_pointer(regs);

	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
	    ((unsigned long)&cur->ainsn.insn[1] == addr)) {
		restore_local_irqflag(kcb, regs);
		post_kprobe_handler(cur, kcb, regs);
		return true;
	}

	preempt_enable_no_resched();
	return false;
}
NOKPROBE_SYMBOL(kprobe_singlestep_handler);

bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back to the probe address,
		 * and allow the page fault handler to continue as
		 * for a normal page fault.
		 */
		regs->csr_era = (unsigned long)cur->addr;
		WARN_ON_ONCE(!instruction_pointer(regs));

		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}
		preempt_enable_no_resched();
		break;
	}
	return false;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					 (unsigned long)__irqentry_text_end);
}

int __init arch_init_kprobes(void)
{
	return 0;
}

/* ASM function that handles the kretprobes must not be probed */
NOKPROBE_SYMBOL(__kretprobe_trampoline);

/* Called from __kretprobe_trampoline */
void __used *trampoline_probe_handler(struct pt_regs *regs)
{
	return (void *)kretprobe_trampoline_handler(regs, NULL);
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

void arch_prepare_kretprobe(struct kretprobe_instance *ri,
			    struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[1];
	ri->fp = NULL;

	/* Replace the return addr with trampoline addr */
	regs->regs[1] = (unsigned long)&__kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

int arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);