/*
 * arch/arm/kernel/kprobes.c
 *
 * Kprobes on ARM
 *
 * Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2006, 2007 Motorola Inc.
 *
 * Nicolas Pitre <nico@marvell.com>
 * Copyright (C) 2007 Marvell Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/stringify.h>
#include <linux/percpu.h>
#include <linux/bug.h>
#include <asm/traps.h>
#include <asm/opcodes.h>
#include <asm/cacheflush.h>
#include <asm/patch.h>
#include <asm/sections.h>

#include "../decode-arm.h"
#include "../decode-thumb.h"
#include "core.h"

#define MIN_STACK_SIZE(addr)				\
	min((unsigned long)MAX_STACK_SIZE,		\
	    (unsigned long)current_thread_info() + THREAD_START_SP - (addr))

#define flush_insns(addr, size)				\
	flush_icache_range((unsigned long)(addr),	\
			   (unsigned long)(addr) +	\
			   (size))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);


int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	kprobe_opcode_t insn;
	kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
	unsigned long addr = (unsigned long)p->addr;
	bool thumb;
	kprobe_decode_insn_t *decode_insn;
	const union decode_action *actions;
	int is;
	const struct decode_checker **checkers;

#ifdef CONFIG_THUMB2_KERNEL
	thumb = true;
	addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
	insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
	if (is_wide_instruction(insn)) {
		u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
		insn = __opcode_thumb32_compose(insn, inst2);
		decode_insn = thumb32_probes_decode_insn;
		actions = kprobes_t32_actions;
		checkers = kprobes_t32_checkers;
	} else {
		decode_insn = thumb16_probes_decode_insn;
		actions = kprobes_t16_actions;
		checkers = kprobes_t16_checkers;
	}
#else /* !CONFIG_THUMB2_KERNEL */
	thumb = false;
	if (addr & 0x3)
		return -EINVAL;
	insn = __mem_to_opcode_arm(*p->addr);
	decode_insn = arm_probes_decode_insn;
	actions = kprobes_arm_actions;
	checkers = kprobes_arm_checkers;
#endif

	p->opcode = insn;
	p->ainsn.insn = tmp_insn;

	switch ((*decode_insn)(insn, &p->ainsn, true, actions, checkers)) {
	case INSN_REJECTED:	/* not supported */
		return -EINVAL;

	case INSN_GOOD:		/* instruction uses slot */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		for (is = 0; is < MAX_INSN_SIZE; ++is)
			p->ainsn.insn[is] = tmp_insn[is];
		flush_insns(p->ainsn.insn,
				sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
		p->ainsn.insn_fn = (probes_insn_fn_t *)
					((uintptr_t)p->ainsn.insn | thumb);
		break;

	case INSN_GOOD_NO_SLOT:	/* instruction doesn't need insn slot */
		p->ainsn.insn = NULL;
		break;
	}

	/*
	 * Never instrument an instruction like 'str r0, [sp, +/-r1]'.
	 * Instructions like 'str r0, [sp, #-68]' must also be prohibited.
	 * See __und_svc.
	 */
	if ((p->ainsn.stack_space < 0) ||
			(p->ainsn.stack_space > MAX_STACK_SIZE))
		return -EINVAL;

	return 0;
}
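
/*
 * Example (illustrative sketch only, not part of this file): a module
 * placing a probe. register_kprobe() invokes arch_prepare_kprobe() above
 * to decode and validate the probed instruction before arming it, so a
 * rejected instruction surfaces as -EINVAL at registration time. The
 * symbol and handler names below are hypothetical.
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "some_traced_function",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	int ret = register_kprobe(&kp);
 *	if (ret < 0)
 *		pr_err("register_kprobe failed: %d\n", ret);
 */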

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned int brkp;
	void *addr;

	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* Remove any Thumb flag */
		addr = (void *)((uintptr_t)p->addr & ~1);

		if (is_wide_instruction(p->opcode))
			brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
		else
			brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
	} else {
		kprobe_opcode_t insn = p->opcode;

		addr = p->addr;
		brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;

		if (insn >= 0xe0000000)
			brkp |= 0xe0000000;	/* Unconditional instruction */
		else
			brkp |= insn & 0xf0000000;	/* Copy condition from insn */
	}

	patch_text(addr, brkp);
}
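
/*
 * Note on the ARM case above: copying bits [31:28] makes the breakpoint
 * conditional in the same way as the original instruction. If the
 * condition fails at runtime, the CPU treats the conditional undefined
 * instruction as a NOP and no exception is raised, which matches the
 * probed instruction simply being skipped. As an illustrative encoding
 * example, when probing 'addeq r0, r0, #1' the EQ condition (0x0 in
 * bits [31:28]) is transplanted onto the breakpoint, so it only traps
 * when EQ would have held for the original instruction.
 */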

/*
 * The actual disarming is done here on each CPU and synchronized using
 * stop_machine. This synchronization is necessary on SMP to avoid removing
 * a probe between the moment the 'Undefined Instruction' exception is raised
 * and the moment the exception handler reads the faulting instruction from
 * memory. It is also needed to atomically set the two half-words of a 32-bit
 * Thumb breakpoint.
 */
struct patch {
	void *addr;
	unsigned int insn;
};

static int __kprobes_remove_breakpoint(void *data)
{
	struct patch *p = data;
	__patch_text(p->addr, p->insn);
	return 0;
}

void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
{
	struct patch p = {
		.addr = addr,
		.insn = insn,
	};
	stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
				cpu_online_mask);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	kprobes_remove_breakpoint((void *)((uintptr_t)p->addr & ~1),
				  p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

static void __kprobes
singlestep_skip(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_THUMB2_KERNEL
	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
	if (is_wide_instruction(p->opcode))
		regs->ARM_pc += 4;
	else
		regs->ARM_pc += 2;
#else
	regs->ARM_pc += 4;
#endif
}

static inline void __kprobes
singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	p->ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
}

/*
 * Called with IRQs disabled. IRQs must remain disabled from that point
 * all the way until processing this kprobe is complete. The current
 * kprobes implementation cannot process more than one nested level of
 * kprobe, and that level is reserved for user kprobe handlers, so we can't
 * risk encountering a new kprobe in an interrupt handler.
 */
void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur;
	struct kprobe_ctlblk *kcb;

	kcb = get_kprobe_ctlblk();
	cur = kprobe_running();

#ifdef CONFIG_THUMB2_KERNEL
	/*
	 * First look for a probe which was registered using an address with
	 * bit 0 set; this is the usual situation for pointers to Thumb code.
	 * If not found, fall back to looking for one with bit 0 clear.
	 */
	p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
	if (!p)
		p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);

#else /* ! CONFIG_THUMB2_KERNEL */
	p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
#endif

	if (p) {
		if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
			/*
			 * Probe hit but conditional execution check failed,
			 * so just skip the instruction and continue as if
			 * nothing had happened.
			 * In this case, we can skip the recursion check too.
			 */
			singlestep_skip(p, regs);
		} else if (cur) {
			/* Kprobe is pending, so we're recursing. */
			switch (kcb->kprobe_status) {
			case KPROBE_HIT_ACTIVE:
			case KPROBE_HIT_SSDONE:
			case KPROBE_HIT_SS:
				/* A pre- or post-handler probe got us here. */
				kprobes_inc_nmissed_count(p);
				save_previous_kprobe(kcb);
				set_current_kprobe(p);
				kcb->kprobe_status = KPROBE_REENTER;
				singlestep(p, regs, kcb);
				restore_previous_kprobe(kcb);
				break;
			case KPROBE_REENTER:
				/* A nested probe was hit in FIQ; this is a bug. */
				pr_warn("Unrecoverable kprobe detected.\n");
				dump_kprobe(p);
				/* fall through */
			default:
				/* impossible cases */
				BUG();
			}
		} else {
			/* Probe hit and conditional execution check ok. */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing. If we have a
			 * pre-handler and it returned non-zero, it modified
			 * the execution path and there is no need for
			 * single-stepping; just reset the current kprobe
			 * and exit.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				kcb->kprobe_status = KPROBE_HIT_SS;
				singlestep(p, regs, kcb);
				if (p->post_handler) {
					kcb->kprobe_status = KPROBE_HIT_SSDONE;
					p->post_handler(p, regs, 0);
				}
			}
			reset_current_kprobe();
		}
	} else {
		/*
		 * The probe was removed and a race is in progress.
		 * There is nothing we can do about it. Let's restart
		 * the instruction. By the time we can restart, the
		 * real instruction will be there.
		 */
	}
}

static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
	unsigned long flags;
	local_irq_save(flags);
	kprobe_handler(regs);
	local_irq_restore(flags);
	return 0;
}
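
/*
 * Note: kprobe_trap_handler() is not called directly. Executing an armed
 * probe raises an 'Undefined Instruction' exception, and the undef hooks
 * registered by arch_init_kprobes() at the bottom of this file route the
 * matching breakpoint encodings here; the local_irq_save() above then
 * satisfies kprobe_handler()'s requirement that IRQs stay disabled.
 */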

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the PC to point back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ARM_pc = (long)cur->addr;
		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		} else {
			reset_current_kprobe();
		}
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space, e.g. via
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;
		break;

	default:
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	/*
	 * notify_die() is currently never called on ARM,
	 * so this callback is currently empty.
	 */
	return NOTIFY_DONE;
}

/*
 * When a retprobed function returns, trampoline_handler() is called,
 * calling the kretprobe's handler. We construct a struct pt_regs to
 * give a view of registers r0-r11 to the user return-handler. This is
 * not a complete pt_regs structure, but that should be sufficient for
 * kretprobe handlers, which are normally interested only in r0 anyway.
 */
void __naked __kprobes kretprobe_trampoline(void)
{
	__asm__ __volatile__ (
		"stmdb	sp!, {r0 - r11}		\n\t"
		"mov	r0, sp			\n\t"
		"bl	trampoline_handler	\n\t"
		"mov	lr, r0			\n\t"
		"ldmia	sp!, {r0 - r11}		\n\t"
#ifdef CONFIG_THUMB2_KERNEL
		"bx	lr			\n\t"
#else
		"mov	pc, lr			\n\t"
#endif
		: : : "memory");
}

/* Called from kretprobe_trampoline */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack.
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;

	/* Replace the return addr with trampoline addr. */
	regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}
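
/*
 * Example (illustrative sketch only): registering a return probe. On
 * function entry, arch_prepare_kretprobe() above records the real return
 * address and diverts lr to kretprobe_trampoline, so the handler runs
 * when the probed function returns. The symbol and handler names below
 * are hypothetical.
 *
 *	static struct kretprobe rp = {
 *		.kp.symbol_name	= "some_traced_function",
 *		.handler	= my_ret_handler,
 *	};
 *
 *	int ret = register_kretprobe(&rp);
 */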

#ifdef CONFIG_THUMB2_KERNEL

static struct undef_hook kprobes_thumb16_break_hook = {
	.instr_mask = 0xffff,
	.instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = kprobe_trap_handler,
};

static struct undef_hook kprobes_thumb32_break_hook = {
	.instr_mask = 0xffffffff,
	.instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = kprobe_trap_handler,
};

#else /* !CONFIG_THUMB2_KERNEL */

static struct undef_hook kprobes_arm_break_hook = {
	.instr_mask = 0x0fffffff,
	.instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION,
	.cpsr_mask = MODE_MASK,
	.cpsr_val = SVC_MODE,
	.fn = kprobe_trap_handler,
};

#endif /* !CONFIG_THUMB2_KERNEL */

int __init arch_init_kprobes(void)
{
	arm_probes_decode_init();
#ifdef CONFIG_THUMB2_KERNEL
	register_undef_hook(&kprobes_thumb16_break_hook);
	register_undef_hook(&kprobes_thumb32_break_hook);
#else
	register_undef_hook(&kprobes_arm_break_hook);
#endif
	return 0;
}

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	void *a = (void *)addr;

	return __in_irqentry_text(addr) ||
	       in_entry_text(addr) ||
	       in_idmap_text(addr) ||
	       memory_contains(__kprobes_text_start, __kprobes_text_end, a, 1);
}
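
/*
 * Example (illustrative sketch only): register_kprobe() consults
 * arch_within_kprobe_blacklist() and refuses probes placed in the
 * regions above, so probing this file's own __kprobes code is expected
 * to fail at registration time:
 *
 *	static struct kprobe kp = {
 *		.addr = (kprobe_opcode_t *)kprobe_handler,
 *	};
 *
 *	int ret = register_kprobe(&kp);	// expected to fail: -EINVAL
 */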