/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	void *addrs[1];
	u32 insns[1];

	addrs[0] = addr;
	insns[0] = opcode;

	return aarch64_insn_patch_text(addrs, insns, 1);
}

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	patch_text(p->ainsn.api.insn, p->opcode);

	flush_icache_range((uintptr_t) (p->ainsn.api.insn),
			   (uintptr_t) (p->ainsn.api.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * The return address needs to be restored after single-stepping
	 * the instruction out of line (xol).
	 */
	p->ainsn.api.restore = (unsigned long) p->addr +
		sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out of line; no need to adjust the PC. */
	p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.api.handler)
		p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(kcb, regs);
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (search_exception_tables(probe_addr))
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.api.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.api.insn = get_insn_slot();
		if (!p->ainsn.api.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.api.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

void *alloc_insn_page(void)
{
	void *page;

	page = vmalloc_exec(PAGE_SIZE);
	if (page)
		set_memory_ro((unsigned long)page, 1);

	return page;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.api.insn) {
		free_insn_slot(p->ainsn.api.insn, 0);
		p->ainsn.api.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * When PSTATE.D is set (masked), software step exceptions cannot be
 * generated.
 * SPSR's D bit shows the value of PSTATE.D immediately before the
 * exception was taken.  PSTATE.D is set while entering any exception
 * mode, but software clears it for any normal (non-debug-exception)
 * mode in the exception entry.  Therefore, when we enter the kprobe
 * breakpoint handler from any normal mode, SPSR.D is already cleared;
 * it is set only when we enter from a debug exception mode.
 * Since we always need to generate a single step exception after a kprobe
 * breakpoint exception, we must clear it unconditionally once we are sure
 * the current breakpoint exception is for a kprobe.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
	unsigned long spsr = regs->pstate;

	if (mask)
		spsr |= PSR_D_BIT;
	else
		spsr &= ~PSR_D_BIT;

	regs->pstate = spsr;
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could occur
 * between the exception return and the start of the out-of-line single
 * step, which would result in wrongly single stepping into the interrupt
 * handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	if (kcb->saved_irqflag & PSR_I_BIT)
		regs->pstate |= PSR_I_BIT;
	else
		regs->pstate &= ~PSR_I_BIT;
}

static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}

static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.api.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.api.insn;

		set_ss_context(kcb, slot);	/* mark pending ss */

		spsr_set_debug_flag(regs, 0);

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		kernel_enable_single_step(regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected.\n");
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore if non-branching insn */
	if (cur->ainsn.api.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.api.restore);

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * post_handler can hit a breakpoint and single step again,
		 * so we enable the D-flag for the recursive exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the ip back to the probe address and
		 * allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts can also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it will
			 * modify the execution path, so there is no need
			 * for single stepping.  Let's just reset the
			 * current kprobe and exit.
			 *
			 * pre_handler can hit a breakpoint and single step
			 * before returning, so keep the PSTATE D-flag
			 * enabled until pre_handler returns.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
			} else
				reset_current_kprobe();
		}
	}
	/*
	 * The breakpoint instruction was removed right
	 * after we hit it.  Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address.  In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to the original instruction and continue.
	 */
}
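/*
 * Illustrative sketch (not part of this file): a minimal client module that
 * exercises the kprobe_handler() path above.  Registering the probe plants
 * BRK64_OPCODE_KPROBES at the probed address via arch_arm_kprobe(); hitting
 * it enters kprobe_handler(), which runs handler_pre(), single-steps or
 * simulates the original instruction, and then runs handler_post().  The
 * module and handler names and the probed symbol "_do_fork" are assumptions
 * chosen purely for illustration.
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Called from kprobe_handler() with the trapping pt_regs. */
	pr_info("pre: pc=%pS, pstate=0x%llx\n",
		(void *)instruction_pointer(regs), regs->pstate);
	return 0;	/* 0: continue with single-step/simulation */
}

static void handler_post(struct kprobe *p, struct pt_regs *regs,
			 unsigned long flags)
{
	/* Called from post_kprobe_handler() after the xol step completes. */
	pr_info("post: pc=%pS\n", (void *)instruction_pointer(regs));
}

static struct kprobe sample_kp = {
	.symbol_name	= "_do_fork",	/* assumed target symbol */
	.pre_handler	= handler_pre,
	.post_handler	= handler_post,
};

static int __init sample_kprobe_init(void)
{
	return register_kprobe(&sample_kp);
}

static void __exit sample_kprobe_exit(void)
{
	unregister_kprobe(&sample_kp);
}

module_init(sample_kprobe_init);
module_exit(sample_kprobe_exit);
MODULE_LICENSE("GPL");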

static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == addr)) {
		clear_ss_context(kcb);	/* clear pending ss */
		return DBG_HOOK_HANDLED;
	}
	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

static int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		post_kprobe_handler(kcb, regs);
	}

	return retval;
}

static struct step_hook kprobes_step_hook = {
	.fn = kprobe_single_step_handler,
};

static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kprobes_break_hook = {
	.imm = KPROBES_BRK_IMM,
	.fn = kprobe_breakpoint_handler,
};

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
	int ret;

	ret = kprobe_add_area_blacklist((unsigned long)__entry_text_start,
					(unsigned long)__entry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
					(unsigned long)__irqentry_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__exception_text_start,
					(unsigned long)__exception_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__idmap_text_start,
					(unsigned long)__idmap_text_end);
	if (ret)
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_text_start,
					(unsigned long)__hyp_text_end);
	if (ret || is_kernel_in_hyp_mode())
		return ret;
	ret = kprobe_add_area_blacklist((unsigned long)__hyp_idmap_text_start,
					(unsigned long)__hyp_idmap_text_end);
	return ret;
}

void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the (chronologically) first instance's ret_addr
	 *       will be the real return address, and all the rest will
	 *       point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	register_kernel_break_hook(&kprobes_break_hook);
	register_kernel_step_hook(&kprobes_step_hook);

	return 0;
}
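/*
 * Illustrative sketch (not part of this file): a minimal kretprobe client
 * showing how arch_prepare_kretprobe() and trampoline_probe_handler() above
 * are exercised.  On each entry to the probed function the return address
 * (x30) is swapped for kretprobe_trampoline; on return, the trampoline
 * handler walks the per-task instances and invokes ret_handler() with the
 * real return value.  The module and handler names and the probed symbol
 * "_do_fork" are assumptions chosen purely for illustration.
 */
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* Called from trampoline_probe_handler() via ri->rp->handler(). */
	unsigned long retval = regs_return_value(regs);

	pr_info("%s returned %lu\n", ri->rp->kp.symbol_name, retval);
	return 0;
}

static struct kretprobe sample_krp = {
	.handler	= ret_handler,
	.kp.symbol_name	= "_do_fork",	/* assumed target symbol */
	.maxactive	= 16,		/* concurrent instances to track */
};

static int __init sample_kretprobe_init(void)
{
	return register_kretprobe(&sample_krp);
}

static void __exit sample_kretprobe_exit(void)
{
	unregister_kretprobe(&sample_krp);
}

module_init(sample_kretprobe_init);
module_exit(sample_kretprobe_exit);
MODULE_LICENSE("GPL");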