/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corp. 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/dis.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = { };

DEFINE_INSN_CACHE_OPS(dmainsn);

static void *alloc_dmainsn_page(void)
{
        return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
}

static void free_dmainsn_page(void *page)
{
        free_page((unsigned long)page);
}

struct kprobe_insn_cache kprobe_dmainsn_slots = {
        .mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
        .alloc = alloc_dmainsn_page,
        .free = free_dmainsn_page,
        .pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
        .insn_size = MAX_INSN_SIZE,
};

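/*
 * Copy the probed instruction into the out-of-line insn slot. If the
 * instruction is pc-relative (RIL-b or RIL-c format) the 32-bit RI2
 * displacement is re-encoded so that the branch target stays the same
 * when the copy is executed from the slot instead of the original
 * address.
 */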
static void copy_instruction(struct kprobe *p)
{
        unsigned long ip = (unsigned long) p->addr;
        s64 disp, new_disp;
        u64 addr, new_addr;

        if (ftrace_location(ip) == ip) {
                /*
                 * If kprobes patches the instruction that is morphed by
                 * ftrace make sure that kprobes always sees the branch
                 * "jg .+24" that skips the mcount block or the "brcl 0,0"
                 * in case of hotpatch.
                 */
                ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
                p->ainsn.is_ftrace_insn = 1;
        } else
                memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
        p->opcode = p->ainsn.insn[0];
        if (!probe_is_insn_relative_long(p->ainsn.insn))
                return;
        /*
         * For pc-relative instructions in RIL-b or RIL-c format patch the
         * RI2 displacement field. We have already made sure that the insn
         * slot for the patched instruction is within the same 2GB area
         * as the original instruction (either kernel image or module area).
         * Therefore the new displacement will always fit.
         */
        disp = *(s32 *)&p->ainsn.insn[1];
        addr = (u64)(unsigned long)p->addr;
        new_addr = (u64)(unsigned long)p->ainsn.insn;
        new_disp = ((addr + (disp * 2)) - new_addr) / 2;
        *(s32 *)&p->ainsn.insn[1] = new_disp;
}
NOKPROBE_SYMBOL(copy_instruction);

static inline int is_kernel_addr(void *addr)
{
        return addr < (void *)_end;
}

static int s390_get_insn_slot(struct kprobe *p)
{
        /*
         * Get an insn slot that is within the same 2GB area as the original
         * instruction. That way instructions with a 32bit signed displacement
         * field can be patched and executed within the insn slot.
         */
        p->ainsn.insn = NULL;
        if (is_kernel_addr(p->addr))
                p->ainsn.insn = get_dmainsn_slot();
        else if (is_module_addr(p->addr))
                p->ainsn.insn = get_insn_slot();
        return p->ainsn.insn ? 0 : -ENOMEM;
}
NOKPROBE_SYMBOL(s390_get_insn_slot);

static void s390_free_insn_slot(struct kprobe *p)
{
        if (!p->ainsn.insn)
                return;
        if (is_kernel_addr(p->addr))
                free_dmainsn_slot(p->ainsn.insn, 0);
        else
                free_insn_slot(p->ainsn.insn, 0);
        p->ainsn.insn = NULL;
}
NOKPROBE_SYMBOL(s390_free_insn_slot);

int arch_prepare_kprobe(struct kprobe *p)
{
        if ((unsigned long) p->addr & 0x01)
                return -EINVAL;
        /* Make sure the probe isn't going on a difficult instruction */
        if (probe_is_prohibited_opcode(p->addr))
                return -EINVAL;
        if (s390_get_insn_slot(p))
                return -ENOMEM;
        copy_instruction(p);
        return 0;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

int arch_check_ftrace_location(struct kprobe *p)
{
        return 0;
}

struct swap_insn_args {
        struct kprobe *p;
        unsigned int arm_kprobe : 1;
};

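/*
 * Arm or disarm a kprobe: replace the first two bytes of the probed
 * instruction with the breakpoint opcode, or restore the original
 * opcode. Runs via stop_machine() so no other CPU executes the
 * instruction while it is being rewritten. For probes on ftrace call
 * sites the whole ftrace instruction is rewritten and the probe state
 * is encoded in its displacement field (KPROBE_ON_FTRACE_NOP/_CALL).
 */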
static int swap_instruction(void *data)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long status = kcb->kprobe_status;
        struct swap_insn_args *args = data;
        struct ftrace_insn new_insn, *insn;
        struct kprobe *p = args->p;
        size_t len;

        new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
        len = sizeof(new_insn.opc);
        if (!p->ainsn.is_ftrace_insn)
                goto skip_ftrace;
        len = sizeof(new_insn);
        insn = (struct ftrace_insn *) p->addr;
        if (args->arm_kprobe) {
                if (is_ftrace_nop(insn))
                        new_insn.disp = KPROBE_ON_FTRACE_NOP;
                else
                        new_insn.disp = KPROBE_ON_FTRACE_CALL;
        } else {
                ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
                if (insn->disp == KPROBE_ON_FTRACE_NOP)
                        ftrace_generate_nop_insn(&new_insn);
        }
skip_ftrace:
        kcb->kprobe_status = KPROBE_SWAP_INST;
        s390_kernel_write(p->addr, &new_insn, len);
        kcb->kprobe_status = status;
        return 0;
}
NOKPROBE_SYMBOL(swap_instruction);

void arch_arm_kprobe(struct kprobe *p)
{
        struct swap_insn_args args = {.p = p, .arm_kprobe = 1};

        stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);

void arch_disarm_kprobe(struct kprobe *p)
{
        struct swap_insn_args args = {.p = p, .arm_kprobe = 0};

        stop_machine(swap_instruction, &args, NULL);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
        s390_free_insn_slot(p);
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

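/*
 * Single-stepping on s390 uses the PER (program-event-recording)
 * facility instead of a trap flag: control registers %cr9-%cr11 are
 * programmed to raise an instruction-fetch event for exactly one
 * address, the out-of-line copy of the probed instruction. I/O and
 * external interrupts are masked in the PSW while stepping so that
 * nothing else runs between the step and the PER event.
 */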
static void enable_singlestep(struct kprobe_ctlblk *kcb,
                              struct pt_regs *regs,
                              unsigned long ip)
{
        struct per_regs per_kprobe;

        /* Set up the PER control registers %cr9-%cr11 */
        per_kprobe.control = PER_EVENT_IFETCH;
        per_kprobe.start = ip;
        per_kprobe.end = ip;

        /* Save control regs and psw mask */
        __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
        kcb->kprobe_saved_imask = regs->psw.mask &
                (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT);

        /* Set PER control regs, turns on single step for the given address */
        __ctl_load(per_kprobe, 9, 11);
        regs->psw.mask |= PSW_MASK_PER;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
        regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(enable_singlestep);

static void disable_singlestep(struct kprobe_ctlblk *kcb,
                               struct pt_regs *regs,
                               unsigned long ip)
{
        /* Restore control regs and psw mask, set new psw address */
        __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
        regs->psw.mask &= ~PSW_MASK_PER;
        regs->psw.mask |= kcb->kprobe_saved_imask;
        regs->psw.addr = ip;
}
NOKPROBE_SYMBOL(disable_singlestep);

/*
 * Activate a kprobe by storing its pointer to current_kprobe. The
 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
 * two kprobes can be active, see KPROBE_REENTER.
 */
static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
        kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
        kcb->prev_kprobe.status = kcb->kprobe_status;
        __this_cpu_write(current_kprobe, p);
}
NOKPROBE_SYMBOL(push_kprobe);

/*
 * Deactivate a kprobe by backing up to the previous state. If the
 * current state is KPROBE_REENTER prev_kprobe.kp will be non-NULL,
 * for any other state prev_kprobe.kp will be NULL.
 */
static void pop_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}
NOKPROBE_SYMBOL(pop_kprobe);

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

        /* Replace the return addr with trampoline addr */
        regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);

static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
                kprobes_inc_nmissed_count(p);
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
        default:
                /*
                 * A kprobe on the code path to single step an instruction
                 * is a BUG. The code path resides in the .kprobes.text
                 * section and is executed with interrupts disabled.
                 */
                printk(KERN_EMERG "Invalid kprobe detected at %p.\n", p->addr);
                dump_kprobe(p);
                BUG();
        }
}
NOKPROBE_SYMBOL(kprobe_reenter_check);

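/*
 * Entered via kprobe_exceptions_notify() when the breakpoint
 * instruction traps. At this point the PSW already points past the
 * two byte breakpoint, so the probe is looked up at psw.addr - 2.
 */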
static int kprobe_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb;
        struct kprobe *p;

        /*
         * We want to disable preemption for the entire duration of kprobe
         * processing. That includes the calls to the pre/post handlers
         * and single stepping the kprobe instruction.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();
        p = get_kprobe((void *)(regs->psw.addr - 2));

        if (p) {
                if (kprobe_running()) {
                        /*
                         * We have hit a kprobe while another is still
                         * active. This can happen in the pre and post
                         * handler. Single step the instruction of the
                         * new probe but do not call any handler function
                         * of this secondary kprobe.
                         * push_kprobe and pop_kprobe saves and restores
                         * the currently active kprobe.
                         */
                        kprobe_reenter_check(kcb, p);
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_REENTER;
                } else {
                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with single stepping. If we have a
                         * pre-handler and it returned non-zero, it prepped
                         * for calling the break_handler below on re-entry
                         * for jprobe processing, so get out doing nothing
                         * more here.
                         */
                        push_kprobe(kcb, p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                        if (p->pre_handler && p->pre_handler(p, regs))
                                return 1;
                        kcb->kprobe_status = KPROBE_HIT_SS;
                }
                enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
                return 1;
        } else if (kprobe_running()) {
                p = __this_cpu_read(current_kprobe);
                if (p->break_handler && p->break_handler(p, regs)) {
                        /*
                         * Continuation after the jprobe completed and
                         * caused the jprobe_return trap. The jprobe
                         * break_handler "returns" to the original
                         * function that still has the kprobe breakpoint
                         * installed. We continue with single stepping.
                         */
                        kcb->kprobe_status = KPROBE_HIT_SS;
                        enable_singlestep(kcb, regs,
                                          (unsigned long) p->ainsn.insn);
                        return 1;
                } /* else:
                   * No kprobe at this address and the current kprobe
                   * has no break handler (no jprobe!). The kernel just
                   * exploded, let the standard trap handler pick up the
                   * pieces.
                   */
        } /* else:
           * No kprobe at this address and no active kprobe. The trap has
           * not been caused by a kprobe breakpoint. The race of breakpoint
           * vs. kprobe remove does not exist because on s390 we use
           * stop_machine to arm/disarm the breakpoints.
           */
        preempt_enable_no_resched();
        return 0;
}
NOKPROBE_SYMBOL(kprobe_handler);

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address;
        unsigned long trampoline_address;
        kprobe_opcode_t *correct_ret_addr;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * a return probe installed on them, and/or more than one return
         * probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        ri = NULL;
        orig_ret_address = 0;
        correct_ret_addr = NULL;
        trampoline_address = (unsigned long) &kretprobe_trampoline;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long) ri->ret_addr;

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);

        correct_ret_addr = ri->ret_addr;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long) ri->ret_addr;

                if (ri->rp && ri->rp->handler) {
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                }

                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack
                         */
                        break;
        }

        regs->psw.addr = orig_ret_address;

        pop_kprobe(get_kprobe_ctlblk());
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling
         * kprobe_handler() that we don't want the post_handler
         * to run (and have re-enabled preemption)
         */
        return 1;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void resume_execution(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long ip = regs->psw.addr;
        int fixup = probe_get_fixup_type(p->ainsn.insn);

        /* Check if the kprobes location is an enabled ftrace caller */
        if (p->ainsn.is_ftrace_insn) {
                struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
                struct ftrace_insn call_insn;

                ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
                /*
                 * A kprobe on an enabled ftrace call site actually single
                 * stepped an unconditional branch (ftrace nop equivalent).
                 * Now we need to fixup things and pretend that a brasl r0,...
                 * was executed instead.
                 */
                if (insn->disp == KPROBE_ON_FTRACE_CALL) {
                        ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
                        regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
                }
        }

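        /*
         * The PSW still points into the insn slot. FIXUP_PSW_NORMAL
         * rebases it to the original instruction. For a conditional
         * branch that was not taken (the PSW advanced by exactly one
         * instruction length inside the slot) execution continues
         * after the original instruction. Branch-and-save style
         * instructions saved a return address pointing into the slot,
         * so the register encoded in the instruction is rebased too.
         */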
        if (fixup & FIXUP_PSW_NORMAL)
                ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;

        if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
                int ilen = insn_length(p->ainsn.insn[0] >> 8);
                if (ip - (unsigned long) p->ainsn.insn == ilen)
                        ip = (unsigned long) p->addr + ilen;
        }

        if (fixup & FIXUP_RETURN_REGISTER) {
                int reg = (p->ainsn.insn[0] & 0xf0) >> 4;
                regs->gprs[reg] += (unsigned long) p->addr -
                                   (unsigned long) p->ainsn.insn;
        }

        disable_singlestep(kcb, regs, ip);
}
NOKPROBE_SYMBOL(resume_execution);

static int post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        struct kprobe *p = kprobe_running();

        if (!p)
                return 0;

        if (kcb->kprobe_status != KPROBE_REENTER && p->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                p->post_handler(p, regs, 0);
        }

        resume_execution(p, regs);
        pop_kprobe(kcb);
        preempt_enable_no_resched();

        /*
         * if somebody else is singlestepping across a probe point, psw mask
         * will have PER set, in which case, continue the remaining processing
         * of do_single_step, as if this is not a probe hit.
         */
        if (regs->psw.mask & PSW_MASK_PER)
                return 0;

        return 1;
}
NOKPROBE_SYMBOL(post_kprobe_handler);

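/*
 * Called for traps and faults that happen while a kprobe is active,
 * either while single-stepping the instruction copy or from within a
 * user supplied pre/post handler.
 */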
static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        struct kprobe *p = kprobe_running();
        const struct exception_table_entry *entry;

        switch(kcb->kprobe_status) {
        case KPROBE_SWAP_INST:
                /* We are here because the instruction replacement failed */
                return 0;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe, point the psw back to the probe address
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                disable_singlestep(kcb, regs, (unsigned long) p->addr);
                pop_kprobe(kcb);
                preempt_enable_no_resched();
                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting;
                 * we could also use the npre/npostfault counts to
                 * account for these specific fault cases.
                 */
                kprobes_inc_nmissed_count(p);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page_fault, this could happen
                 * if handler tries to access user space by
                 * copy_from_user(), get_user() etc. Let the
                 * user-specified handler try to fix it first.
                 */
                if (p->fault_handler && p->fault_handler(p, regs, trapnr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                entry = search_exception_tables(regs->psw.addr);
                if (entry) {
                        regs->psw.addr = extable_fixup(entry);
                        return 1;
                }

                /*
                 * fixup_exception() could not handle it,
                 * Let do_page_fault() fix it.
                 */
                break;
        default:
                break;
        }
        return 0;
}
NOKPROBE_SYMBOL(kprobe_trap_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        int ret;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();
        ret = kprobe_trap_handler(regs, trapnr);
        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
        return ret;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

/*
 * Wrapper routine for handling exceptions.
 */
int kprobe_exceptions_notify(struct notifier_block *self,
                             unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *) data;
        struct pt_regs *regs = args->regs;
        int ret = NOTIFY_DONE;

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_disable();

        switch (val) {
        case DIE_BPT:
                if (kprobe_handler(regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEP:
                if (post_kprobe_handler(regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_TRAP:
                if (!preemptible() && kprobe_running() &&
                    kprobe_trap_handler(regs, args->trapnr))
                        ret = NOTIFY_STOP;
                break;
        default:
                break;
        }

        if (regs->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT))
                local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);

        return ret;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

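/*
 * jprobe support: setjmp_pre_handler() saves the registers and the
 * top of the stack and redirects the PSW to the jprobe entry point.
 * jprobe_return() then executes the breakpoint opcode, which traps
 * back into kprobe_handler(); its break_handler path ends up in
 * longjmp_break_handler(), which restores the saved registers and
 * stack so the original instruction is single-stepped as for a
 * normal kprobe hit.
 */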
int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long stack;

        memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

        /* setup return addr to the jprobe handler routine */
        regs->psw.addr = (unsigned long) jp->entry;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);

        /* r15 is the stack pointer */
        stack = (unsigned long) regs->gprs[15];

        memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));

        /*
         * jprobes use jprobe_return(), which skips the normal return
         * path of the function; this confuses the accounting of the
         * function graph tracer.
         *
         * Pause function graph tracing while performing the jprobe function.
         */
        pause_graph_tracing();
        return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);

void jprobe_return(void)
{
        asm volatile(".word 0x0002");
}
NOKPROBE_SYMBOL(jprobe_return);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long stack;

        /* It's OK to start function graph tracing again */
        unpause_graph_tracing();

        stack = (unsigned long) kcb->jprobe_saved_regs.gprs[15];

        /* Put the regs back */
        memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
        /* put the stack back */
        memcpy((void *) stack, kcb->jprobes_stack, MIN_STACK_SIZE(stack));
        preempt_enable_no_resched();
        return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

static struct kprobe trampoline = {
        .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
        return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);