/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2006
 *
 * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/stop_machine.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Make sure the probe isn't going on a difficult instruction */
	if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
		return -EINVAL;

	if ((unsigned long)p->addr & 0x01)
		return -EINVAL;

	/* Use the get_insn_slot() facility for correctness */
	if (!(p->ainsn.insn = get_insn_slot()))
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	get_instruction_type(&p->ainsn);
	p->opcode = *p->addr;
	return 0;
}

int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
{
	switch (*(__u8 *) instruction) {
	case 0x0c:	/* bassm */
	case 0x0b:	/* bsm	 */
	case 0x83:	/* diag	 */
	case 0x44:	/* ex	 */
		return -EINVAL;
	}
	switch (*(__u16 *) instruction) {
	case 0x0101:	/* pr	 */
	case 0xb25a:	/* bsa	 */
	case 0xb240:	/* bakr	 */
	case 0xb258:	/* bsg	 */
	case 0xb218:	/* pc	 */
	case 0xb228:	/* pt	 */
		return -EINVAL;
	}
	return 0;
}

void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
{
	/* default fixup method */
	ainsn->fixup = FIXUP_PSW_NORMAL;

	/* save r1 operand */
	ainsn->reg = (*ainsn->insn & 0xf0) >> 4;

	/* save the instruction length (pop 5-5) in bytes */
	switch (*(__u8 *) (ainsn->insn) >> 6) {
	case 0:
		ainsn->ilen = 2;
		break;
	case 1:
	case 2:
		ainsn->ilen = 4;
		break;
	case 3:
		ainsn->ilen = 6;
		break;
	}
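
	/*
	 * Illustrative decoding of the two-bit length code above
	 * (bits 0-1 of the first opcode byte select the instruction
	 * length); the example opcodes are taken from the cases
	 * handled below:
	 *   0x0d (basr):  0x0d >> 6 == 0 -> 2 bytes
	 *   0x47 (bc):    0x47 >> 6 == 1 -> 4 bytes
	 *   0xc0 (brasl): 0xc0 >> 6 == 3 -> 6 bytes
	 */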

	switch (*(__u8 *) ainsn->insn) {
	case 0x05:	/* balr	*/
	case 0x0d:	/* basr	*/
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		/* if r2 = 0, no branch will be taken */
		if ((*ainsn->insn & 0x0f) == 0)
			ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x06:	/* bctr	*/
	case 0x07:	/* bcr	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x45:	/* bal	*/
	case 0x4d:	/* bas	*/
		ainsn->fixup = FIXUP_RETURN_REGISTER;
		break;
	case 0x47:	/* bc	*/
	case 0x46:	/* bct	*/
	case 0x86:	/* bxh	*/
	case 0x87:	/* bxle	*/
		ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0x82:	/* lpsw	*/
		ainsn->fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xb2:	/* lpswe */
		if (*(((__u8 *) ainsn->insn) + 1) == 0xb2)
			ainsn->fixup = FIXUP_NOT_REQUIRED;
		break;
	case 0xa7:	/* bras	*/
		if ((*ainsn->insn & 0x0f) == 0x05)
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xc0:
		if ((*ainsn->insn & 0x0f) == 0x00	/* larl	 */
		    || (*ainsn->insn & 0x0f) == 0x05)	/* brasl */
			ainsn->fixup |= FIXUP_RETURN_REGISTER;
		break;
	case 0xeb:
		if (*(((__u8 *) ainsn->insn) + 5) == 0x44 ||	/* bxhg	 */
		    *(((__u8 *) ainsn->insn) + 5) == 0x45)	/* bxleg */
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	case 0xe3:	/* bctg	*/
		if (*(((__u8 *) ainsn->insn) + 5) == 0x46)
			ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
		break;
	}
}

static int __kprobes swap_instruction(void *aref)
{
	struct ins_replace_args *args = aref;
	u32 *addr;
	u32 instr;
	int err = -EFAULT;

	/*
	 * The text segment is read-only, hence we use stura to bypass
	 * dynamic address translation to exchange the instruction. Since
	 * stura always operates on four bytes, but we only want to
	 * exchange two bytes, do some calculations to get things right.
	 * In addition we shall not cross any page boundaries (vmalloc
	 * area!) when writing the new instruction.
	 */
	addr = (u32 *)((unsigned long)args->ptr & -4UL);
	if ((unsigned long)args->ptr & 2)
		instr = ((*addr) & 0xffff0000) | args->new;
	else
		instr = ((*addr) & 0x0000ffff) | args->new << 16;

	asm volatile(
		"	lra	%1,0(%1)\n"
		"0:	stura	%2,%1\n"
		"1:	la	%0,0\n"
		"2:\n"
		EX_TABLE(0b,2b)
		: "+d" (err)
		: "a" (addr), "d" (instr)
		: "memory", "cc");

	return err;
}
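
/*
 * A worked example of the halfword merge above, with made-up addresses:
 * for a probe at 0x10002, addr becomes 0x10000.  s390 is big-endian, so
 * the halfword at 0x10002 occupies the low 16 bits of the word at
 * 0x10000, and args->new is merged into the low half while the halfword
 * at 0x10000 is preserved.  For a probe at 0x10000, args->new is shifted
 * into the high 16 bits instead, preserving the halfword at 0x10002.
 */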

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = p->opcode;
	args.new = BREAKPOINT_INSTRUCTION;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	stop_machine_run(swap_instruction, &args, NR_CPUS);
	kcb->kprobe_status = status;
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args args;

	args.ptr = p->addr;
	args.old = BREAKPOINT_INSTRUCTION;
	args.new = p->opcode;

	kcb->kprobe_status = KPROBE_SWAP_INST;
	stop_machine_run(swap_instruction, &args, NR_CPUS);
	kcb->kprobe_status = status;
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	per_cr_bits kprobe_per_regs[1];

	memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
	regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;

	/* Set up the PER control reg info that will be passed to lctl */
	kprobe_per_regs[0].em_instruction_fetch = 1;
	kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
	kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;

	/* Set the PER control regs; turns on single step for this address */
	__ctl_load(kprobe_per_regs, 9, 11);
	regs->psw.mask |= PSW_MASK_PER;
	regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
	memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
	       sizeof(kcb->kprobe_saved_ctl));
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
	memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
	       sizeof(kcb->kprobe_saved_ctl));
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	/* Save the interrupt and PER flags */
	kcb->kprobe_saved_imask = regs->psw.mask &
		(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
	/* Save the control regs that govern PER */
	__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];

	/* Replace the return addr with trampoline addr */
	regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
}
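
/*
 * Illustrative flow with made-up addresses: if foo() was entered via
 * "brasl %r14,foo", %r14 holds the return address, say 0x2a008.
 * arch_prepare_kretprobe() records 0x2a008 in ri->ret_addr and points
 * %r14 at kretprobe_trampoline, so foo()'s "br %r14" lands on the
 * trampoline's breakpoint instead of returning to the caller.
 */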

static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned long *addr = (unsigned long *)
		((regs->psw.addr & PSW_ADDR_INSN) - 2);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check that we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
			    *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->psw.mask &= ~PSW_MASK_PER;
				regs->psw.mask |= kcb->kprobe_saved_imask;
				goto no_kprobe;
			}
			/*
			 * We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		/*
		 * No kprobe at this address. The fault has not been
		 * caused by a kprobe breakpoint. The race of breakpoint
		 * vs. kprobe remove does not exist because on s390 we
		 * use stop_machine_run to arm/disarm the breakpoints.
		 */
		goto no_kprobe;

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
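
/*
 * Note on the address arithmetic above: BREAKPOINT_INSTRUCTION is a
 * two-byte opcode, and on a program check the old PSW points past the
 * instruction that trapped, so psw.addr - 2 is the probed address.
 * E.g. (made-up address) a breakpoint at 0x3a200 traps with
 * psw.addr == 0x3a202, giving addr == 0x3a200.
 */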

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
		     "kretprobe_trampoline: bcr 0,0\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}
	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	regs->psw.addr &= PSW_ADDR_INSN;

	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
		regs->psw.addr = (unsigned long)p->addr +
			((unsigned long)regs->psw.addr -
			 (unsigned long)p->ainsn.insn);

	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
		if ((unsigned long)regs->psw.addr -
		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;

	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
		regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
					    (regs->gprs[p->ainsn.reg] -
					     (unsigned long)p->ainsn.insn))
					   | PSW_ADDR_AMODE;

	regs->psw.addr |= PSW_ADDR_AMODE;
	/* turn off PER mode */
	regs->psw.mask &= ~PSW_MASK_PER;
	/* Restore the original PER control regs */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask |= kcb->kprobe_saved_imask;
}
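
/*
 * A worked fixup example with made-up addresses: probe "basr %r14,%r5"
 * at A = 0x4000, single-stepped copy at C = p->ainsn.insn.  The branch
 * is taken, so psw.addr already holds the target loaded from %r5, and
 * FIXUP_PSW_NORMAL is not set for this opcode.  FIXUP_RETURN_REGISTER
 * rewrites %r14 from C + 2 back to A + 2 = 0x4002, the address to
 * which the called function must really return.
 */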

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, the
	 * psw mask will have PER set, in which case we continue the
	 * remaining processing of do_single_step, as if this is not a
	 * probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the psw address back to the probe
		 * address and allow the page fault handler to continue
		 * as a normal page fault.
		 */
		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
		regs->psw.mask &= ~PSW_MASK_PER;
		regs->psw.mask |= kcb->kprobe_saved_imask;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space, e.g. by
		 * copy_from_user() or get_user(). Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;

	/* r14 is the function return address */
	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
	/* r15 is the stack pointer */
	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
	addr = (unsigned long)kcb->jprobe_saved_r15;

	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));
	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile(".word 0x0002");
}

void __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* Put the stack back */
	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
	       MIN_STACK_SIZE(stack_addr));
	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
		return 1;
	return 0;
}
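
/*
 * Minimal usage sketch, for illustration only (not part of this file):
 * a module built against this kernel would typically do something like
 *
 *	static struct kprobe kp = {
 *		.addr		= (kprobe_opcode_t *) some_text_address,
 *		.pre_handler	= my_pre_handler,
 *	};
 *	...
 *	ret = register_kprobe(&kp);
 *
 * where some_text_address and my_pre_handler are placeholders.  The
 * arch hooks above then arm the breakpoint via stop_machine_run() and
 * drive the breakpoint -> single-step -> fixup cycle.
 */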