/*
 * Kernel Probes (KProbes)
 * arch/ia64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr	Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *		<anil.s.keshavamurthy@intel.com> adapted from i386
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/uaccess.h>

extern void jprobe_inst_return(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
	{ M, I, I },	/* 00 */
	{ M, I, I },	/* 01 */
	{ M, I, I },	/* 02 */
	{ M, I, I },	/* 03 */
	{ M, L, X },	/* 04 */
	{ M, L, X },	/* 05 */
	{ u, u, u },	/* 06 */
	{ u, u, u },	/* 07 */
	{ M, M, I },	/* 08 */
	{ M, M, I },	/* 09 */
	{ M, M, I },	/* 0A */
	{ M, M, I },	/* 0B */
	{ M, F, I },	/* 0C */
	{ M, F, I },	/* 0D */
	{ M, M, F },	/* 0E */
	{ M, M, F },	/* 0F */
	{ M, I, B },	/* 10 */
	{ M, I, B },	/* 11 */
	{ M, B, B },	/* 12 */
	{ M, B, B },	/* 13 */
	{ u, u, u },	/* 14 */
	{ u, u, u },	/* 15 */
	{ B, B, B },	/* 16 */
	{ B, B, B },	/* 17 */
	{ M, M, B },	/* 18 */
	{ M, M, B },	/* 19 */
	{ u, u, u },	/* 1A */
	{ u, u, u },	/* 1B */
	{ M, F, B },	/* 1C */
	{ M, F, B },	/* 1D */
	{ u, u, u },	/* 1E */
	{ u, u, u },	/* 1F */
};

/*
 * Check whether the instruction is an IP-relative instruction and
 * update the kprobe inst_flag accordingly.
 */
static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
					      uint major_opcode,
					      unsigned long kprobe_inst,
					      struct kprobe *p)
{
	p->ainsn.inst_flag = 0;
	p->ainsn.target_br_reg = 0;
	p->ainsn.slot = slot;

	/* Check for a break instruction:
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF))) {
		/* is a break instruction */
		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
		return;
	}

	if (bundle_encoding[template][slot] == B) {
		switch (major_opcode) {
		case INDIRECT_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		case IP_RELATIVE_PREDICT_OPCODE:
		case IP_RELATIVE_BRANCH_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			break;
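		/*
		 * An IP-relative call both branches relative to the IP and
		 * writes a return link into a branch register, so it needs
		 * both fixups after the out-of-line single step.
		 */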
		case IP_RELATIVE_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	} else if (bundle_encoding[template][slot] == X) {
		switch (major_opcode) {
		case LONG_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	}
	return;
}

/*
 * Check whether the instruction
 *	(qp) cmpx.crel.ctype p1,p2=r2,r3
 * on which we are inserting the kprobe is a cmp instruction
 * with ctype "unc".
 */
static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
					    uint major_opcode,
					    unsigned long kprobe_inst)
{
	cmp_inst_t cmp_inst;
	uint ctype_unc = 0;

	if (!((bundle_encoding[template][slot] == I) ||
	      (bundle_encoding[template][slot] == M)))
		goto out;

	if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
	      (major_opcode == 0xE)))
		goto out;

	cmp_inst.l = kprobe_inst;
	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
		/* Integer compare - Register Register (A6 type) */
		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
		    && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	} else if ((cmp_inst.f.x2 == 2) || (cmp_inst.f.x2 == 3)) {
		/* Integer compare - Immediate Register (A8 type) */
		if ((cmp_inst.f.ta == 0) && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	}
out:
	return ctype_unc;
}

/*
 * Check whether the instruction on which we are inserting the kprobe
 * is supported.
 * Returns the qp value if supported, -EINVAL if unsupported.
 */
static int __kprobes unsupported_inst(uint template, uint slot,
				      uint major_opcode,
				      unsigned long kprobe_inst,
				      unsigned long addr)
{
	int qp;

	qp = kprobe_inst & 0x3f;
	if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
		if (slot == 1 && qp) {
			printk(KERN_WARNING "Kprobes on cmp unc "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
			return -EINVAL;
		}
		qp = 0;
	} else if (bundle_encoding[template][slot] == I) {
		if (major_opcode == 0) {
			/*
			 * Check for Integer speculation instruction
			 * - Bit 33-35 to be equal to 0x1
			 */
			if (((kprobe_inst >> 33) & 0x7) == 1) {
				printk(KERN_WARNING
					"Kprobes on speculation inst at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
			/*
			 * IP relative mov instruction
			 * - Bit 27-35 to be equal to 0x30
			 */
			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
				printk(KERN_WARNING
					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
		} else if ((major_opcode == 5) &&
			   !(kprobe_inst & (0xFUL << 33)) &&
			   (kprobe_inst & (0x1UL << 12))) {
			/* test bit instructions, tbit,tnat,tf
			 * bit 33-36 to be equal to 0
			 * bit 12 to be equal to 1
			 */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on test bit "
						"instruction on slot 1 at <0x%lx> "
						"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
	} else if (bundle_encoding[template][slot] == B) {
		if (major_opcode == 7) {
			/* IP-Relative Predict major code is 7 */
			printk(KERN_WARNING "Kprobes on IP-Relative "
					"Predict is not supported\n");
			return -EINVAL;
		} else if (major_opcode == 2) {
			/* Indirect Predict, major code is 2
			 * bit 27-32 to be equal to 10 or 11
			 */
			int x6 = (kprobe_inst >> 27) & 0x3F;
			if ((x6 == 0x10) || (x6 == 0x11)) {
				printk(KERN_WARNING "Kprobes on "
					"Indirect Predict is not supported\n");
				return -EINVAL;
			}
		}
	}
	/* The kernel does not use floating-point instructions; for safety,
	 * check anyway whether this is an fcmp/fclass/float-approximation
	 * instruction.
	 */
	else if (unlikely(bundle_encoding[template][slot] == F)) {
		if ((major_opcode == 4 || major_opcode == 5) &&
		    (kprobe_inst & (0x1 << 12))) {
			/* fcmp/fclass unc instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on fcmp/fclass "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
		if ((major_opcode == 0 || major_opcode == 1) &&
		    (kprobe_inst & (0x1UL << 33))) {
			/* float approximation instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on float approx "
					"instr at <0x%lx> is not supported\n",
					addr);
				return -EINVAL;
			}
			qp = 0;
		}
	}
	return qp;
}

/*
 * Override the bundle with the break instruction at the given slot.
 */
static void __kprobes prepare_break_inst(uint template, uint slot,
					 uint major_opcode,
					 unsigned long kprobe_inst,
					 struct kprobe *p,
					 int qp)
{
	unsigned long break_inst = BREAK_INST;
	bundle_t *bundle = &p->opcode.bundle;

	/*
	 * Copy the original kprobe_inst qualifying predicate (qp)
	 * to the break instruction.
	 */
	break_inst |= qp;

	switch (slot) {
	case 0:
		bundle->quad0.slot0 = break_inst;
		break;
	case 1:
		bundle->quad0.slot1_p0 = break_inst;
		bundle->quad1.slot1_p1 = break_inst >> (64-46);
		break;
	case 2:
		bundle->quad1.slot2 = break_inst;
		break;
	}

	/*
	 * Update the instruction flag, so that we can
	 * emulate the instruction properly after we
	 * single step on the original instruction.
	 */
	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}

static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
			unsigned long *kprobe_inst, uint *major_opcode)
{
	unsigned long kprobe_inst_p0, kprobe_inst_p1;
	unsigned int template;

	template = bundle->quad0.template;

	switch (slot) {
	case 0:
		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad0.slot0;
		break;
	case 1:
		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
		kprobe_inst_p0 = bundle->quad0.slot1_p0;
		kprobe_inst_p1 = bundle->quad1.slot1_p1;
		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
		break;
	case 2:
		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad1.slot2;
		break;
	}
}

/* Returns non-zero if the addr is in the Interrupt Vector Table */
static int __kprobes in_ivt_functions(unsigned long addr)
{
	return (addr >= (unsigned long)__start_ivt_text
		&& addr < (unsigned long)__end_ivt_text);
}

static int __kprobes valid_kprobe_addr(int template, int slot,
				       unsigned long addr)
{
	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
				"at 0x%lx\n", addr);
		return -EINVAL;
	}

	if (in_ivt_functions(addr)) {
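		/*
		 * All interruptions, including the break and single-step
		 * faults that kprobes itself relies on, vector through the
		 * IVT, so a probe placed there could recurse unsafely.
		 */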
		printk(KERN_WARNING "Kprobes can't be inserted inside "
				"IVT functions at 0x%lx\n", addr);
		return -EINVAL;
	}

	return 0;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;
	i = atomic_add_return(1, &kcb->prev_kprobe_index);
	kcb->prev_kprobe[i-1].kp = kprobe_running();
	kcb->prev_kprobe[i-1].status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;
	i = atomic_read(&kcb->prev_kprobe_index);
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe[i-1].kp;
	kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
	atomic_sub(1, &kcb->prev_kprobe_index);
}

static void __kprobes set_current_kprobe(struct kprobe *p,
			struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
}

static void kretprobe_trampoline(void)
{
}

/*
 * At this point the target function has been tricked into
 * returning into our trampoline.  Look up the associated instance
 * and then:
 *    - call the handler function
 *    - clean up by marking the instance as unused
 *    - long jump back to the original return address
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		((struct fnptr *)kretprobe_trampoline)->ip;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	regs->cr_iip = orig_ret_address;
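	/*
	 * cr_iip now holds the real return address.  Walk the list a
	 * second time to run the handlers and recycle the instances,
	 * stopping at the same real-return-address marker.
	 */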
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->b0;

	/* Replace the return addr with trampoline addr */
	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long) p->addr;
	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
	unsigned long kprobe_inst = 0;
	unsigned int slot = addr & 0xf, template, major_opcode = 0;
	bundle_t *bundle;
	int qp;

	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
	template = bundle->quad0.template;

	if (valid_kprobe_addr(template, slot, addr))
		return -EINVAL;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get kprobe_inst and major_opcode from the bundle */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
	if (qp < 0)
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));

	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);

	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	src = &p->opcode.bundle;

	flush_icache_range((unsigned long)p->ainsn.insn,
			   (unsigned long)p->ainsn.insn +
			   sizeof(kprobe_opcode_t));
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
	src = &p->ainsn.insn->bundle;
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}
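/*
 * Release the out-of-line instruction slot that arch_prepare_kprobe()
 * obtained from get_insn_slot().
 */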
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}

/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn->bundle).  We still need to adjust
 * the ip to point back to the original probe address, handling the cases
 * where we need to fix up the relative IP address and/or a branch
 * register.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
	unsigned long template;
	int slot = ((unsigned long)p->addr & 0xf);

	template = p->ainsn.insn->bundle.quad0.template;

	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;

	if (p->ainsn.inst_flag) {

		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
			regs->cr_iip = (regs->cr_iip - bundle_addr) +
					resume_addr;
		}

		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
			/*
			 * Fix the target branch register; the software
			 * convention is to use either b0, b6 or b7, so we
			 * only check those registers.
			 */
			switch (p->ainsn.target_br_reg) {
			case 0:
				if ((regs->b0 == bundle_addr) ||
				    (regs->b0 == bundle_addr + 0x10)) {
					regs->b0 = (regs->b0 - bundle_addr) +
						resume_addr;
				}
				break;
			case 6:
				if ((regs->b6 == bundle_addr) ||
				    (regs->b6 == bundle_addr + 0x10)) {
					regs->b6 = (regs->b6 - bundle_addr) +
						resume_addr;
				}
				break;
			case 7:
				if ((regs->b7 == bundle_addr) ||
				    (regs->b7 == bundle_addr + 0x10)) {
					regs->b7 = (regs->b7 - bundle_addr) +
						resume_addr;
				}
				break;
			} /* end switch */
		}
		goto turn_ss_off;
	}

	if (slot == 2) {
		if (regs->cr_iip == bundle_addr + 0x10) {
			regs->cr_iip = resume_addr + 0x10;
		}
	} else {
		if (regs->cr_iip == bundle_addr) {
			regs->cr_iip = resume_addr;
		}
	}

turn_ss_off:
	/* Turn off Single Step bit */
	ia64_psr(regs)->ss = 0;
}

static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
	unsigned long slot = (unsigned long)p->addr & 0xf;

	/* single step inline if break instruction */
	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
	else
		regs->cr_iip = bundle_addr & ~0xFULL;

	if (slot > 2)
		slot = 0;

	ia64_psr(regs)->ri = slot;

	/* turn on single stepping */
	ia64_psr(regs)->ss = 1;
}
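/*
 * Decode the bundle at the faulting IP (psr.ri selects the slot) and
 * check whether it still holds a break instruction; another CPU may
 * have removed the probe after we trapped.
 */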
static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
{
	unsigned int slot = ia64_psr(regs)->ri;
	unsigned int template, major_opcode;
	unsigned long kprobe_inst;
	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
	bundle_t bundle;

	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
	template = bundle.quad0.template;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get the kprobe probe instruction at the given slot */
	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);

	/* For a break instruction,
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
		/* Not a break instruction */
		return 0;
	}

	/* Is a break instruction */
	return 1;
}

static int __kprobes pre_kprobes_handler(struct die_args *args)
{
	struct kprobe *p;
	int ret = 0;
	struct pt_regs *regs = args->regs;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Handle recursion cases */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
			    (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
				ia64_psr(regs)->ss = 0;
				goto no_kprobe;
			}
			/* We have reentered pre_kprobes_handler(), since
			 * another probe was hit while within the handler.
			 * We save the original kprobe variables here and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_ss(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else if (args->err == __IA64_BREAK_JPROBE) {
			/*
			 * jprobe instrumented function just completed
			 */
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		} else if (!is_ia64_break_inst(regs)) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate
			 */
			ret = 1;
			goto no_kprobe;
		} else {
			/* Not our break */
			goto no_kprobe;
		}
	}

	p = get_kprobe(addr);
	if (!p) {
		if (!is_ia64_break_inst(regs)) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		/* Not one of our breaks, let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/*
		 * Our pre-handler is specifically requesting that we just
		 * do a return.  This is used for both the jprobe pre-handler
		 * and the kretprobe trampoline
		 */
		return 1;

ss_probe:
	prepare_ss(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
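/*
 * The single step trap taken after the out-of-line step (vector 36,
 * as checked in kprobe_exceptions_notify() below) lands here: run the
 * post handler, then fix up the IP and any branch register and turn
 * single stepping back off.
 */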
static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the previously saved kprobe state and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the instruction pointer back to the
		 * probe address, and allow the page fault handler
		 * to continue as a normal page fault.
		 */
		regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
		ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if a handler tries to access user space, e.g. via
		 * copy_from_user() or get_user().  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (ia64_done_with_exception(regs))
			return 1;

		/*
		 * Let ia64_do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BREAK:
		/* err is break number from ia64_bad_break() */
		if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
		    || args->err == __IA64_BREAK_JPROBE
		    || args->err == 0)
			if (pre_kprobes_handler(args))
				ret = NOTIFY_STOP;
		break;
	case DIE_FAULT:
		/* err is vector number from ia64_fault() */
		if (args->err == 36)
			if (post_kprobes_handler(args->regs))
				ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

struct param_bsp_cfm {
	unsigned long ip;
	unsigned long *bsp;
	unsigned long cfm;
};

static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
{
	unsigned long ip;
	struct param_bsp_cfm *lp = arg;

	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;
		if (ip == lp->ip) {
			unw_get_bsp(info, (unsigned long *)&lp->bsp);
			unw_get_cfm(info, (unsigned long *)&lp->cfm);
			return;
		}
	} while (unw_unwind(info) >= 0);
	lp->bsp = NULL;
	lp->cfm = 0;
	return;
}

unsigned long arch_deref_entry_point(void *entry)
{
	return ((struct fnptr *)entry)->ip;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr = arch_deref_entry_point(jp->entry);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct param_bsp_cfm pa;
	int bytes;
	/*
	 * The callee owns the argument space and could overwrite it, e.g.
	 * through tail call optimization.  So to be absolutely safe we
	 * save the argument space before transferring control to the
	 * instrumented jprobe function, which runs in process context.
	 */
	pa.ip = regs->cr_iip;
	unw_init_running(ia64_get_bsp_cfm, &pa);
	bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
		- (char *)pa.bsp;
	memcpy(kcb->jprobes_saved_stacked_regs, pa.bsp, bytes);
	kcb->bsp = pa.bsp;
	kcb->cfm = pa.cfm;

	/* save architectural state */
	kcb->jprobe_saved_regs = *regs;

	/* after rfi, execute the jprobe instrumented function */
	regs->cr_iip = addr & ~0xFULL;
	ia64_psr(regs)->ri = addr & 0xf;
	regs->r1 = ((struct fnptr *)(jp->entry))->gp;

	/*
	 * fix the return address to our jprobe_inst_return() function
	 * in the jprobes.S file
	 */
	regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;

	return 1;
}

/* ia64 does not need this */
void __kprobes jprobe_return(void)
{
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int bytes;

	/* restore architectural state */
	*regs = kcb->jprobe_saved_regs;

	/* restore the original argument space */
	flush_register_stack();
	bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
		- (char *)kcb->bsp;
	memcpy(kcb->bsp, kcb->jprobes_saved_stacked_regs, bytes);
	invalidate_stacked_regs();

	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	trampoline_p.addr =
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr ==
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip)
		return 1;

	return 0;
}