/*
 *  Kernel Probes (KProbes)
 *  arch/ia64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr	Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *		<anil.s.keshavamurthy@intel.com> adapted from i386
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/moduleloader.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/uaccess.h>

extern void jprobe_inst_return(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
	{ M, I, I },	/* 00 */
	{ M, I, I },	/* 01 */
	{ M, I, I },	/* 02 */
	{ M, I, I },	/* 03 */
	{ M, L, X },	/* 04 */
	{ M, L, X },	/* 05 */
	{ u, u, u },	/* 06 */
	{ u, u, u },	/* 07 */
	{ M, M, I },	/* 08 */
	{ M, M, I },	/* 09 */
	{ M, M, I },	/* 0A */
	{ M, M, I },	/* 0B */
	{ M, F, I },	/* 0C */
	{ M, F, I },	/* 0D */
	{ M, M, F },	/* 0E */
	{ M, M, F },	/* 0F */
	{ M, I, B },	/* 10 */
	{ M, I, B },	/* 11 */
	{ M, B, B },	/* 12 */
	{ M, B, B },	/* 13 */
	{ u, u, u },	/* 14 */
	{ u, u, u },	/* 15 */
	{ B, B, B },	/* 16 */
	{ B, B, B },	/* 17 */
	{ M, M, B },	/* 18 */
	{ M, M, B },	/* 19 */
	{ u, u, u },	/* 1A */
	{ u, u, u },	/* 1B */
	{ M, F, B },	/* 1C */
	{ M, F, B },	/* 1D */
	{ u, u, u },	/* 1E */
	{ u, u, u },	/* 1F */
};

/*
 * In this function we check to see if the instruction is an IP-relative
 * instruction and update the kprobe inst flag accordingly.
 */
static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
					      uint major_opcode,
					      unsigned long kprobe_inst,
					      struct kprobe *p)
{
	p->ainsn.inst_flag = 0;
	p->ainsn.target_br_reg = 0;
	p->ainsn.slot = slot;

	/* Check for a break instruction:
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF))) {
		/* is a break instruction */
		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
		return;
	}

	if (bundle_encoding[template][slot] == B) {
		switch (major_opcode) {
		case INDIRECT_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		case IP_RELATIVE_PREDICT_OPCODE:
		case IP_RELATIVE_BRANCH_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			break;
		case IP_RELATIVE_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
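			/*
			 * An IP-relative call branches relative to the
			 * bundle address and also writes a return link,
			 * so it needs both the IP-relative fixup and the
			 * branch-register fixup after single-stepping
			 * out of line.
			 */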
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	} else if (bundle_encoding[template][slot] == X) {
		switch (major_opcode) {
		case LONG_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	}
	return;
}

/*
 * In this function we check to see if the instruction
 *	(qp) cmpx.crel.ctype p1,p2=r2,r3
 * on which we are inserting the kprobe is a cmp instruction
 * with ctype as unc.
 */
static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
					    uint major_opcode,
					    unsigned long kprobe_inst)
{
	cmp_inst_t cmp_inst;
	uint ctype_unc = 0;

	if (!((bundle_encoding[template][slot] == I) ||
	      (bundle_encoding[template][slot] == M)))
		goto out;

	if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
	      (major_opcode == 0xE)))
		goto out;

	cmp_inst.l = kprobe_inst;
	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
		/* Integer compare - Register Register (A6 type) */
		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
		    && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	} else if ((cmp_inst.f.x2 == 2) || (cmp_inst.f.x2 == 3)) {
		/* Integer compare - Immediate Register (A8 type) */
		if ((cmp_inst.f.ta == 0) && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	}
out:
	return ctype_unc;
}

/*
 * In this function we check to see if the instruction
 * on which we are inserting the kprobe is supported.
 * Returns the qp value if supported
 * Returns -EINVAL if unsupported
 */
static int __kprobes unsupported_inst(uint template, uint slot,
				      uint major_opcode,
				      unsigned long kprobe_inst,
				      unsigned long addr)
{
	int qp;

	qp = kprobe_inst & 0x3f;
	if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
		if (slot == 1 && qp) {
			printk(KERN_WARNING "Kprobes on cmp unc "
				"instruction on slot 1 at <0x%lx> "
				"is not supported\n", addr);
			return -EINVAL;
		}
		qp = 0;
	} else if (bundle_encoding[template][slot] == I) {
		if (major_opcode == 0) {
			/*
			 * Check for Integer speculation instruction
			 * - Bits 33-35 to be equal to 0x1
			 */
			if (((kprobe_inst >> 33) & 0x7) == 1) {
				printk(KERN_WARNING
					"Kprobes on speculation inst at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
			/*
			 * IP relative mov instruction
			 * - Bits 27-35 to be equal to 0x30
			 */
			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
				printk(KERN_WARNING
					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
		} else if ((major_opcode == 5) &&
			   !(kprobe_inst & (0xFUL << 33)) &&
			   (kprobe_inst & (0x1UL << 12))) {
			/* test bit instructions, tbit, tnat, tf
			 * bits 33-36 to be equal to 0
			 * bit 12 to be equal to 1
			 */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on test bit "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
	} else if (bundle_encoding[template][slot] == B) {
		if (major_opcode == 7) {
			/* IP-Relative Predict major code is 7 */
			printk(KERN_WARNING "Kprobes on IP-Relative "
				"Predict is not supported\n");
			return -EINVAL;
		} else if (major_opcode == 2) {
			/* Indirect Predict, major code is 2
			 * bits 27-32 (x6) to be equal to 0x10 or 0x11
			 */
			int x6 = (kprobe_inst >> 27) & 0x3F;
			if ((x6 == 0x10) || (x6 == 0x11)) {
				printk(KERN_WARNING "Kprobes on "
					"Indirect Predict is not supported\n");
				return -EINVAL;
			}
		}
	}
	/* The kernel does not use floating-point instructions; for safety,
	 * check whether this is an fcmp/fclass/floating-point approximation
	 * instruction.
	 */
	else if (unlikely(bundle_encoding[template][slot] == F)) {
		if ((major_opcode == 4 || major_opcode == 5) &&
		    (kprobe_inst & (0x1 << 12))) {
			/* fcmp/fclass unc instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on fcmp/fclass "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
		if ((major_opcode == 0 || major_opcode == 1) &&
		    (kprobe_inst & (0x1UL << 33))) {
			/* floating-point approximation instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on float approx "
					"instr at <0x%lx> is not supported\n",
					addr);
				return -EINVAL;
			}
			qp = 0;
		}
	}
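	/*
	 * qp == 0 here means the planted break must fire unconditionally:
	 * unc-type compares, test-bit and fcmp/fclass instructions update
	 * their target predicates even when their own qualifying predicate
	 * is false, so the probe cannot simply inherit the original qp.
	 */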
	return qp;
}

/*
 * In this function we override the bundle with
 * the break instruction at the given slot.
 */
static void __kprobes prepare_break_inst(uint template, uint slot,
					 uint major_opcode,
					 unsigned long kprobe_inst,
					 struct kprobe *p,
					 int qp)
{
	unsigned long break_inst = BREAK_INST;
	bundle_t *bundle = &p->opcode.bundle;

	/*
	 * Copy the original kprobe_inst qualifying predicate (qp)
	 * to the break instruction
	 */
	break_inst |= qp;

	switch (slot) {
	case 0:
		bundle->quad0.slot0 = break_inst;
		break;
	case 1:
		bundle->quad0.slot1_p0 = break_inst;
		bundle->quad1.slot1_p1 = break_inst >> (64-46);
		break;
	case 2:
		bundle->quad1.slot2 = break_inst;
		break;
	}

	/*
	 * Update the instruction flag, so that we can
	 * emulate the instruction properly after we
	 * single step on the original instruction
	 */
	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}

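/*
 * An IA-64 bundle is 128 bits: a 5-bit template followed by three 41-bit
 * instruction slots.  Slot 0 lives in bits 5-45, slot 1 spans the two
 * 64-bit words of the bundle (bits 46-86, hence the "64-46" shifts used
 * to split and reassemble it), and slot 2 occupies bits 87-127.
 */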
static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
		unsigned long *kprobe_inst, uint *major_opcode)
{
	unsigned long kprobe_inst_p0, kprobe_inst_p1;
	unsigned int template;

	template = bundle->quad0.template;

	switch (slot) {
	case 0:
		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad0.slot0;
		break;
	case 1:
		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
		kprobe_inst_p0 = bundle->quad0.slot1_p0;
		kprobe_inst_p1 = bundle->quad1.slot1_p1;
		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64-46));
		break;
	case 2:
		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad1.slot2;
		break;
	}
}

/* Returns non-zero if the addr is in the Interrupt Vector Table */
static int __kprobes in_ivt_functions(unsigned long addr)
{
	return (addr >= (unsigned long)__start_ivt_text
		&& addr < (unsigned long)__end_ivt_text);
}

static int __kprobes valid_kprobe_addr(int template, int slot,
				       unsigned long addr)
{
	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
				"at 0x%lx\n", addr);
		return -EINVAL;
	}

	if (in_ivt_functions(addr)) {
		printk(KERN_WARNING "Kprobes can't be inserted inside "
				"IVT functions at 0x%lx\n", addr);
		return -EINVAL;
	}

	return 0;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;
	i = atomic_add_return(1, &kcb->prev_kprobe_index);
	kcb->prev_kprobe[i-1].kp = kprobe_running();
	kcb->prev_kprobe[i-1].status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;
	i = atomic_sub_return(1, &kcb->prev_kprobe_index);
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe[i].kp;
	kcb->kprobe_status = kcb->prev_kprobe[i].status;
}

static void __kprobes set_current_kprobe(struct kprobe *p,
			struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
}

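/*
 * On ia64 a function pointer is a descriptor holding the entry IP and the
 * global pointer, so the trampoline's code address must be read from
 * ((struct fnptr *)kretprobe_trampoline)->ip rather than taken from the
 * symbol directly.
 */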
static void kretprobe_trampoline(void)
{
}

/*
 * At this point the target function has been tricked into
 * returning into our trampoline.  Lookup the associated instance
 * and then:
 *    - call the handler function
 *    - cleanup by marking the instance as unused
 *    - long jump back to the original return address
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		((struct fnptr *)kretprobe_trampoline)->ip;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address.  Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	regs->cr_iip = orig_ret_address;

	reset_current_kprobe();
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->b0;

	/* Replace the return addr with trampoline addr */
	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long) p->addr;
	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
	unsigned long kprobe_inst = 0;
	unsigned int slot = addr & 0xf, template, major_opcode = 0;
	bundle_t *bundle;
	int qp;

	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
	template = bundle->quad0.template;

	if (valid_kprobe_addr(template, slot, addr))
		return -EINVAL;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get kprobe_inst and major_opcode from the bundle */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
	if (qp < 0)
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));

	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);

	return 0;
}

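/*
 * Arming rewrites only the probed slot of the live bundle, leaving the
 * other instructions in the bundle untouched; the icache is flushed so
 * that instruction fetch sees the newly planted break.
 */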
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	src = &p->opcode.bundle;

	flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
	src = &p->ainsn.insn->bundle;
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, 0);
	mutex_unlock(&kprobe_mutex);
}

/*
 * We are resuming execution after a single-step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe's slot (p->ainsn.insn->bundle).  We still need to
 * adjust the IP to point back at the original probe address; to do so,
 * handle the cases where the relative IP address and/or a branch register
 * must be fixed up.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
	unsigned long template;
	int slot = ((unsigned long)p->addr & 0xf);

	template = p->ainsn.insn->bundle.quad0.template;

	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;

	if (p->ainsn.inst_flag) {

		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
			regs->cr_iip = (regs->cr_iip - bundle_addr) +
					resume_addr;
		}

		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
			/*
			 * Fix the target branch register; the software
			 * convention is to use either b0, b6 or b7, so we
			 * only check those registers.
			 */
			switch (p->ainsn.target_br_reg) {
			case 0:
				if ((regs->b0 == bundle_addr) ||
					(regs->b0 == bundle_addr + 0x10)) {
					regs->b0 = (regs->b0 - bundle_addr) +
						resume_addr;
				}
				break;
			case 6:
				if ((regs->b6 == bundle_addr) ||
					(regs->b6 == bundle_addr + 0x10)) {
					regs->b6 = (regs->b6 - bundle_addr) +
						resume_addr;
				}
				break;
			case 7:
				if ((regs->b7 == bundle_addr) ||
					(regs->b7 == bundle_addr + 0x10)) {
					regs->b7 = (regs->b7 - bundle_addr) +
						resume_addr;
				}
				break;
			} /* end switch */
		}
		goto turn_ss_off;
	}

	if (slot == 2) {
		if (regs->cr_iip == bundle_addr + 0x10) {
			regs->cr_iip = resume_addr + 0x10;
		}
	} else {
		if (regs->cr_iip == bundle_addr) {
			regs->cr_iip = resume_addr;
		}
	}

turn_ss_off:
	/* Turn off Single Step bit */
	ia64_psr(regs)->ss = 0;
}

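/*
 * Point cr_iip/psr.ri at the instruction to single step (the out-of-line
 * copy, or the original bundle if the probed instruction is itself a
 * break) and set psr.ss so the CPU raises a single-step trap after
 * executing exactly one instruction.
 */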
static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
	unsigned long slot = (unsigned long)p->addr & 0xf;

	/* single step inline if it is a break instruction */
	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
	else
		regs->cr_iip = bundle_addr & ~0xFULL;

	if (slot > 2)
		slot = 0;

	ia64_psr(regs)->ri = slot;

	/* turn on single stepping */
	ia64_psr(regs)->ss = 1;
}

static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
{
	unsigned int slot = ia64_psr(regs)->ri;
	unsigned int template, major_opcode;
	unsigned long kprobe_inst;
	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
	bundle_t bundle;

	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));
	template = bundle.quad0.template;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get the kprobe probe instruction at the given slot */
	get_kprobe_inst(&bundle, slot, &kprobe_inst, &major_opcode);

	/* For a break instruction,
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
		/* Not a break instruction */
		return 0;
	}

	/* Is a break instruction */
	return 1;
}

static int __kprobes pre_kprobes_handler(struct die_args *args)
{
	struct kprobe *p;
	int ret = 0;
	struct pt_regs *regs = args->regs;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Handle recursion cases */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
			     (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
				ia64_psr(regs)->ss = 0;
				goto no_kprobe;
			}
			/* We have reentered pre_kprobes_handler(), since
			 * another probe was hit while within the handler.
			 * Here we save the original kprobe variables and
			 * just single step on the instruction of the new
			 * probe without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_ss(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else if (args->err == __IA64_BREAK_JPROBE) {
			/*
			 * jprobe instrumented function just completed
			 */
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		} else if (!is_ia64_break_inst(regs)) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate
			 */
			ret = 1;
			goto no_kprobe;
		} else {
			/* Not our break */
			goto no_kprobe;
		}
	}

	p = get_kprobe(addr);
	if (!p) {
		if (!is_ia64_break_inst(regs)) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		/* Not one of our breaks, let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/*
		 * Our pre-handler is specifically requesting that we just
		 * do a return.  This is used for both the jprobe pre-handler
		 * and the kretprobe trampoline
		 */
		return 1;

ss_probe:
	prepare_ss(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

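/*
 * Called on the single-step trap (vector 36, dispatched via DIE_FAULT in
 * kprobe_exceptions_notify()) taken after the probed instruction has been
 * executed: run the post handler, then fix up the IP and branch state.
 */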
static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobe variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

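/*
 * Fix-up hook for faults taken while a kprobe is active: either the
 * single-stepped instruction faulted (KPROBE_HIT_SS/KPROBE_REENTER) or a
 * user pre/post handler did (KPROBE_HIT_ACTIVE/KPROBE_HIT_SSDONE).
 */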
int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault.  We reset the current
		 * kprobe, point the instruction pointer back at the
		 * probe address and allow the page fault handler
		 * to continue as a normal page fault.
		 */
		regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
		ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault.  This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc.  Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (ia64_done_with_exception(regs))
			return 1;

		/*
		 * Let ia64_do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BREAK:
		/* err is break number from ia64_bad_break() */
		if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
			|| args->err == __IA64_BREAK_JPROBE
			|| args->err == 0)
			if (pre_kprobes_handler(args))
				ret = NOTIFY_STOP;
		break;
	case DIE_FAULT:
		/* err is vector number from ia64_fault() */
		if (args->err == 36)
			if (post_kprobes_handler(args->regs))
				ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

struct param_bsp_cfm {
	unsigned long ip;
	unsigned long *bsp;
	unsigned long cfm;
};

static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
{
	unsigned long ip;
	struct param_bsp_cfm *lp = arg;

	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;
		if (ip == lp->ip) {
			unw_get_bsp(info, (unsigned long *)&lp->bsp);
			unw_get_cfm(info, (unsigned long *)&lp->cfm);
			return;
		}
	} while (unw_unwind(info) >= 0);
	lp->bsp = NULL;
	lp->cfm = 0;
	return;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	struct param_bsp_cfm pa;
	int bytes;

	/*
	 * The callee owns the argument space and could overwrite it, e.g.
	 * due to tail call optimization.  To be absolutely safe, we save
	 * the argument space before transferring control to the
	 * instrumented jprobe function, which runs in process context.
	 */
	pa.ip = regs->cr_iip;
	unw_init_running(ia64_get_bsp_cfm, &pa);
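	/*
	 * The low bits of cfm give the size of the interrupted register
	 * frame, so ia64_rse_skip_regs() yields the address just past its
	 * stacked registers; that span is saved here and written back by
	 * longjmp_break_handler().
	 */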
	bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
				- (char *)pa.bsp;
	memcpy(kcb->jprobes_saved_stacked_regs, pa.bsp, bytes);
	kcb->bsp = pa.bsp;
	kcb->cfm = pa.cfm;

	/* save architectural state */
	kcb->jprobe_saved_regs = *regs;

	/* after rfi, execute the jprobe instrumented function */
	regs->cr_iip = addr & ~0xFULL;
	ia64_psr(regs)->ri = addr & 0xf;
	regs->r1 = ((struct fnptr *)(jp->entry))->gp;

	/*
	 * fix the return address to our jprobe_inst_return() function
	 * in the jprobes.S file
	 */
	regs->b0 = ((struct fnptr *)(jprobe_inst_return))->ip;

	return 1;
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int bytes;

	/* restoring architectural state */
	*regs = kcb->jprobe_saved_regs;

	/* restoring the original argument space */
	flush_register_stack();
	bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
				- (char *)kcb->bsp;
	memcpy(kcb->bsp, kcb->jprobes_saved_stacked_regs, bytes);
	invalidate_stacked_regs();

	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	trampoline_p.addr =
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr ==
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip)
		return 1;

	return 0;
}