/*
 * Kernel Probes (KProbes)
 * arch/ia64/kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 * Copyright (C) Intel Corporation, 2005
 *
 * 2005-Apr	Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
 *		<anil.s.keshavamurthy@intel.com> adapted from i386
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>

#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/exception.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

enum instruction_type {A, I, M, F, B, L, X, u};
static enum instruction_type bundle_encoding[32][3] = {
	{ M, I, I },		/* 00 */
	{ M, I, I },		/* 01 */
	{ M, I, I },		/* 02 */
	{ M, I, I },		/* 03 */
	{ M, L, X },		/* 04 */
	{ M, L, X },		/* 05 */
	{ u, u, u },		/* 06 */
	{ u, u, u },		/* 07 */
	{ M, M, I },		/* 08 */
	{ M, M, I },		/* 09 */
	{ M, M, I },		/* 0A */
	{ M, M, I },		/* 0B */
	{ M, F, I },		/* 0C */
	{ M, F, I },		/* 0D */
	{ M, M, F },		/* 0E */
	{ M, M, F },		/* 0F */
	{ M, I, B },		/* 10 */
	{ M, I, B },		/* 11 */
	{ M, B, B },		/* 12 */
	{ M, B, B },		/* 13 */
	{ u, u, u },		/* 14 */
	{ u, u, u },		/* 15 */
	{ B, B, B },		/* 16 */
	{ B, B, B },		/* 17 */
	{ M, M, B },		/* 18 */
	{ M, M, B },		/* 19 */
	{ u, u, u },		/* 1A */
	{ u, u, u },		/* 1B */
	{ M, F, B },		/* 1C */
	{ M, F, B },		/* 1D */
	{ u, u, u },		/* 1E */
	{ u, u, u },		/* 1F */
};
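
/*
 * Orientation (explanatory summary, not part of the original logic):
 * an IA-64 bundle is 128 bits, held here as two 64-bit quads.  The
 * 5-bit template field sits in the low bits of quad0, followed by
 * three 41-bit instruction slots.  Slot 0 lives entirely in quad0;
 * slot 1 straddles the quads, with its low 18 bits in quad0
 * (slot1_p0) and its upper 23 bits in quad1 (slot1_p1) -- hence the
 * recurring ">> (64 - 46)" shuffles below (slot 1 starts at bit 46,
 * leaving 64 - 46 = 18 bits in quad0); slot 2 occupies the rest of
 * quad1.  Templates 04/05 (MLX) fuse slots 1 and 2 into a single
 * long L+X instruction such as brl.
 */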
/* Insert a long branch (brl) instruction */
static void __kprobes set_brl_inst(void *from, void *to)
{
	s64 rel = ((s64) to - (s64) from) >> 4;
	bundle_t *brl;
	brl = (bundle_t *) ((u64) from & ~0xf);
	brl->quad0.template = 0x05;	/* [MLX](stop) */
	brl->quad0.slot0 = NOP_M_INST;	/* nop.m 0x0 */
	brl->quad0.slot1_p0 = ((rel >> 20) & 0x7fffffffff) << 2;
	brl->quad1.slot1_p1 = (((rel >> 20) & 0x7fffffffff) << 2) >> (64 - 46);
	/* brl.cond.sptk.many.clr rel<<4 (qp=0) */
	brl->quad1.slot2 = BRL_INST(rel >> 59, rel & 0xfffff);
}

/*
 * In this function we check to see if the instruction
 * is an IP-relative instruction and update the kprobe
 * inst flag accordingly.
 */
static void __kprobes update_kprobe_inst_flag(uint template, uint slot,
					      uint major_opcode,
					      unsigned long kprobe_inst,
					      struct kprobe *p)
{
	p->ainsn.inst_flag = 0;
	p->ainsn.target_br_reg = 0;
	p->ainsn.slot = slot;

	/* Check for Break instruction
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if ((!major_opcode) && (!((kprobe_inst >> 27) & 0x1FF))) {
		/* is a break instruction */
		p->ainsn.inst_flag |= INST_FLAG_BREAK_INST;
		return;
	}

	if (bundle_encoding[template][slot] == B) {
		switch (major_opcode) {
		case INDIRECT_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		case IP_RELATIVE_PREDICT_OPCODE:
		case IP_RELATIVE_BRANCH_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			break;
		case IP_RELATIVE_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	} else if (bundle_encoding[template][slot] == X) {
		switch (major_opcode) {
		case LONG_CALL_OPCODE:
			p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
			p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
			break;
		}
	}
	return;
}
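
/*
 * Why cmp.unc needs special handling (background note): a compare with
 * the "unc" completer writes its target predicates even when its
 * qualifying predicate is 0 (both targets are cleared in that case),
 * so unlike an ordinary predicated instruction it is never a no-op.
 * The break we plant in its place therefore cannot inherit the
 * original qp -- it must trap unconditionally -- which is why
 * unsupported_inst() below forces qp to 0 for these instructions.
 */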
/*
 * In this function we check to see if the instruction
 * (qp) cmpx.crel.ctype p1,p2=r2,r3
 * on which we are inserting the kprobe is a cmp instruction
 * with ctype as unc.
 */
static uint __kprobes is_cmp_ctype_unc_inst(uint template, uint slot,
					    uint major_opcode,
					    unsigned long kprobe_inst)
{
	cmp_inst_t cmp_inst;
	uint ctype_unc = 0;

	if (!((bundle_encoding[template][slot] == I) ||
	      (bundle_encoding[template][slot] == M)))
		goto out;

	if (!((major_opcode == 0xC) || (major_opcode == 0xD) ||
	      (major_opcode == 0xE)))
		goto out;

	cmp_inst.l = kprobe_inst;
	if ((cmp_inst.f.x2 == 0) || (cmp_inst.f.x2 == 1)) {
		/* Integer compare - Register Register (A6 type) */
		if ((cmp_inst.f.tb == 0) && (cmp_inst.f.ta == 0)
		    && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	} else if ((cmp_inst.f.x2 == 2) || (cmp_inst.f.x2 == 3)) {
		/* Integer compare - Immediate Register (A8 type) */
		if ((cmp_inst.f.ta == 0) && (cmp_inst.f.c == 1))
			ctype_unc = 1;
	}
out:
	return ctype_unc;
}

/*
 * In this function we check to see if the instruction
 * on which we are inserting the kprobe is supported.
 * Returns the qp value to use in the break instruction if supported,
 * or -EINVAL if the instruction is unsupported.
 */
static int __kprobes unsupported_inst(uint template, uint slot,
				      uint major_opcode,
				      unsigned long kprobe_inst,
				      unsigned long addr)
{
	int qp;

	qp = kprobe_inst & 0x3f;
	if (is_cmp_ctype_unc_inst(template, slot, major_opcode, kprobe_inst)) {
		if (slot == 1 && qp) {
			printk(KERN_WARNING "Kprobes on cmp unc "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
			return -EINVAL;
		}
		qp = 0;
	} else if (bundle_encoding[template][slot] == I) {
		if (major_opcode == 0) {
			/*
			 * Check for Integer speculation instruction
			 * - Bit 33-35 to be equal to 0x1
			 */
			if (((kprobe_inst >> 33) & 0x7) == 1) {
				printk(KERN_WARNING
					"Kprobes on speculation inst at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
			/*
			 * IP relative mov instruction
			 * - Bit 27-35 to be equal to 0x30
			 */
			if (((kprobe_inst >> 27) & 0x1FF) == 0x30) {
				printk(KERN_WARNING
					"Kprobes on \"mov r1=ip\" at <0x%lx> not supported\n",
					addr);
				return -EINVAL;
			}
		} else if ((major_opcode == 5) &&
			   !(kprobe_inst & (0xFUl << 33)) &&
			   (kprobe_inst & (0x1UL << 12))) {
			/* test bit instructions, tbit,tnat,tf
			 * bit 33-36 to be equal to 0
			 * bit 12 to be equal to 1
			 */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on test bit "
						"instruction on slot 1 at <0x%lx> "
						"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
	} else if (bundle_encoding[template][slot] == B) {
		if (major_opcode == 7) {
			/* IP-Relative Predict major code is 7 */
			printk(KERN_WARNING "Kprobes on IP-Relative "
					"Predict is not supported\n");
			return -EINVAL;
		} else if (major_opcode == 2) {
			/* Indirect Predict, major code is 2
			 * bit 27-32 to be equal to 0x10 or 0x11
			 */
			int x6 = (kprobe_inst >> 27) & 0x3F;
			if ((x6 == 0x10) || (x6 == 0x11)) {
				printk(KERN_WARNING "Kprobes on "
					"Indirect Predict is not supported\n");
				return -EINVAL;
			}
		}
	}
	/* The kernel does not use floating-point instructions; for safety,
	 * check anyway whether this is an fcmp/fclass/float-approximation
	 * instruction.
	 */
	else if (unlikely(bundle_encoding[template][slot] == F)) {
		if ((major_opcode == 4 || major_opcode == 5) &&
		    (kprobe_inst & (0x1 << 12))) {
			/* fcmp/fclass unc instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on fcmp/fclass "
					"instruction on slot 1 at <0x%lx> "
					"is not supported\n", addr);
				return -EINVAL;
			}
			qp = 0;
		}
		if ((major_opcode == 0 || major_opcode == 1) &&
		    (kprobe_inst & (0x1UL << 33))) {
			/* float approximation instruction */
			if (slot == 1 && qp) {
				printk(KERN_WARNING "Kprobes on float Approx "
					"instr at <0x%lx> is not supported\n",
					addr);
				return -EINVAL;
			}
			qp = 0;
		}
	}
	return qp;
}
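
/*
 * Note on the qp handed back above (explanatory): copying the probed
 * instruction's qualifying predicate into the break means the break
 * only traps when the original instruction would actually have
 * executed.  If the predicate is false the slot falls through, which
 * is correct because the original instruction would have been a no-op
 * anyway.  For example, probing "(p6) br.call b0=target" yields a
 * break qualified by p6.
 */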
/*
 * In this function we overwrite the probed slot of the bundle with
 * a break instruction.
 */
static void __kprobes prepare_break_inst(uint template, uint slot,
					 uint major_opcode,
					 unsigned long kprobe_inst,
					 struct kprobe *p,
					 int qp)
{
	unsigned long break_inst = BREAK_INST;
	bundle_t *bundle = &p->opcode.bundle;

	/*
	 * Copy the original kprobe_inst qualifying predicate(qp)
	 * to the break instruction
	 */
	break_inst |= qp;

	switch (slot) {
	case 0:
		bundle->quad0.slot0 = break_inst;
		break;
	case 1:
		bundle->quad0.slot1_p0 = break_inst;
		bundle->quad1.slot1_p1 = break_inst >> (64 - 46);
		break;
	case 2:
		bundle->quad1.slot2 = break_inst;
		break;
	}

	/*
	 * Update the instruction flag, so that we can
	 * emulate the instruction properly after we
	 * single step on the original instruction
	 */
	update_kprobe_inst_flag(template, slot, major_opcode, kprobe_inst, p);
}

static void __kprobes get_kprobe_inst(bundle_t *bundle, uint slot,
		unsigned long *kprobe_inst, uint *major_opcode)
{
	unsigned long kprobe_inst_p0, kprobe_inst_p1;
	unsigned int template;

	template = bundle->quad0.template;

	switch (slot) {
	case 0:
		*major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad0.slot0;
		break;
	case 1:
		*major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
		kprobe_inst_p0 = bundle->quad0.slot1_p0;
		kprobe_inst_p1 = bundle->quad1.slot1_p1;
		*kprobe_inst = kprobe_inst_p0 | (kprobe_inst_p1 << (64 - 46));
		break;
	case 2:
		*major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
		*kprobe_inst = bundle->quad1.slot2;
		break;
	}
}

/* Returns non-zero if the addr is in the Interrupt Vector Table */
static int __kprobes in_ivt_functions(unsigned long addr)
{
	return (addr >= (unsigned long)__start_ivt_text
		&& addr < (unsigned long)__end_ivt_text);
}

static int __kprobes valid_kprobe_addr(int template, int slot,
				       unsigned long addr)
{
	if ((slot > 2) || ((bundle_encoding[template][1] == L) && slot > 1)) {
		printk(KERN_WARNING "Attempting to insert unaligned kprobe "
				"at 0x%lx\n", addr);
		return -EINVAL;
	}

	if (in_ivt_functions(addr)) {
		printk(KERN_WARNING "Kprobes can't be inserted inside "
				"IVT functions at 0x%lx\n", addr);
		return -EINVAL;
	}

	return 0;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;
	i = atomic_add_return(1, &kcb->prev_kprobe_index);
	kcb->prev_kprobe[i-1].kp = kprobe_running();
	kcb->prev_kprobe[i-1].status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	unsigned int i;
	i = atomic_read(&kcb->prev_kprobe_index);
	__this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp);
	kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
	atomic_sub(1, &kcb->prev_kprobe_index);
}

static void __kprobes set_current_kprobe(struct kprobe *p,
			struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
}

static void kretprobe_trampoline(void)
{
}
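
/*
 * Background note: kretprobe_trampoline() is deliberately empty.  Only
 * its address matters -- arch_init_kprobes() registers a kprobe on it,
 * so when a probed function "returns" here the break fires and
 * trampoline_probe_handler() takes over before this body would ever
 * run.  On ia64 a function pointer is an (ip, gp) pair (struct fnptr),
 * hence the ((struct fnptr *)kretprobe_trampoline)->ip dereferences
 * below to obtain the actual entry address.
 */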
/*
 * At this point the target function has been tricked into
 * returning into our trampoline.  Look up the associated instance
 * and then:
 *    - call the handler function
 *    - clean up by marking the instance as unused
 *    - long jump back to the original return address
 */
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		((struct fnptr *)kretprobe_trampoline)->ip;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}
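
	/*
	 * Note on the two passes (explanatory): the loop above only
	 * locates the real return address so that cr_iip can be fixed
	 * up before any user handler runs; the loop below then walks
	 * the same instances again, invoking handlers and recycling
	 * each instance, stopping at the same real return address.
	 */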
	regs->cr_iip = orig_ret_address;

	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}
	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->b0;

	/* Replace the return addr with trampoline addr */
	regs->b0 = ((struct fnptr *)kretprobe_trampoline)->ip;
}

/* Check whether the instruction in the given slot is a break */
static int __kprobes __is_ia64_break_inst(bundle_t *bundle, uint slot)
{
	unsigned int major_opcode;
	unsigned int template = bundle->quad0.template;
	unsigned long kprobe_inst;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get the instruction at the given slot */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	/* For a break instruction,
	 * Bits 37:40 Major opcode to be zero
	 * Bits 27:32 X6 to be zero
	 * Bits 33:35 X3 to be zero
	 */
	if (major_opcode || ((kprobe_inst >> 27) & 0x1FF)) {
		/* Not a break instruction */
		return 0;
	}

	/* Is a break instruction */
	return 1;
}
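
/*
 * Background on "boosting" (explanatory note): normally a probe hit
 * costs two traps -- the break, then a single-step fault after the
 * copied instruction executes.  A boosted probe avoids the second
 * trap: set_brl_inst() appends a long branch after the copied bundle
 * that jumps straight back to the bundle following the probe point,
 * so execution resumes without single-stepping.  can_boost() below
 * rejects bundles that may change IP or raise an exception, since
 * those need the single-step fixup done in resume_execution().
 */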
/*
 * In this function, we check whether the target bundle modifies IP or
 * may trigger an exception.  If so, it cannot be boosted.
 */
static int __kprobes can_boost(bundle_t *bundle, uint slot,
			       unsigned long bundle_addr)
{
	unsigned int template = bundle->quad0.template;

	do {
		if (search_exception_tables(bundle_addr + slot) ||
		    __is_ia64_break_inst(bundle, slot))
			return 0;	/* exception may occur in this bundle */
	} while ((++slot) < 3);
	template &= 0x1e;
	if (template >= 0x10 /* including B unit */ ||
	    template == 0x04 /* including X unit */ ||
	    template == 0x06) /* undefined */
		return 0;

	return 1;
}

/* Prepare the long-jump bundle and disable other boosters if needed */
static void __kprobes prepare_booster(struct kprobe *p)
{
	unsigned long addr = (unsigned long)p->addr & ~0xFULL;
	unsigned int slot = (unsigned long)p->addr & 0xf;
	struct kprobe *other_kp;

	if (can_boost(&p->ainsn.insn[0].bundle, slot, addr)) {
		set_brl_inst(&p->ainsn.insn[1].bundle, (bundle_t *)addr + 1);
		p->ainsn.inst_flag |= INST_FLAG_BOOSTABLE;
	}

	/* disable boosters in previous slots */
	for (; addr < (unsigned long)p->addr; addr++) {
		other_kp = get_kprobe((void *)addr);
		if (other_kp)
			other_kp->ainsn.inst_flag &= ~INST_FLAG_BOOSTABLE;
	}
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long addr = (unsigned long) p->addr;
	unsigned long *kprobe_addr = (unsigned long *)(addr & ~0xFULL);
	unsigned long kprobe_inst = 0;
	unsigned int slot = addr & 0xf, template, major_opcode = 0;
	bundle_t *bundle;
	int qp;

	bundle = &((kprobe_opcode_t *)kprobe_addr)->bundle;
	template = bundle->quad0.template;

	if (valid_kprobe_addr(template, slot, addr))
		return -EINVAL;

	/* Move to slot 2, if bundle is MLX type and kprobe slot is 1 */
	if (slot == 1 && bundle_encoding[template][1] == L)
		slot++;

	/* Get kprobe_inst and major_opcode from the bundle */
	get_kprobe_inst(bundle, slot, &kprobe_inst, &major_opcode);

	qp = unsupported_inst(template, slot, major_opcode, kprobe_inst, addr);
	if (qp < 0)
		return -EINVAL;

	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(&p->opcode, kprobe_addr, sizeof(kprobe_opcode_t));
	memcpy(p->ainsn.insn, kprobe_addr, sizeof(kprobe_opcode_t));

	prepare_break_inst(template, slot, major_opcode, kprobe_inst, p, qp);

	prepare_booster(p);

	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	src = &p->opcode.bundle;

	flush_icache_range((unsigned long)p->ainsn.insn,
			   (unsigned long)p->ainsn.insn +
			   sizeof(kprobe_opcode_t) * MAX_INSN_SIZE);

	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}
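
/*
 * Note on arming/disarming (explanatory inference from the code): each
 * case above stores into a single 64-bit quad, so the live bundle is
 * patched with one aligned write rather than a racy 128-bit update.
 * For slot 1 this means only the upper 23 bits (slot1_p1, in quad1)
 * are rewritten; the low 18 bits of the slot -- the qualifying
 * predicate and the low bits of the break immediate -- keep whatever
 * the original instruction had, which appears to be why
 * kprobe_exceptions_notify() compares break numbers with the low 12
 * bits masked off (args->err >> 12), and why unsupported_inst()
 * refuses slot-1 instructions whose qp would need to be forced to 0.
 */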
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	unsigned long arm_addr;
	bundle_t *src, *dest;

	arm_addr = ((unsigned long)p->addr) & ~0xFUL;
	dest = &((kprobe_opcode_t *)arm_addr)->bundle;
	/* p->ainsn.insn contains the original unaltered kprobe_opcode_t */
	src = &p->ainsn.insn->bundle;
	switch (p->ainsn.slot) {
	case 0:
		dest->quad0.slot0 = src->quad0.slot0;
		break;
	case 1:
		dest->quad1.slot1_p1 = src->quad1.slot1_p1;
		break;
	case 2:
		dest->quad1.slot2 = src->quad1.slot2;
		break;
	}
	flush_icache_range(arm_addr, arm_addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn,
			       p->ainsn.inst_flag & INST_FLAG_BOOSTABLE);
		p->ainsn.insn = NULL;
	}
}

/*
 * We are resuming execution after a single step fault, so the pt_regs
 * structure reflects the register state after we executed the instruction
 * located in the kprobe (p->ainsn.insn->bundle).  We still need to adjust
 * the ip to point back to the original probe address; to do so, handle
 * the cases where we need to fix up a relative IP address and/or a
 * branch register.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) (&p->ainsn.insn->bundle);
	unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
	unsigned long template;
	int slot = ((unsigned long)p->addr & 0xf);

	template = p->ainsn.insn->bundle.quad0.template;

	if (slot == 1 && bundle_encoding[template][1] == L)
		slot = 2;

	if (p->ainsn.inst_flag & ~INST_FLAG_BOOSTABLE) {

		if (p->ainsn.inst_flag & INST_FLAG_FIX_RELATIVE_IP_ADDR) {
			/* Fix relative IP address */
			regs->cr_iip = (regs->cr_iip - bundle_addr) +
					resume_addr;
		}

		if (p->ainsn.inst_flag & INST_FLAG_FIX_BRANCH_REG) {
			/*
			 * Fix target branch register, software convention is
			 * to use either b0 or b6 or b7, so just checking
			 * only those registers
			 */
			switch (p->ainsn.target_br_reg) {
			case 0:
				if ((regs->b0 == bundle_addr) ||
				    (regs->b0 == bundle_addr + 0x10)) {
					regs->b0 = (regs->b0 - bundle_addr) +
						resume_addr;
				}
				break;
			case 6:
				if ((regs->b6 == bundle_addr) ||
				    (regs->b6 == bundle_addr + 0x10)) {
					regs->b6 = (regs->b6 - bundle_addr) +
						resume_addr;
				}
				break;
			case 7:
				if ((regs->b7 == bundle_addr) ||
				    (regs->b7 == bundle_addr + 0x10)) {
					regs->b7 = (regs->b7 - bundle_addr) +
						resume_addr;
				}
				break;
			} /* end switch */
		}
		goto turn_ss_off;
	}

	if (slot == 2) {
		if (regs->cr_iip == bundle_addr + 0x10) {
			regs->cr_iip = resume_addr + 0x10;
		}
	} else {
		if (regs->cr_iip == bundle_addr) {
			regs->cr_iip = resume_addr;
		}
	}

turn_ss_off:
	/* Turn off Single Step bit */
	ia64_psr(regs)->ss = 0;
}
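
/*
 * How the single step is set up (explanatory): cr_iip is pointed at the
 * copied bundle (or back at the original bundle if the probed
 * instruction was already a break), psr.ri selects which slot executes
 * next, and psr.ss arms the Single Step trap so the CPU faults right
 * after that one instruction, landing in post_kprobes_handler() via
 * fault vector 36.
 */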
static void __kprobes prepare_ss(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long bundle_addr = (unsigned long) &p->ainsn.insn->bundle;
	unsigned long slot = (unsigned long)p->addr & 0xf;

	/* single step inline if break instruction */
	if (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)
		regs->cr_iip = (unsigned long)p->addr & ~0xFULL;
	else
		regs->cr_iip = bundle_addr & ~0xFULL;

	if (slot > 2)
		slot = 0;

	ia64_psr(regs)->ri = slot;

	/* turn on single stepping */
	ia64_psr(regs)->ss = 1;
}

static int __kprobes is_ia64_break_inst(struct pt_regs *regs)
{
	unsigned int slot = ia64_psr(regs)->ri;
	unsigned long *kprobe_addr = (unsigned long *)regs->cr_iip;
	bundle_t bundle;

	memcpy(&bundle, kprobe_addr, sizeof(bundle_t));

	return __is_ia64_break_inst(&bundle, slot);
}

static int __kprobes pre_kprobes_handler(struct die_args *args)
{
	struct kprobe *p;
	int ret = 0;
	struct pt_regs *regs = args->regs;
	kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Handle recursion cases */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if ((kcb->kprobe_status == KPROBE_HIT_SS) &&
			    (p->ainsn.inst_flag == INST_FLAG_BREAK_INST)) {
				ia64_psr(regs)->ss = 0;
				goto no_kprobe;
			}
			/* We have reentered pre_kprobes_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_ss(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else if (!is_ia64_break_inst(regs)) {
			/* The breakpoint instruction was removed by
			 * another cpu right after we hit it; no further
			 * handling of this interrupt is appropriate
			 */
			ret = 1;
			goto no_kprobe;
		} else {
			/* Not our break */
			goto no_kprobe;
		}
	}

	p = get_kprobe(addr);
	if (!p) {
		if (!is_ia64_break_inst(regs)) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}

		/* Not one of our breaks; let the kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs)) {
		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
		/* Boost up -- we can execute the copied instructions directly */
		ia64_psr(regs)->ri = p->ainsn.slot;
		regs->cr_iip = (unsigned long)&p->ainsn.insn->bundle & ~0xFULL;
		/* turn single stepping off */
		ia64_psr(regs)->ss = 0;

		reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_ss(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
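
/*
 * Explanatory note: once the single-stepped instruction retires, the
 * resulting Single Step fault (vector 36) is routed here through
 * kprobe_exceptions_notify().  We run the user post_handler (unless
 * this was a reentrant hit), repair cr_iip and any branch registers
 * via resume_execution(), and only then re-enable preemption.
 */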
static int __kprobes post_kprobes_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}
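
/*
 * Explanatory note on the fault paths below: a fault in KPROBE_HIT_SS /
 * KPROBE_REENTER means the instruction we were single-stepping faulted,
 * so we rewind cr_iip to the original probe address and let the fault
 * be handled as if the probe had never fired.  A fault in
 * KPROBE_HIT_ACTIVE / KPROBE_HIT_SSDONE came from a user handler
 * (e.g. a copy_from_user() in a pre_handler), so we give the probe's
 * own fault_handler and the exception tables a chance first.
 */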
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the instruction pointer points back
		 * to the probe address, and allow the page fault
		 * handler to continue as a normal page fault.
		 */
		regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL;
		ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;
		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (ia64_done_with_exception(regs))
			return 1;

		/*
		 * Let ia64_do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BREAK:
		/* err is break number from ia64_bad_break() */
		if ((args->err >> 12) == (__IA64_BREAK_KPROBE >> 12)
			|| args->err == 0)
			if (pre_kprobes_handler(args))
				ret = NOTIFY_STOP;
		break;
	case DIE_FAULT:
		/* err is vector number from ia64_fault() */
		if (args->err == 36)
			if (post_kprobes_handler(args->regs))
				ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}

struct param_bsp_cfm {
	unsigned long ip;
	unsigned long *bsp;
	unsigned long cfm;
};

static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
{
	unsigned long ip;
	struct param_bsp_cfm *lp = arg;

	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;
		if (ip == lp->ip) {
			unw_get_bsp(info, (unsigned long *)&lp->bsp);
			unw_get_cfm(info, (unsigned long *)&lp->cfm);
			return;
		}
	} while (unw_unwind(info) >= 0);
	lp->bsp = NULL;
	lp->cfm = 0;
	return;
}

unsigned long arch_deref_entry_point(void *entry)
{
	return ((struct fnptr *)entry)->ip;
}

static struct kprobe trampoline_p = {
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	trampoline_p.addr =
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip;
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr ==
		(kprobe_opcode_t *)((struct fnptr *)kretprobe_trampoline)->ip)
		return 1;

	return 0;
}