/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

DECLARE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped.  x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct hlist_head kprobe_insn_pages;
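/*
 * Sizing sketch (illustrative, architecture-dependent): with a 4 KiB page
 * and 16-byte instruction slots, INSNS_PER_PAGE works out to 256 slots per
 * page; the real figure depends on MAX_INSN_SIZE and
 * sizeof(kprobe_opcode_t) on the architecture in use.  slot_used[] marks
 * individual slots, while nused caches the per-page total so that full
 * pages can be skipped quickly.
 */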
/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip) {
		return NULL;
	}

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside.  This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						       &kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}
#endif

/* We have preemption disabled, so it is safe to use the __ versions. */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
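/*
 * Illustrative sketch (hypothetical handlers, not part of this file): when a
 * second kprobe is registered at an address, register_aggr_kprobe() below
 * replaces the original entry in kprobe_table with a "manager" kprobe whose
 * handlers are the aggr_* functions above, and every user pre_handler on the
 * manager's list is invoked:
 *
 *	static struct kprobe kp_a = { .addr = addr, .pre_handler = pre_a };
 *	static struct kprobe kp_b = { .addr = addr, .pre_handler = pre_b };
 *
 *	register_kprobe(&kp_a);		installs kp_a directly
 *	register_kprobe(&kp_b);		wraps kp_a and kp_b behind a manager
 *
 * A non-zero return from any pre_handler stops the remaining ones from
 * running for that hit.
 */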
/* Walks the list and increments the nmissed count for the multiprobe case. */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;
	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * add it back when the probed function returns.
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from exit_thread or flush_thread when task tk's
 * stack is being recycled so that we can recycle any function-return probe
 * instances associated with this task.  These left-over instances represent
 * probed functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
}
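/*
 * Lifecycle of the objects handled above: each kretprobe_instance starts on
 * its kretprobe's free_instances list; add_rp_inst() (typically called from
 * the architecture's arch_prepare_kretprobe()) moves it to the
 * used_instances list and hashes it by task in kretprobe_inst_table when the
 * entry probe fires; recycle_rp_inst() puts it back on the free list when
 * the probed function returns, or frees it outright if the kretprobe is
 * being unregistered; kprobe_flush_task() recycles instances belonging to an
 * exiting task, whose probed functions will never return.
 */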
/*
 * This kprobe pre_handler is registered with every kretprobe.  When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent.
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list.  Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry_rcu(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail_rcu(&p->list, &old_p->list);
	} else
		list_add_rcu(&p->list, &old_p->list);
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe".  Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
	    && addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *mod;

	if ((!kernel_text_address((unsigned long) p->addr)) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	if ((mod = module_text_address((unsigned long) p->addr)) &&
	    (unlikely(!try_module_get(mod))))
		return -EINVAL;

	p->nmissed = 0;
	down(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	arch_arm_kprobe(p);

out:
	up(&kprobe_mutex);

	if (ret && mod)
		module_put(mod);
	return ret;
}

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	down(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		up(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		up(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
		(p->list.next == &old_p->list) &&
		(p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	up(&kprobe_mutex);

	synchronize_sched();
	if ((mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	}
}
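/*
 * Minimal usage sketch (hypothetical example, not part of this file): the
 * caller resolves the address of the instruction to probe (for example via
 * kallsyms), fills in .addr and a pre_handler, and registers the probe.
 * Returning 0 from the pre_handler lets the probed instruction execute as
 * usual.
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = { .pre_handler = my_pre };
 *
 *	my_kp.addr = probe_addr;
 *	if (register_kprobe(&my_kp) < 0)
 *		return -1;
 *	...
 *	unregister_kprobe(&my_kp);
 */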
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = register_kprobe(&rp->kp)) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	free_rp_inst(rp);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
}
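/*
 * Return-probe usage sketch (hypothetical example, not part of this file):
 * the handler runs when the probed function returns; ri->ret_addr is the
 * original return address and ri->rp points back to the kretprobe.
 * maxactive bounds how many invocations may be outstanding at once; when no
 * free instance is available the hit is counted in rp->nmissed instead.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "return to %p\n", ri->ret_addr);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,
 *	};
 *
 *	my_rp.kp.addr = function_entry_addr;
 *	register_kretprobe(&my_rp);
 *	...
 *	unregister_kretprobe(&my_rp);
 */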
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);