/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

struct kprobe_insn_cache {
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
static struct kprobe_insn_cache kprobe_insn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * c->insn_size);
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	return kip->insns;
}


kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot(&kprobe_insn_slots);
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}

/* Return 1 if the slot's page became empty (all garbage collected), otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is still running on the garbage slots */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			return;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_insn_mutex);
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	mutex_unlock(&kprobe_insn_mutex);
}
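/*
 * Usage sketch (illustrative only, not part of this file): an architecture
 * port typically pairs get_insn_slot()/free_insn_slot() in its probe
 * setup/teardown, roughly along the lines below.  Real ports also perform
 * arch-specific instruction decoding and fixups, so treat this as a
 * simplified outline rather than the actual arch code.
 *
 *	int arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;
 *		return 0;
 *	}
 *
 *	void arch_remove_kprobe(struct kprobe *p)
 *	{
 *		if (p->ainsn.insn) {
 *			free_insn_slot(p->ainsn.insn, 0);
 *			p->ainsn.insn = NULL;
 *		}
 *	}
 */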
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
static struct kprobe_insn_cache kprobe_optinsn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
/* Get a slot for optimized_kprobe buffer */
kprobe_opcode_t __kprobes *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);

	return ret;
}

void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_optinsn_mutex);
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
	mutex_unlock(&kprobe_optinsn_mutex);
}
#endif
#endif

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}

static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not aggr/opt probe, just return kprobe is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int __kprobes kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}

/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
static DECLARE_COMPLETION(optimizer_comp);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static __kprobes void do_optimize_kprobes(void)
{
	/* Optimization is never done while kprobes are all disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus.  At the
	 * same time, text_mutex will be held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex but stop_machine() cannot be done because online_cpus
	 * has been changed).
	 * To avoid this deadlock, we call get_online_cpus() to prevent
	 * cpu-hotplug outside of the text_mutex locking.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if need) kprobes listed on unoptimizing_list.
 */
static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
{
	struct optimized_kprobe *op, *tmp;

	/* Unoptimization must be done anytime */
	if (list_empty(&unoptimizing_list))
		return;

	/* Ditto to do_optimize_kprobes */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, free_list);
	/* Loop free_list for disarming */
	list_for_each_entry_safe(op, tmp, free_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes.)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/* Reclaim all kprobes on the free_list */
static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, free_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start optimizer after OPTIMIZE_DELAY passed */
static __kprobes void kick_kprobe_optimizer(void)
{
	if (!delayed_work_pending(&optimizing_work))
		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	LIST_HEAD(free_list);

	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);
	mutex_lock(&kprobe_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes(&free_list);

	/*
	 * Step 2: Wait for the quiescence period to ensure all running
	 * interrupts are done. Because an optprobe may modify multiple
	 * instructions, there is a chance that the Nth instruction is
	 * interrupted. In that case, the running interrupt can return to
	 * the 2nd-Nth byte of the jump instruction. This wait avoids that.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes(&free_list);

	mutex_unlock(&kprobe_mutex);
	mutex_unlock(&module_mutex);

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
	else
		/* Wake up all waiters */
		complete_all(&optimizer_comp);
}

/* Wait for completing optimization and unoptimization */
static __kprobes void wait_for_kprobe_optimizer(void)
{
	if (delayed_work_pending(&optimizing_work))
		wait_for_completion(&optimizer_comp);
}

/* Optimize kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing. Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	get_online_cpus();
	arch_unoptimize_kprobe(op);
	put_online_cpus();
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is an unoptimizing kprobe and forced,
			 * forcibly unoptimize it. (No need to unoptimize an
			 * unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * An unused kprobe MUST be on the way of delayed unoptimizing (which
	 * means there is still a relative jump in place) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
			"aggrprobe@%p\n", ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}

/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}

/* Allocate new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}

static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		return;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If failed to setup optimizing, fallback to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		return;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);
}

#ifdef CONFIG_SYSCTL
/* This should be called with kprobe_mutex locked */
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	printk(KERN_INFO "Kprobes globally optimized\n");
}

/* This should be called with kprobe_mutex locked */
static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

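/*
 * Administrative note (illustrative, not part of this file): the handler
 * below backs the debug.kprobes-optimization sysctl, which is wired up in
 * kernel/sysctl.c.  Assuming the usual procfs layout, toggling it looks
 * roughly like:
 *
 *	# echo 0 > /proc/sys/debug/kprobes-optimization	(forbid, unoptimize all)
 *	# echo 1 > /proc/sys/debug/kprobes-optimization	(allow, re-optimize)
 */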
printk(KERN_INFO "Kprobes globally unoptimized\n"); 824 } 825 826 int sysctl_kprobes_optimization; 827 int proc_kprobes_optimization_handler(struct ctl_table *table, int write, 828 void __user *buffer, size_t *length, 829 loff_t *ppos) 830 { 831 int ret; 832 833 mutex_lock(&kprobe_mutex); 834 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; 835 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 836 837 if (sysctl_kprobes_optimization) 838 optimize_all_kprobes(); 839 else 840 unoptimize_all_kprobes(); 841 mutex_unlock(&kprobe_mutex); 842 843 return ret; 844 } 845 #endif /* CONFIG_SYSCTL */ 846 847 /* Put a breakpoint for a probe. Must be called with text_mutex locked */ 848 static void __kprobes __arm_kprobe(struct kprobe *p) 849 { 850 struct kprobe *_p; 851 852 /* Check collision with other optimized kprobes */ 853 _p = get_optimized_kprobe((unsigned long)p->addr); 854 if (unlikely(_p)) 855 /* Fallback to unoptimized kprobe */ 856 unoptimize_kprobe(_p, true); 857 858 arch_arm_kprobe(p); 859 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ 860 } 861 862 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */ 863 static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt) 864 { 865 struct kprobe *_p; 866 867 unoptimize_kprobe(p, false); /* Try to unoptimize */ 868 869 if (!kprobe_queued(p)) { 870 arch_disarm_kprobe(p); 871 /* If another kprobe was blocked, optimize it. */ 872 _p = get_optimized_kprobe((unsigned long)p->addr); 873 if (unlikely(_p) && reopt) 874 optimize_kprobe(_p); 875 } 876 /* TODO: reoptimize others after unoptimized this probe */ 877 } 878 879 #else /* !CONFIG_OPTPROBES */ 880 881 #define optimize_kprobe(p) do {} while (0) 882 #define unoptimize_kprobe(p, f) do {} while (0) 883 #define kill_optimized_kprobe(p) do {} while (0) 884 #define prepare_optimized_kprobe(p) do {} while (0) 885 #define try_to_optimize_kprobe(p) do {} while (0) 886 #define __arm_kprobe(p) arch_arm_kprobe(p) 887 #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) 888 #define kprobe_disarmed(p) kprobe_disabled(p) 889 #define wait_for_kprobe_optimizer() do {} while (0) 890 891 /* There should be no unused kprobes can be reused without optimization */ 892 static void reuse_unused_kprobe(struct kprobe *ap) 893 { 894 printk(KERN_ERR "Error: There should be no unused kprobe here.\n"); 895 BUG_ON(kprobe_unused(ap)); 896 } 897 898 static __kprobes void free_aggr_kprobe(struct kprobe *p) 899 { 900 arch_remove_kprobe(p); 901 kfree(p); 902 } 903 904 static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 905 { 906 return kzalloc(sizeof(struct kprobe), GFP_KERNEL); 907 } 908 #endif /* CONFIG_OPTPROBES */ 909 910 /* Arm a kprobe with text_mutex */ 911 static void __kprobes arm_kprobe(struct kprobe *kp) 912 { 913 /* 914 * Here, since __arm_kprobe() doesn't use stop_machine(), 915 * this doesn't cause deadlock on text_mutex. So, we don't 916 * need get_online_cpus(). 
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	/* Ditto */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, true);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
__acquires(hlist_lock)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_unlock(unsigned long hash,
	unsigned long *flags)
__releases(hlist_lock)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() may add to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

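/*
 * Note on multiple probes at one address (illustrative, handler names are
 * hypothetical): when a second kprobe is registered at an address that
 * already has one, the original probe is replaced in the hash table by an
 * aggregator ("aggrprobe") whose aggr_*_handler callbacks fan out to every
 * kprobe chained on its ->list.
 *
 *	static struct kprobe kp1 = { .symbol_name = "do_fork", .pre_handler = h1 };
 *	static struct kprobe kp2 = { .symbol_name = "do_fork", .pre_handler = h2 };
 *
 *	register_kprobe(&kp1);	// kp1 sits directly in kprobe_table
 *	register_kprobe(&kp2);	// an aggrprobe now heads the hlist; kp1 and
 *				// kp2 hang off its ->list and both handlers run
 */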
/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			__arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about the kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap)
			return -ENOMEM;
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* This probe is going to die. Rescue it */
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location as a
		 * probe in the module vaddr area which has already been
		 * freed. So, the instruction slot has already been
		 * released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_probe. It will be used next
			 * time, or freed by unregister_kprobe.
			 */
			return ret;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear gone flag to prevent allocating new slot again, and
		 * set disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up the symbol or if an
 * invalid combination of parameters is passed.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if ((p->symbol_name && p->addr) ||
	    (!p->symbol_name && !p->addr))
		goto invalid;

	if (p->symbol_name) {
		kprobe_lookup_name(p->symbol_name, addr);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

/* Check passed kprobe is valid and return kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

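/*
 * Minimal caller-side sketch for register_kprobe() (illustrative only; see
 * Documentation/kprobes.txt for the full sample module).  The handler name
 * below is hypothetical:
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("pre_handler: p->addr = %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	// module init:  ret = register_kprobe(&kp);
 *	// module exit:  unregister_kprobe(&kp);
 */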
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	jump_label_lock();
	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    ftrace_text_reserved(p->addr, p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr))
		goto fail_with_jump_label;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/* Return -ENOENT if we fail. */
		ret = -ENOENT;
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod)))
			goto fail_with_jump_label;

		/*
		 * If the module freed .init.text, we can't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			goto fail_with_jump_label;
		}
		/* ret will be updated by following code */
	}
	preempt_enable();
	jump_label_unlock();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);

	jump_label_lock(); /* needed to call jump_label_text_reserved() */

	get_online_cpus();	/* For avoiding text_mutex deadlock. */
	mutex_lock(&text_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		__arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	jump_label_unlock();
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;

fail_with_jump_label:
	preempt_enable();
	jump_label_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);

/* Check if all probes on the aggrprobe are disabled */
static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: must be called with kprobe_mutex held */
static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return NULL;

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			disarm_kprobe(orig_p);
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (ap == NULL)
		return -EINVAL;

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove it from the hash list.
		 */
		goto disarmed;

	/* Following process expects this probe is an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed can happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If disabling probe has special handlers, update aggrprobe */
		if (p->break_handler && !kprobe_gone(p))
			ap->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr, offset;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		/* Verify probepoint is a function entry point */
		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
		    offset == 0) {
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		} else
			ret = -EINVAL;

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

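/*
 * Caller-side sketch for function-return probes (illustrative only; the
 * handler name is hypothetical).  maxactive bounds how many returns can be
 * tracked concurrently; a value <= 0 lets register_kretprobe() pick a
 * default based on the CPU count:
 *
 *	static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("%s returned\n", ri->rp->kp.symbol_name);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= ret_handler,
 *		.maxactive	= 20,
 *	};
 *
 *	// register_kretprobe(&my_kretprobe) / unregister_kretprobe(&my_kretprobe)
 */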
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	if (__disable_kprobe(kp) == NULL)
		ret = -EINVAL;

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier call back, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed, but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed.  When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed.  We need to
	 * disable any kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr at which this probe is installed
				 * will soon be vfreed and is not synced to
				 * disk.  Hence, disarming the breakpoint
				 * isn't needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Look up and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to these functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* look up the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}
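/*
 * Illustrative sketch only: because a kprobe need not sit on the first
 * instruction of a function (which is why the blacklist above records an
 * address range per symbol), a probe can be registered at a symbol plus
 * offset.  "vfs_read" and the offset value are arbitrary examples; the
 * offset must land on an instruction boundary.
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kprobe = {
 *		.symbol_name	= "vfs_read",
 *		.offset		= 0x10,
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kprobe);
 */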
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
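/*
 * For reference, each line that report_probe() emits through the debugfs
 * "list" file has the form: address, probe type (k/r/j), symbol+offset,
 * module (if any), then any status flags.  The address and symbol below
 * are only an illustration of the format:
 *
 *	c05a8d90 k do_fork+0x0   [DISABLED]
 */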
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize kprobe itself */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				__arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				__disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);

	/* Wait for disarming all kprobes by optimizer */
	wait_for_kprobe_optimizer();
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched.  We could reuse that facility when
 * it becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
	.llseek =	default_llseek,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);
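/*
 * Illustrative userspace sketch, not part of this file: with debugfs
 * mounted at /sys/kernel/debug (an assumption about the mount point), the
 * "enabled" file created above can be used to disarm or re-arm all kprobes
 * at once.  write_enabled_file_bool() treats '0'/'n'/'N' as "disarm" and
 * '1'/'y'/'Y' as "arm".
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/kprobes/enabled", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "0\n", 2);	// disarm all kprobes; "1\n" re-arms
 *		close(fd);
 *		return 0;
 *	}
 */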