/*
 * Kernel Probes (KProbes)
 * kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
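/*
 * Example (illustrative sketch, not part of this file): resolving a
 * symbol to a probeable address with kprobe_lookup_name().  The macro
 * expands to an assignment, so the address variable is named directly;
 * "do_fork" is just an arbitrary symbol chosen for illustration.
 *
 *	kprobe_opcode_t *addr;
 *
 *	kprobe_lookup_name("do_fork", addr);
 *	if (!addr)
 *		return -ENOENT;		// symbol not found in kallsyms
 */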
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped.  x86_64 and POWER4 and above have no-exec support,
 * so single-stepping an instruction copied onto a vmalloc'ed,
 * kmalloc'ed, or data page is a recipe for disaster.
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}
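/*
 * Worked example (illustrative; the exact numbers are arch-dependent):
 * on x86, where PAGE_SIZE is 4096, kprobe_opcode_t is one byte, and
 * MAX_INSN_SIZE is 16, slots_per_page() yields 4096 / (16 * 1) = 256
 * instruction slots per executable page, and KPROBE_INSN_PAGE_SIZE(256)
 * sizes the tracking struct so slot_used[] holds one state byte per slot.
 */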
enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static void *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

static void free_insn_page(void *page)
{
	module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	mutex_lock(&c->mutex);
retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc() so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside.  This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}

/* Return 1 if the page holding the freed slot became empty, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is single-stepping on a garbage slot */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&c->mutex);
	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			goto out;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
out:
	mutex_unlock(&c->mutex);
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);
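/*
 * Example (illustrative sketch, not part of this file): looking up the
 * kprobe registered at an address.  Per the comment above, the caller
 * must either hold kprobe_mutex or have preemption disabled, since the
 * hash-bucket walk relies on RCU.
 *
 *	struct kprobe *p;
 *
 *	preempt_disable();
 *	p = get_kprobe((void *)addr);	// addr: some probed text address
 *	if (p)
 *		pr_info("probe found at %p\n", p->addr);
 *	preempt_enable();
 */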
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed.  Note: p must be on the hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If this is not an aggr/opt probe, just return whether it is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on the (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (but excluding the breakpoint itself).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}
/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	/* Optimization is never done while all probes are disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	/*
	 * Optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus.  At the
	 * same time, text_mutex is held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to
	 * lock text_mutex, but stop_machine() cannot proceed because
	 * online_cpus has changed).
	 * To avoid this deadlock, we call get_online_cpus() to prevent
	 * cpu-hotplug from running while text_mutex is locked.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_optimize_kprobes(&optimizing_list);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	/* Unoptimization must run even when all probes are disarmed */
	if (list_empty(&unoptimizing_list))
		return;

	/* Ditto to do_optimize_kprobes */
	get_online_cpus();
	mutex_lock(&text_mutex);
	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from the hash list.  After
			 * waiting for synchronization, these probes are
			 * reclaimed.  (reclaiming is done by
			 * do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/* Reclaim all kprobes on freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		BUG_ON(!kprobe_unused(&op->kp));
		list_del_init(&op->list);
		free_aggr_kprobe(&op->kp);
	}
}

/* Start the optimizer after OPTIMIZE_DELAY has passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for a quiescence period to ensure all running
	 * interrupts are done.  Because an optprobe may modify multiple
	 * instructions, there is a chance that the Nth instruction is
	 * interrupted.  In that case, the running interrupt can return
	 * to the 2nd-Nth byte of the jump instruction.  This wait avoids it.
	 */
	synchronize_sched();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	mutex_unlock(&kprobe_mutex);

	/* Step 5: Kick the optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();
}
/* Wait for optimization and unoptimization to complete */
static void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported on optprobes. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes in the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	if (!list_empty(&op->list))
		/* This is under unoptimizing.  Just dequeue the probe */
		list_del_init(&op->list);
	else {
		list_add(&op->list, &optimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	get_online_cpus();
	arch_unoptimize_kprobe(op);
	put_online_cpus();
	if (kprobe_disabled(&op->kp))
		arch_disarm_kprobe(&op->kp);
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p)) {
		/* Unoptimized or unoptimizing case */
		if (force && !list_empty(&op->list)) {
			/*
			 * Only if this is an unoptimizing kprobe and forced,
			 * forcibly unoptimize it.  (No need to unoptimize
			 * an unoptimized kprobe again :)
			 */
			list_del_init(&op->list);
			force_unoptimize_kprobe(op);
		}
		return;
	}

	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		return;
	}
	/* Optimized kprobe case */
	if (force)
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reusing */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	BUG_ON(!kprobe_unused(ap));
	/*
	 * An unused kprobe MUST be on the way of delayed unoptimizing (which
	 * means there is still a relative jump) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	if (unlikely(list_empty(&op->list)))
		printk(KERN_WARNING "Warning: found a stray unused "
			"aggrprobe@%p\n", ap->addr);
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	BUG_ON(!kprobe_optready(ap));
	optimize_kprobe(ap);
}
/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list.  After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op, p);
}

/* Allocate a new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If we failed to set up optimizing, fall back to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
}

#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}

static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
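/*
 * Example (illustrative; the exact sysctl path is defined by the ctl
 * table that wires up proc_kprobes_optimization_handler(), typically
 * debug.kprobes-optimization as described in Documentation/kprobes.txt):
 *
 *	# cat /proc/sys/debug/kprobes-optimization
 *	1
 *	# echo 0 > /proc/sys/debug/kprobes-optimization   # unoptimize all
 *	# echo 1 > /proc/sys/debug/kprobes-optimization   # re-optimize all
 */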
/* Put a breakpoint for a probe.  Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe.  Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

/* Without optimization, there should be no unused kprobes to be reused */
static void reuse_unused_kprobe(struct kprobe *ap)
{
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	BUG_ON(kprobe_unused(ap));
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};
static int kprobe_ftrace_enabled;

/* Must ensure p->addr is really on ftrace */
static int prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static void arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
	kprobe_ftrace_enabled++;
	if (kprobe_ftrace_enabled == 1) {
		ret = register_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
	}
}

/* Caller must lock kprobe_mutex */
static void disarm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	kprobe_ftrace_enabled--;
	if (kprobe_ftrace_enabled == 0) {
		ret = unregister_ftrace_function(&kprobe_ftrace_ops);
		WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret);
	}
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 1, 0);
	WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	do {} while (0)
#define disarm_kprobe_ftrace(p)	do {} while (0)
#endif
/* Arm a kprobe with text_mutex */
static void arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp))) {
		arm_kprobe_ftrace(kp);
		return;
	}
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause a deadlock on text_mutex.  So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp))) {
		disarm_kprobe_ftrace(kp);
		return;
	}
	/* Ditto */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
NOKPROBE_SYMBOL(aggr_break_handler);

/* Walks the list and increments the nmissed count for the multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

void recycle_rp_inst(struct kretprobe_instance *ri,
		     struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove the rp inst from the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);

static void kretprobe_table_lock(unsigned long hash,
				 unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);

void kretprobe_hash_unlock(struct task_struct *tsk,
			   unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);

static void kretprobe_table_unlock(unsigned long hash,
				   unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);
/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task.  These left-over instances represent probed
 * functions that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks are not yet initialized. */
		return;

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
NOKPROBE_SYMBOL(kprobe_flush_task);

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);

/*
 * Add the new probe to ap->list.  Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe".  Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	/*
	 * Get online CPUs to avoid a text_mutex deadlock with stop_machine(),
	 * which is invoked by unoptimize_kprobe() in add_new_kprobe().
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap))
		/* This probe is going to die.  Rescue it */
		reuse_unused_kprobe(ap);

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe in a module vaddr area which has
		 * already been freed, so the instruction slot has already
		 * been released.  We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_probe.  It will be used next
			 * time, or freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	jump_label_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

static bool within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area.
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}

	return false;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it.  This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up the symbol or if an
 * invalid combination of parameters is passed.
 */
static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if ((p->symbol_name && p->addr) ||
	    (!p->symbol_name && !p->addr))
		goto invalid;

	if (p->symbol_name) {
		kprobe_lookup_name(p->symbol_name, addr);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}
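/*
 * Example (illustrative sketch, not part of this file): the two valid
 * ways to address a probe point, as resolved by kprobe_addr() above.
 * Exactly one of .addr and .symbol_name may be set; "vfs_read" and the
 * offset are arbitrary values chosen for illustration.
 *
 *	// By symbol plus offset:
 *	struct kprobe kp = {
 *		.symbol_name	= "vfs_read",
 *		.offset		= 0x10,		// 0x10 bytes past the entry
 *	};
 *
 *	// Or by raw address:
 *	struct kprobe kp2 = {
 *		.addr		= (kprobe_opcode_t *)some_text_address,
 *	};
 *
 * Setting both (or neither) makes kprobe_addr() return ERR_PTR(-EINVAL).
 */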
/* Check that the passed kprobe is valid and return the kprobe on the kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry_rcu(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* Given address is not on the instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = arch_check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in a reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module has freed .init.text, we can't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}
int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, we lock text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);	/* Avoiding text modification */
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
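/*
 * Example (illustrative module sketch, not part of this file): the
 * minimal register_kprobe() usage pattern.  The symbol name is an
 * arbitrary choice; see also samples/kprobes/kprobe_example.c.
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit at %p\n", p->addr);
 *		return 0;	// 0: let the probed insn run normally
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	// module init:
 *	ret = register_kprobe(&my_kp);
 *	// module exit:
 *	unregister_kprobe(&my_kp);
 */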
/* Check if all probes on the aggrprobe are disabled */
static int aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: Must be called with kprobe_mutex held */
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return NULL;

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			/*
			 * If kprobes_all_disarmed is set, orig_p
			 * should have already been disarmed, so
			 * skip the unneeded disarming process.
			 */
			if (!kprobes_all_disarmed)
				disarm_kprobe(orig_p, true);
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe.  This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (ap == NULL)
		return -EINVAL;

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe).  Remove it from the hash list.
		 */
		goto disarmed;

	/* The following process expects this probe to be an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If the disabling probe has special handlers, update the aggrprobe */
		if (p->break_handler && !kprobe_gone(p))
			ap->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because the post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	BUG_ON(!kprobe_disarmed(ap));
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr, offset;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		/* Verify the probepoint is a function entry point */
		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
		    offset == 0) {
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		} else
			ret = -EINVAL;

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);
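/*
 * Example (illustrative module sketch, not part of this file): a jprobe
 * handler mirrors the probed function's signature so it can read the
 * arguments, and must end with jprobe_return().  The probed function
 * here is an arbitrary choice; see samples/kprobes/jprobe_example.c.
 *
 *	static long my_handler(long arg0, long arg1)
 *	{
 *		pr_info("args: %ld %ld\n", arg0, arg1);
 *		jprobe_return();	// never returns normally
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= my_handler,
 *		.kp.symbol_name	= "some_traced_function",
 *	};
 *
 *	ret = register_jprobe(&my_jp);	// later: unregister_jprobe(&my_jp)
 */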
void unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe.  When the
 * probe hits, it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*
	 * To avoid deadlocks, prohibit return probing in NMI contexts;
	 * just skip the probe and increase the (inexact) 'nmissed'
	 * statistical counter, so that the user is informed that
	 * something happened:
	 */
	if (unlikely(in_nmi())) {
		rp->nmissed++;
		return 0;
	}

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);
int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
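/*
 * Example (illustrative module sketch, not part of this file): a minimal
 * kretprobe that reports a function's return value.  The symbol is an
 * arbitrary choice; see samples/kprobes/kretprobe_example.c.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,	// concurrent instances to pre-allocate
 *	};
 *
 *	ret = register_kretprobe(&my_rp);  // later: unregister_kretprobe(&my_rp)
 */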
int register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove the insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	if (__disable_kprobe(kp) == NULL)
		ret = -EINVAL;

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
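/*
 * Example (illustrative sketch, not part of this file): temporarily
 * muting a registered probe without unregistering it.  my_kp is assumed
 * to be a kprobe that was registered earlier.
 *
 *	disable_kprobe(&my_kp);		// handlers stop firing; probe stays registered
 *	...
 *	ret = enable_kprobe(&my_kp);	// re-arm; -EINVAL if the probe is gone
 */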
void dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
NOKPROBE_SYMBOL(dump_kprobe);

/*
 * Look up and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we need to determine the range of
 * addresses covered by each listed function, since a kprobe need not
 * be placed at the beginning of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					     unsigned long *end)
{
	unsigned long *iter;
	struct kprobe_blacklist_entry *ent;
	unsigned long entry, offset = 0, size = 0;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);

		if (!kernel_text_address(entry) ||
		    !kallsyms_lookup_size_offset(entry, &size, &offset)) {
			pr_err("Failed to find blacklist entry at %p\n",
			       (void *)entry);
			continue;
		}

		ent = kmalloc(sizeof(*ent), GFP_KERNEL);
		if (!ent)
			return -ENOMEM;
		ent->start_addr = entry;
		ent->end_addr = entry + size;
		INIT_LIST_HEAD(&ent->list);
		list_add_tail(&ent->list, &kprobe_blacklist);
	}
	return 0;
}

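/*
 * Because each entry above records a [start_addr, end_addr) range, a
 * registration-time check can reject probes placed anywhere inside a
 * blacklisted function, not only at its entry point. Conceptually (the
 * helper name is hypothetical; the actual check is implemented
 * elsewhere in this file):
 *
 *	static bool addr_in_blacklist(unsigned long addr)
 *	{
 *		struct kprobe_blacklist_entry *ent;
 *
 *		list_for_each_entry(ent, &kprobe_blacklist, list)
 *			if (addr >= ent->start_addr && addr < ent->end_addr)
 *				return true;
 *		return false;
 *	}
 */
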
/* Module notifier callback, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module .text and
	 * .init.text sections are about to be freed. When MODULE_STATE_LIVE
	 * is notified, only the .init.text section is freed. We must
	 * disable any kprobes that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

/* Markers of the _kprobe_blacklist section */
extern unsigned long __start_kprobe_blacklist[];
extern unsigned long __stop_kprobe_blacklist[];

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	err = populate_kprobe_blacklist(__start_kprobe_blacklist,
					__stop_kprobe_blacklist);
	if (err) {
		pr_err("kprobes: failed to populate blacklist: %d\n", err);
		pr_err("Please take care when using kprobes.\n");
	}

	if (kretprobe_blacklist_size) {
		/* look up the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}

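/*
 * With the format strings above, a /sys/kernel/debug/kprobes/list line
 * looks like this (addresses are illustrative):
 *
 *	c015d71a  k  vfs_read+0x0
 *	c03dedc5  r  tcp_v4_rcv+0x0    [DISABLED]
 *
 * i.e. "address type symbol+offset [module] [flags]", where the type is
 * 'k' (kprobe), 'r' (kretprobe) or 'j' (jprobe) and the flags are any
 * of [GONE], [DISABLED], [OPTIMIZED] and [FTRACE].
 */
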
static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[KSYM_NAME_LEN];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* kprobes/blacklist -- shows which functions cannot be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
	return seq_list_start(&kprobe_blacklist, *pos);
}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_blacklist, pos);
}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
	struct kprobe_blacklist_entry *ent =
		list_entry(v, struct kprobe_blacklist_entry, list);

	seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr,
		   (void *)ent->end_addr, (void *)ent->start_addr);
	return 0;
}

static const struct seq_operations kprobe_blacklist_seq_ops = {
	.start = kprobe_blacklist_seq_start,
	.next  = kprobe_blacklist_seq_next,
	.stop  = kprobe_seq_stop,	/* Reuse void function */
	.show  = kprobe_blacklist_seq_show,
};

static int kprobe_blacklist_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobe_blacklist_seq_ops);
}

static const struct file_operations debugfs_kprobe_blacklist_ops = {
	.open		= kprobe_blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

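/*
 * kprobes/enabled -- the global arm/disarm switch, backed by the
 * helpers below. Writing '0'/'n'/'N' disarms every probe while keeping
 * it registered; writing '1'/'y'/'Y' re-arms all probes that are not
 * individually disabled. From user space (assuming debugfs is mounted
 * at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 *	echo 1 > /sys/kernel/debug/kprobes/enabled
 */
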
static void arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/*
	 * optimize_kprobe() called by arm_kprobe() checks
	 * kprobes_all_disarmed, so set kprobes_all_disarmed before
	 * arm_kprobe.
	 */
	kprobes_all_disarmed = false;
	/* Arming kprobes doesn't optimize kprobe itself */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist)
			if (!kprobe_disabled(p))
				arm_kprobe(p);
	}

	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				disarm_kprobe(p, false);
		}
	}
	mutex_unlock(&kprobe_mutex);

	/* Wait for the optimizer to finish disarming all kprobes */
	wait_for_kprobe_optimizer();
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * it becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read	= read_enabled_file_bool,
	.write	= write_enabled_file_bool,
	.llseek	= default_llseek,
};

static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file)
		goto error;

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file)
		goto error;

	file = debugfs_create_file("blacklist", 0444, dir, NULL,
				   &debugfs_kprobe_blacklist_ops);
	if (!file)
		goto error;

	return 0;

error:
	debugfs_remove(dir);
	return -ENOMEM;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);