// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <linux/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


static int kprobes_initialized;
/* kprobe_table can be accessed by
 * - Normal hlist traversal and RCU add/del while kprobe_mutex is held.
 * Or
 * - RCU hlist traversal with preemption disabled (breakpoint handlers)
 */
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

/* This protects kprobe_table and optimizing_list */
static DEFINE_MUTEX(kprobe_mutex);
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
					unsigned int __unused)
{
	return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
}

static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/* Blacklist -- list of struct kprobe_blacklist_entry */
static LIST_HEAD(kprobe_blacklist);
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	struct kprobe_insn_cache *cache;
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

void __weak *alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

void __weak free_insn_page(void *page)
{
	module_memfree(page);
}

struct kprobe_insn_cache kprobe_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;
	kprobe_opcode_t *slot = NULL;

	/* Since the slot array is not protected by rcu, we need a mutex */
	mutex_lock(&c->mutex);
retry:
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					slot = kip->insns + (i * c->insn_size);
					rcu_read_unlock();
					goto out;
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}
	rcu_read_unlock();

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space. Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		goto out;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = c->alloc();
	if (!kip->insns) {
		kfree(kip);
		goto out;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	kip->cache = c;
	list_add_rcu(&kip->list, &c->pages);
	slot = kip->insns;
out:
	mutex_unlock(&c->mutex);
	return slot;
}

/* Return 1 if all garbage slots are collected, otherwise 0. */
static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use. Free it unless
		 * it's the last one. We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del_rcu(&kip->list);
			synchronize_rcu();
			kip->cache->free(kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
static int collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no one is interrupted on the garbage slots */
	synchronize_rcu();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}

void __free_insn_slot(struct kprobe_insn_cache *c,
		      kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	long idx;

	mutex_lock(&c->mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		idx = ((long)slot - (long)kip->insns) /
			(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c))
			goto out;
	}
	/* Could not find this slot. */
	WARN_ON(1);
	kip = NULL;
out:
	rcu_read_unlock();
	/* Mark and sweep: this may sleep */
	if (kip) {
		/* Check double free */
		WARN_ON(kip->slot_used[idx] != SLOT_USED);
		if (dirty) {
			kip->slot_used[idx] = SLOT_DIRTY;
			kip->ngarbage++;
			if (++c->nr_garbage > slots_per_page(c))
				collect_garbage_slots(c);
		} else {
			collect_one_slot(kip, idx);
		}
	}
	mutex_unlock(&c->mutex);
}
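/*
 * Illustrative usage sketch (not part of this file's logic): arch code is
 * expected to take a slot while preparing a probe and hand it back when
 * the probe is removed, via the get_insn_slot()/free_insn_slot() wrappers
 * around kprobe_insn_slots:
 *
 *	kprobe_opcode_t *buf = get_insn_slot();
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *	...
 *	free_insn_slot(buf, 0);		// 0 == clean: reclaim immediately
 */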
/*
 * Check given address is on the page of kprobe instruction slots.
 * This will be used for checking whether the address on a stack
 * is on a text area or not.
 */
bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
{
	struct kprobe_insn_page *kip;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(kip, &c->pages, list) {
		if (addr >= (unsigned long)kip->insns &&
		    addr < (unsigned long)kip->insns + PAGE_SIZE) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
struct kprobe_insn_cache kprobe_optinsn_slots = {
	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
	.alloc = alloc_insn_page,
	.free = free_insn_page,
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};
#endif
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__this_cpu_write(kprobe_instance, kp);
}

static inline void reset_kprobe_instance(void)
{
	__this_cpu_write(kprobe_instance, NULL);
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, head, hlist,
				 lockdep_is_held(&kprobe_mutex)) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}
NOKPROBE_SYMBOL(get_kprobe);
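/*
 * Illustrative lookup sketch: per the rules above, callers either hold
 * kprobe_mutex or run with preemption disabled, e.g.:
 *
 *	mutex_lock(&kprobe_mutex);
 *	p = get_kprobe(addr);
 *	if (p)
 *		...		// a probe is registered at addr
 *	mutex_unlock(&kprobe_mutex);
 */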
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/* Return true(!0) if the kprobe is unused */
static inline int kprobe_unused(struct kprobe *p)
{
	return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
	       list_empty(&p->list);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
{
	memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}
NOKPROBE_SYMBOL(opt_pre_handler);

/* Free optimized instructions and optimized_kprobe */
static void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	arch_remove_kprobe(p);
	kfree(op);
}

/* Return true(!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
static inline int kprobe_disarmed(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* If kprobe is not an aggr/opt probe, just return whether it is disabled */
	if (!kprobe_aggrprobe(p))
		return kprobe_disabled(p);

	op = container_of(p, struct optimized_kprobe, kp);

	return kprobe_disabled(p) && list_empty(&op->list);
}

/* Return true(!0) if the probe is queued on (un)optimizing lists */
static int kprobe_queued(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			return 1;
	}
	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (excluding the breakpoint itself).
 */
static struct kprobe *get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}
/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);
static LIST_HEAD(unoptimizing_list);
static LIST_HEAD(freeing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/*
 * Optimize (replace a breakpoint with a jump) kprobes listed on
 * optimizing_list.
 */
static void do_optimize_kprobes(void)
{
	lockdep_assert_held(&text_mutex);
	/*
	 * The optimization/unoptimization refers to online_cpus via
	 * stop_machine(), while cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex is held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex but stop_machine() cannot be done because online_cpus
	 * has been changed).
	 * To avoid this deadlock, the caller must have locked cpu-hotplug,
	 * preventing cpu-hotplug from running outside of the text_mutex
	 * locking.
	 */
	lockdep_assert_cpus_held();

	/* Optimization is never done while kprobes are all disarmed */
	if (kprobes_all_disarmed || !kprobes_allow_optimization ||
	    list_empty(&optimizing_list))
		return;

	arch_optimize_kprobes(&optimizing_list);
}

/*
 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
 * if needed) kprobes listed on unoptimizing_list.
 */
static void do_unoptimize_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	lockdep_assert_held(&text_mutex);
	/* See comment in do_optimize_kprobes() */
	lockdep_assert_cpus_held();

	/* Unoptimization must be done at any time */
	if (list_empty(&unoptimizing_list))
		return;

	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
	/* Loop over freeing_list for disarming */
	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		/* Switching from detour code to origin */
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		/* Disarm probes if marked disabled */
		if (kprobe_disabled(&op->kp))
			arch_disarm_kprobe(&op->kp);
		if (kprobe_unused(&op->kp)) {
			/*
			 * Remove unused probes from hash list. After waiting
			 * for synchronization, these probes are reclaimed.
			 * (reclaiming is done by do_free_cleaned_kprobes().)
			 */
			hlist_del_rcu(&op->kp.hlist);
		} else
			list_del_init(&op->list);
	}
}

/* Reclaim all kprobes on the freeing_list */
static void do_free_cleaned_kprobes(void)
{
	struct optimized_kprobe *op, *tmp;

	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
		list_del_init(&op->list);
		if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
			/*
			 * This must not happen, but if there is a kprobe
			 * still in use, keep it on the kprobes hash list.
			 */
			continue;
		}
		free_aggr_kprobe(&op->kp);
	}
}

/* Start optimizer after OPTIMIZE_DELAY passed */
static void kick_kprobe_optimizer(void)
{
	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Kprobe jump optimizer */
static void kprobe_optimizer(struct work_struct *work)
{
	mutex_lock(&kprobe_mutex);
	cpus_read_lock();
	mutex_lock(&text_mutex);
	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);

	/*
	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
	 * kprobes before waiting for the quiescence period.
	 */
	do_unoptimize_kprobes();

	/*
	 * Step 2: Wait for the quiescence period to ensure that all
	 * potentially preempted tasks have been scheduled normally. Because
	 * an optprobe may modify multiple instructions, there is a chance
	 * that a task was preempted on the Nth instruction. In that case,
	 * such a task could return to the 2nd-Nth byte of the jump
	 * instruction. This wait avoids that. Note that on a non-preemptive
	 * kernel, this is transparently converted to synchronize_sched()
	 * to wait for all interrupts to have completed.
	 */
	synchronize_rcu_tasks();

	/* Step 3: Optimize kprobes after the quiescence period */
	do_optimize_kprobes();

	/* Step 4: Free cleaned kprobes after the quiescence period */
	do_free_cleaned_kprobes();

	mutex_unlock(&module_mutex);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	/* Step 5: Kick optimizer again if needed */
	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
		kick_kprobe_optimizer();

	mutex_unlock(&kprobe_mutex);
}

/* Wait for completing optimization and unoptimization */
void wait_for_kprobe_optimizer(void)
{
	mutex_lock(&kprobe_mutex);

	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
		mutex_unlock(&kprobe_mutex);

		/* this will also make optimizing_work execute immediately */
		flush_delayed_work(&optimizing_work);
		/* @optimizing_work might not have been queued yet, relax */
		cpu_relax();

		mutex_lock(&kprobe_mutex);
	}

	mutex_unlock(&kprobe_mutex);
}
static bool optprobe_queued_unopt(struct optimized_kprobe *op)
{
	struct optimized_kprobe *_op;

	list_for_each_entry(_op, &unoptimizing_list, list) {
		if (op == _op)
			return true;
	}

	return false;
}

/* Optimize kprobe if p is ready to be optimized */
static void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Kprobes with a post_handler cannot be optimized */
	if (p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes on the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
		if (optprobe_queued_unopt(op)) {
			/* This is under unoptimizing. Just dequeue the probe */
			list_del_init(&op->list);
		}
		return;
	}
	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;

	/* On the unoptimizing/optimizing_list, op must have the OPTIMIZED flag */
	if (WARN_ON_ONCE(!list_empty(&op->list)))
		return;

	list_add(&op->list, &optimizing_list);
	kick_kprobe_optimizer();
}

/* Short cut to direct unoptimizing */
static void force_unoptimize_kprobe(struct optimized_kprobe *op)
{
	lockdep_assert_cpus_held();
	arch_unoptimize_kprobe(op);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
}

/* Unoptimize a kprobe if p is optimized */
static void unoptimize_kprobe(struct kprobe *p, bool force)
{
	struct optimized_kprobe *op;

	if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
		return; /* This is not an optprobe nor optimized */

	op = container_of(p, struct optimized_kprobe, kp);
	if (!kprobe_optimized(p))
		return;

	if (!list_empty(&op->list)) {
		if (optprobe_queued_unopt(op)) {
			/* Queued in unoptimizing queue */
			if (force) {
				/*
				 * Forcibly unoptimize the kprobe here, and queue it
				 * in the freeing list for release afterwards.
				 */
				force_unoptimize_kprobe(op);
				list_move(&op->list, &freeing_list);
			}
		} else {
			/* Dequeue from the optimizing queue */
			list_del_init(&op->list);
			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		}
		return;
	}

	/* Optimized kprobe case */
	if (force) {
		/* Forcibly update the code: this is a special case */
		force_unoptimize_kprobe(op);
	} else {
		list_add(&op->list, &unoptimizing_list);
		kick_kprobe_optimizer();
	}
}

/* Cancel unoptimizing for reuse */
static int reuse_unused_kprobe(struct kprobe *ap)
{
	struct optimized_kprobe *op;

	/*
	 * An unused kprobe MUST be in the middle of delayed unoptimizing
	 * (meaning the relative jump is still in place) and disabled.
	 */
	op = container_of(ap, struct optimized_kprobe, kp);
	WARN_ON_ONCE(list_empty(&op->list));
	/* Enable the probe again */
	ap->flags &= ~KPROBE_FLAG_DISABLED;
	/* Optimize it again (remove from op->list) */
	if (!kprobe_optready(ap))
		return -EINVAL;

	optimize_kprobe(ap);
	return 0;
}
/* Remove optimized instructions */
static void kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list))
		/* Dequeue from the (un)optimization queue */
		list_del_init(&op->list);
	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;

	if (kprobe_unused(p)) {
		/* Enqueue if it is unused */
		list_add(&op->list, &freeing_list);
		/*
		 * Remove unused probes from the hash list. After waiting
		 * for synchronization, this probe is reclaimed.
		 * (reclaiming is done by do_free_cleaned_kprobes().)
		 */
		hlist_del_rcu(&op->kp.hlist);
	}

	/* Don't touch the code, because it is already freed. */
	arch_remove_optimized_kprobe(op);
}

static inline
void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		arch_prepare_optimized_kprobe(op, p);
}

/* Try to prepare optimized instructions */
static void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	__prepare_optimized_kprobe(op, p);
}

/* Allocate a new optimized_kprobe and try to prepare optimized instructions */
static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	__prepare_optimized_kprobe(op, p);

	return &op->kp;
}

static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it
 * NOTE: p must be a normal registered kprobe
 */
static void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	/* Impossible to optimize ftrace-based kprobe */
	if (kprobe_ftrace(p))
		return;

	/* For preparing optimization, jump_label_text_reserved() is called */
	cpus_read_lock();
	jump_label_lock();
	mutex_lock(&text_mutex);

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		goto out;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/* If we failed to set up optimizing, fall back to kprobe */
		arch_remove_optimized_kprobe(op);
		kfree(op);
		goto out;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);	/* This just kicks the optimizer thread */

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();
}

#ifdef CONFIG_SYSCTL
static void optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		goto out;

	cpus_read_lock();
	kprobes_allow_optimization = true;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	cpus_read_unlock();
	printk(KERN_INFO "Kprobes globally optimized\n");
out:
	mutex_unlock(&kprobe_mutex);
}

static void unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);
	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization) {
		mutex_unlock(&kprobe_mutex);
		return;
	}

	cpus_read_lock();
	kprobes_allow_optimization = false;
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p, false);
		}
	}
	cpus_read_unlock();
	mutex_unlock(&kprobe_mutex);

	/* Wait for unoptimizing completion */
	wait_for_kprobe_optimizer();
	printk(KERN_INFO "Kprobes globally unoptimized\n");
}
static DEFINE_MUTEX(kprobe_sysctl_mutex);
int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_sysctl_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_sysctl_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
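/*
 * Illustrative note (assumed wiring, see kernel/sysctl.c): the handler
 * above typically backs the debug.kprobes-optimization sysctl, so jump
 * optimization can be toggled at runtime:
 *
 *	# sysctl -w debug.kprobes-optimization=0	// unoptimize all
 *	# sysctl -w debug.kprobes-optimization=1	// allow optimization
 */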
/* Put a breakpoint for a probe. Must be called with text_mutex locked */
static void __arm_kprobe(struct kprobe *p)
{
	struct kprobe *_p;

	/* Check collision with other optimized kprobes */
	_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(_p))
		/* Fallback to unoptimized kprobe */
		unoptimize_kprobe(_p, true);

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

/* Remove the breakpoint of a probe. Must be called with text_mutex locked */
static void __disarm_kprobe(struct kprobe *p, bool reopt)
{
	struct kprobe *_p;

	/* Try to unoptimize */
	unoptimize_kprobe(p, kprobes_all_disarmed);

	if (!kprobe_queued(p)) {
		arch_disarm_kprobe(p);
		/* If another kprobe was blocked, optimize it. */
		_p = get_optimized_kprobe((unsigned long)p->addr);
		if (unlikely(_p) && reopt)
			optimize_kprobe(_p);
	}
	/* TODO: reoptimize others after unoptimizing this probe */
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p, f)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p, o)			arch_disarm_kprobe(p)
#define kprobe_disarmed(p)			kprobe_disabled(p)
#define wait_for_kprobe_optimizer()		do {} while (0)

static int reuse_unused_kprobe(struct kprobe *ap)
{
	/*
	 * If the optimized kprobe is NOT supported, the aggr kprobe is
	 * released at the same time that the last aggregated kprobe is
	 * unregistered.
	 * Thus there should be no chance to reuse an unused kprobe.
	 */
	printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
	return -EINVAL;
}

static void free_aggr_kprobe(struct kprobe *p)
{
	arch_remove_kprobe(p);
	kfree(p);
}

static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */

#ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS,
};

static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
	.func = kprobe_ftrace_handler,
	.flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

static int kprobe_ipmodify_enabled;
static int kprobe_ftrace_enabled;

/* The caller must ensure p->addr is really on ftrace */
static int prepare_kprobe(struct kprobe *p)
{
	if (!kprobe_ftrace(p))
		return arch_prepare_kprobe(p);

	return arch_prepare_kprobe_ftrace(p);
}

/* Caller must lock kprobe_mutex */
static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
			       int *cnt)
{
	int ret = 0;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
	if (ret) {
		pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
			 p->addr, ret);
		return ret;
	}

	if (*cnt == 0) {
		ret = register_ftrace_function(ops);
		if (ret) {
			pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
			goto err_ftrace;
		}
	}

	(*cnt)++;
	return ret;

err_ftrace:
	/*
	 * At this point, since ops is not registered, we should be safe
	 * from registering an empty filter.
	 */
	ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	return ret;
}
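/*
 * Illustrative note (an assumption drawn from the two ftrace_ops above):
 * a kprobe with a post_handler makes kprobe_ftrace_handler() modify
 * regs->ip, which requires FTRACE_OPS_FL_IPMODIFY; pre_handler-only
 * probes use the plain ops and can therefore share an ftrace location
 * with other IPMODIFY users such as livepatch.
 */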
static int arm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __arm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}

/* Caller must lock kprobe_mutex */
static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
				  int *cnt)
{
	int ret = 0;

	if (*cnt == 1) {
		ret = unregister_ftrace_function(ops);
		if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
			return ret;
	}

	(*cnt)--;

	ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
	WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
		  p->addr, ret);
	return ret;
}

static int disarm_kprobe_ftrace(struct kprobe *p)
{
	bool ipmodify = (p->post_handler != NULL);

	return __disarm_kprobe_ftrace(p,
		ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
		ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
}
#else	/* !CONFIG_KPROBES_ON_FTRACE */
#define prepare_kprobe(p)	arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p)	(-ENODEV)
#define disarm_kprobe_ftrace(p)	(-ENODEV)
#endif

/* Arm a kprobe with text_mutex */
static int arm_kprobe(struct kprobe *kp)
{
	if (unlikely(kprobe_ftrace(kp)))
		return arm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

/* Disarm a kprobe with text_mutex */
static int disarm_kprobe(struct kprobe *kp, bool reopt)
{
	if (unlikely(kprobe_ftrace(kp)))
		return disarm_kprobe_ftrace(kp);

	cpus_read_lock();
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp, reopt);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();

	return 0;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_pre_handler);

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}
NOKPROBE_SYMBOL(aggr_post_handler);

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	struct kprobe *cur = __this_cpu_read(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}
NOKPROBE_SYMBOL(aggr_fault_handler);

/* Walks the list and increments the nmissed count for the multiprobe case */
void kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);

void recycle_rp_inst(struct kretprobe_instance *ri,
		     struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		raw_spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		raw_spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
NOKPROBE_SYMBOL(recycle_rp_inst);
void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_lock);

static void kretprobe_table_lock(unsigned long hash,
				 unsigned long *flags)
__acquires(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_lock_irqsave(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_lock);

void kretprobe_hash_unlock(struct task_struct *tsk,
			   unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	raw_spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_hash_unlock);

static void kretprobe_table_unlock(unsigned long hash,
				   unsigned long *flags)
__releases(hlist_lock)
{
	raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	raw_spin_unlock_irqrestore(hlist_lock, *flags);
}
NOKPROBE_SYMBOL(kretprobe_table_unlock);

struct kprobe kprobe_busy = {
	.addr = (void *) get_kprobe,
};

void kprobe_busy_begin(void)
{
	struct kprobe_ctlblk *kcb;

	preempt_disable();
	__this_cpu_write(current_kprobe, &kprobe_busy);
	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
}

void kprobe_busy_end(void)
{
	__this_cpu_write(current_kprobe, NULL);
	preempt_enable();
}
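/*
 * Illustrative note: kprobe_busy_begin()/kprobe_busy_end() make this CPU
 * look as if it is already inside a kprobe, so a kretprobe that fires
 * while kprobe_flush_task() below holds a kretprobe_table lock is counted
 * as missed instead of recursing and deadlocking on that lock.
 */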
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	kprobe_busy_begin();

	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	kprobe_busy_end();
}
NOKPROBE_SYMBOL(kprobe_flush_task);

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *next;

	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);

/* Add the new probe to ap->list */
static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	if (p->post_handler)
		unoptimize_kprobe(ap, true);	/* Fall back to normal kprobe */

	list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = orig_p;

	cpus_read_lock();

	/* For preparing optimization, jump_label_text_reserved() is called */
	jump_label_lock();
	mutex_lock(&text_mutex);

	if (!kprobe_aggrprobe(orig_p)) {
		/* If orig_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = alloc_aggr_kprobe(orig_p);
		if (!ap) {
			ret = -ENOMEM;
			goto out;
		}
		init_aggr_kprobe(ap, orig_p);
	} else if (kprobe_unused(ap)) {
		/* This probe is going to die. Rescue it */
		ret = reuse_unused_kprobe(ap);
		if (ret)
			goto out;
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location that previously had a probe in a module vaddr
		 * area which has already been freed. So the instruction
		 * slot has already been released; we need a new slot for
		 * the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used next
			 * time, or freed by unregister_kprobe().
			 */
			goto out;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	ret = add_new_kprobe(ap, p);

out:
	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();

	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed) {
			/* Arm the breakpoint again. */
			ret = arm_kprobe(ap);
			if (ret) {
				ap->flags |= KPROBE_FLAG_DISABLED;
				list_del_rcu(&p->list);
				synchronize_rcu();
			}
		}
	}
	return ret;
}

bool __weak arch_within_kprobe_blacklist(unsigned long addr)
{
	/* The __kprobes marked functions and entry code must not be probed */
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

static bool __within_kprobe_blacklist(unsigned long addr)
{
	struct kprobe_blacklist_entry *ent;

	if (arch_within_kprobe_blacklist(addr))
		return true;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	list_for_each_entry(ent, &kprobe_blacklist, list) {
		if (addr >= ent->start_addr && addr < ent->end_addr)
			return true;
	}
	return false;
}

bool within_kprobe_blacklist(unsigned long addr)
{
	char symname[KSYM_NAME_LEN], *p;

	if (__within_kprobe_blacklist(addr))
		return true;

	/* Check if the address is on a suffixed-symbol */
	if (!lookup_symbol_name(addr, symname)) {
		p = strchr(symname, '.');
		if (!p)
			return false;
		*p = '\0';
		addr = (unsigned long)kprobe_lookup_name(symname, 0);
		if (addr)
			return __within_kprobe_blacklist(addr);
	}
	return false;
}
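/*
 * Illustrative usage: code that must never be probed opts out either with
 * the __kprobes annotation (placing it in .kprobes.text, caught by
 * arch_within_kprobe_blacklist()) or with NOKPROBE_SYMBOL(), which feeds
 * the kprobe_blacklist checked above; the handler name below is made up:
 *
 *	static int my_fragile_handler(struct pt_regs *regs) { ... }
 *	NOKPROBE_SYMBOL(my_fragile_handler);
 */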
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 * This returns encoded errors if it fails to look up the symbol or is
 * given an invalid combination of parameters.
 */
static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
			const char *symbol_name, unsigned int offset)
{
	if ((symbol_name && addr) || (!symbol_name && !addr))
		goto invalid;

	if (symbol_name) {
		addr = kprobe_lookup_name(symbol_name, offset);
		if (!addr)
			return ERR_PTR(-ENOENT);
	}

	addr = (kprobe_opcode_t *)(((char *)addr) + offset);
	if (addr)
		return addr;

invalid:
	return ERR_PTR(-EINVAL);
}

static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
{
	return _kprobe_addr(p->addr, p->symbol_name, p->offset);
}

/* Check that the passed kprobe is valid and return the kprobe from kprobe_table. */
static struct kprobe *__get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	lockdep_assert_held(&kprobe_mutex);

	ap = get_kprobe(p->addr);
	if (unlikely(!ap))
		return NULL;

	if (p != ap) {
		list_for_each_entry(list_p, &ap->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return ap;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;

	mutex_lock(&kprobe_mutex);
	if (__get_valid_kprobe(p))
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);

	return ret;
}

int __weak arch_check_ftrace_location(struct kprobe *p)
{
	unsigned long ftrace_addr;

	ftrace_addr = ftrace_location((unsigned long)p->addr);
	if (ftrace_addr) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		/* The given address is not on an instruction boundary */
		if ((unsigned long)p->addr != ftrace_addr)
			return -EILSEQ;
		p->flags |= KPROBE_FLAG_FTRACE;
#else	/* !CONFIG_KPROBES_ON_FTRACE */
		return -EINVAL;
#endif
	}
	return 0;
}

static int check_kprobe_address_safe(struct kprobe *p,
				     struct module **probed_mod)
{
	int ret;

	ret = arch_check_ftrace_location(p);
	if (ret)
		return ret;
	jump_label_lock();
	preempt_disable();

	/* Ensure it is not in a reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    within_kprobe_blacklist((unsigned long) p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr) ||
	    find_bug((unsigned long)p->addr)) {
		ret = -EINVAL;
		goto out;
	}

	/* Check if we are probing a module */
	*probed_mod = __module_text_address((unsigned long) p->addr);
	if (*probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(*probed_mod))) {
			ret = -ENOENT;
			goto out;
		}

		/*
		 * If the module freed its .init.text, we can't insert
		 * kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
		    (*probed_mod)->state != MODULE_STATE_COMING) {
			module_put(*probed_mod);
			*probed_mod = NULL;
			ret = -ENOENT;
		}
	}
out:
	preempt_enable();
	jump_label_unlock();

	return ret;
}
int register_kprobe(struct kprobe *p)
{
	int ret;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	/* Adjust probe address from symbol */
	addr = kprobe_addr(p);
	if (IS_ERR(addr))
		return PTR_ERR(addr);
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);

	ret = check_kprobe_address_safe(p, &probed_mod);
	if (ret)
		return ret;

	mutex_lock(&kprobe_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, text_mutex is locked inside. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	cpus_read_lock();
	/* Prevent text modification */
	mutex_lock(&text_mutex);
	ret = prepare_kprobe(p);
	mutex_unlock(&text_mutex);
	cpus_read_unlock();
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
		ret = arm_kprobe(p);
		if (ret) {
			hlist_del_rcu(&p->hlist);
			synchronize_rcu();
			goto out;
		}
	}

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
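/*
 * Illustrative usage (a stripped-down sketch in the spirit of
 * samples/kprobes/kprobe_example.c; the probed symbol and handler name
 * are arbitrary):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit %pS\n", (void *)instruction_pointer(regs));
 *		return 0;	// 0: continue with the probed instruction
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_sys_open",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	// module init: register_kprobe(&kp);
 *	// module exit: unregister_kprobe(&kp);
 */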
/* Check if all probes on the aggrprobe are disabled */
static int aggr_kprobe_disabled(struct kprobe *ap)
{
	struct kprobe *kp;

	lockdep_assert_held(&kprobe_mutex);

	list_for_each_entry(kp, &ap->list, list)
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this ap.
			 */
			return 0;

	return 1;
}

/* Disable one kprobe: Must be called with kprobe_mutex held */
static struct kprobe *__disable_kprobe(struct kprobe *p)
{
	struct kprobe *orig_p;
	int ret;

	/* Get an original kprobe for return */
	orig_p = __get_valid_kprobe(p);
	if (unlikely(orig_p == NULL))
		return ERR_PTR(-EINVAL);

	if (!kprobe_disabled(p)) {
		/* Disable probe if it is a child probe */
		if (p != orig_p)
			p->flags |= KPROBE_FLAG_DISABLED;

		/* Try to disarm and disable this/parent probe */
		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
			/*
			 * If kprobes_all_disarmed is set, orig_p
			 * should have already been disarmed, so
			 * skip the unneeded disarming process.
			 */
			if (!kprobes_all_disarmed) {
				ret = disarm_kprobe(orig_p, true);
				if (ret) {
					p->flags &= ~KPROBE_FLAG_DISABLED;
					return ERR_PTR(ret);
				}
			}
			orig_p->flags |= KPROBE_FLAG_DISABLED;
		}
	}

	return orig_p;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *ap, *list_p;

	/* Disable kprobe. This will disarm it if needed. */
	ap = __disable_kprobe(p);
	if (IS_ERR(ap))
		return PTR_ERR(ap);

	if (ap == p)
		/*
		 * This probe is an independent (and non-optimized) kprobe
		 * (not an aggrprobe). Remove from the hash list.
		 */
		goto disarmed;

	/* The following process expects this probe to be an aggrprobe */
	WARN_ON(!kprobe_aggrprobe(ap));

	if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
		/*
		 * !disarmed could happen if the probe is under delayed
		 * unoptimizing.
		 */
		goto disarmed;
	else {
		/* If the disabling probe has special handlers, update the aggrprobe */
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry(list_p, &ap->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			ap->post_handler = NULL;
		}
noclean:
		/*
		 * Remove from the aggrprobe: this path will do nothing in
		 * __unregister_kprobe_bottom().
		 */
		list_del_rcu(&p->list);
		if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
			/*
			 * Try to optimize this probe again, because the post
			 * handler may have been changed.
			 */
			optimize_kprobe(ap);
	}
	return 0;

disarmed:
	hlist_del_rcu(&ap->hlist);
	return 0;
}

static void __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *ap;

	if (list_empty(&p->list))
		/* This is an independent kprobe */
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* This is the last child of an aggrprobe */
		ap = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		free_aggr_kprobe(ap);
	}
	/* Otherwise, do nothing. */
}

int register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_rcu();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

int __weak kprobe_exceptions_notify(struct notifier_block *self,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
NOKPROBE_SYMBOL(kprobe_exceptions_notify);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*
	 * To avoid deadlocks, prohibit return probing in NMI contexts,
	 * just skip the probe and increase the (inexact) 'nmissed'
	 * statistical counter, so that the user is informed that
	 * something happened:
	 */
	if (unlikely(in_nmi())) {
		rp->nmissed++;
		return 0;
	}

	/* TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	raw_spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		raw_spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			raw_spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			raw_spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		raw_spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

bool __weak arch_kprobe_on_func_entry(unsigned long offset)
{
	return !offset;
}

bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
{
	kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);

	if (IS_ERR(kp_addr))
		return false;

	if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
	    !arch_kprobe_on_func_entry(offset))
		return false;

	return true;
}

int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
		return -EINVAL;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (IS_ERR(addr))
			return PTR_ERR(addr);

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPTION
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	raw_spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
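/*
 * Illustrative usage (condensed in the spirit of
 * samples/kprobes/kretprobe_example.c; names are arbitrary):
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		pr_info("%s returned %lu\n", ri->rp->kp.symbol_name,
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.kp.symbol_name	= "do_sys_open",
 *		.handler	= ret_handler,
 *		.maxactive	= 20,	// tolerate 20 concurrent calls
 *	};
 *
 *	ret = register_kretprobe(&my_kretprobe);
 */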
int register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_rcu();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(pre_handler_kretprobe);

#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	lockdep_assert_held(&kprobe_mutex);

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	p = __disable_kprobe(kp);
	if (IS_ERR(p))
		ret = PTR_ERR(p);

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		ret = arm_kprobe(p);
		if (ret)
			p->flags |= KPROBE_FLAG_DISABLED;
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
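/*
 * Illustrative usage: a registered probe can be toggled without the full
 * unregister/register round trip:
 *
 *	disable_kprobe(&kp);	// keeps the slot, removes the breakpoint
 *	...
 *	enable_kprobe(&kp);	// re-arms at the same address
 */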
/* Set the kprobe gone and remove its instruction buffer. */
static void kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	lockdep_assert_held(&kprobe_mutex);

	p->flags |= KPROBE_FLAG_GONE;
	if (kprobe_aggrprobe(p)) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		kill_optimized_kprobe(p);
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) anymore.
	 */
	arch_remove_kprobe(p);
}

/* Disable one kprobe */
int disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Disable this kprobe */
	p = __disable_kprobe(kp);
	if (IS_ERR(p))
		ret = PTR_ERR(p);

	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it anymore. */
		ret = -EINVAL;
		goto out;
	}

	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;

	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		ret = arm_kprobe(p);
		if (ret)
			p->flags |= KPROBE_FLAG_DISABLED;
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);

/* The caller must NOT use this in the usual path; it is only for critical cases. */
void dump_kprobe(struct kprobe *kp)
{
	pr_err("Dumping kprobe:\n");
	pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
	       kp->symbol_name, kp->offset, kp->addr);
}
NOKPROBE_SYMBOL(dump_kprobe);

int kprobe_add_ksym_blacklist(unsigned long entry)
{
	struct kprobe_blacklist_entry *ent;
	unsigned long offset = 0, size = 0;

	if (!kernel_text_address(entry) ||
	    !kallsyms_lookup_size_offset(entry, &size, &offset))
		return -EINVAL;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->start_addr = entry;
	ent->end_addr = entry + size;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, &kprobe_blacklist);

	return (int)size;
}

/* Add all symbols in the given area into the kprobe blacklist */
int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
{
	unsigned long entry;
	int ret = 0;

	for (entry = start; entry < end; entry += ret) {
		ret = kprobe_add_ksym_blacklist(entry);
		if (ret < 0)
			return ret;
		if (ret == 0)	/* In case of alias symbol */
			ret = 1;
	}
	return 0;
}

/* Remove all symbols in the given area from the kprobe blacklist */
static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
{
	struct kprobe_blacklist_entry *ent, *n;

	list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
		if (ent->start_addr < start || ent->start_addr >= end)
			continue;
		list_del(&ent->list);
		kfree(ent);
	}
}

static void kprobe_remove_ksym_blacklist(unsigned long entry)
{
	kprobe_remove_area_blacklist(entry, entry + 1);
}

int __init __weak arch_populate_kprobe_blacklist(void)
{
	return 0;
}

/*
 * Lookup and populate the kprobe_blacklist.
 *
 * Unlike the kretprobe blacklist, we'll need to determine
 * the range of addresses that belong to the said functions,
 * since a kprobe need not necessarily be at the beginning
 * of a function.
 */
static int __init populate_kprobe_blacklist(unsigned long *start,
					    unsigned long *end)
{
	unsigned long entry;
	unsigned long *iter;
	int ret;

	for (iter = start; iter < end; iter++) {
		entry = arch_deref_entry_point((void *)*iter);
		ret = kprobe_add_ksym_blacklist(entry);
		if (ret == -EINVAL)
			continue;
		if (ret < 0)
			return ret;
	}

	/* Symbols in __kprobes_text are blacklisted */
	ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
					(unsigned long)__kprobes_text_end);
	if (ret)
		return ret;

	/* Symbols in the noinstr section are blacklisted */
	ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
					(unsigned long)__noinstr_text_end);

	return ret ? : arch_populate_kprobe_blacklist();
}
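
/*
 * Example (an illustrative sketch; whether the entry-text section
 * symbols exist and need blacklisting is arch-specific). An
 * architecture can override the weak arch_populate_kprobe_blacklist()
 * above to blacklist additional text ranges that are unsafe to probe,
 * typically its exception entry code:
 *
 *	int __init arch_populate_kprobe_blacklist(void)
 *	{
 *		return kprobe_add_area_blacklist(
 *				(unsigned long)__entry_text_start,
 *				(unsigned long)__entry_text_end);
 *	}
 */
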
static void add_module_kprobe_blacklist(struct module *mod)
{
	unsigned long start, end;
	int i;

	if (mod->kprobe_blacklist) {
		for (i = 0; i < mod->num_kprobe_blacklist; i++)
			kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
	}

	start = (unsigned long)mod->kprobes_text_start;
	if (start) {
		end = start + mod->kprobes_text_size;
		kprobe_add_area_blacklist(start, end);
	}

	start = (unsigned long)mod->noinstr_text_start;
	if (start) {
		end = start + mod->noinstr_text_size;
		kprobe_add_area_blacklist(start, end);
	}
}

static void remove_module_kprobe_blacklist(struct module *mod)
{
	unsigned long start, end;
	int i;

	if (mod->kprobe_blacklist) {
		for (i = 0; i < mod->num_kprobe_blacklist; i++)
			kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
	}

	start = (unsigned long)mod->kprobes_text_start;
	if (start) {
		end = start + mod->kprobes_text_size;
		kprobe_remove_area_blacklist(start, end);
	}

	start = (unsigned long)mod->noinstr_text_start;
	if (start) {
		end = start + mod->noinstr_text_size;
		kprobe_remove_area_blacklist(start, end);
	}
}

/* Module notifier callback, checking kprobes on the module */
static int kprobes_module_callback(struct notifier_block *nb,
				   unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val == MODULE_STATE_COMING) {
		mutex_lock(&kprobe_mutex);
		add_module_kprobe_blacklist(mod);
		mutex_unlock(&kprobe_mutex);
	}
	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section is freed. We need to
	 * disable kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry(p, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced back to disk,
				 * so disarming the breakpoint isn't needed.
				 *
				 * Note, this will also move any optimized
				 * probes that are pending removal from their
				 * corresponding lists to the freeing_list,
				 * so they will not be touched by the delayed
				 * kprobe_optimizer work handler.
				 */
				kill_kprobe(p);
			}
	}
	if (val == MODULE_STATE_GOING)
		remove_module_kprobe_blacklist(mod);
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}
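
/*
 * Example (an illustrative sketch; 'my_mod_func' is hypothetical).
 * A module gets entries into mod->kprobe_blacklist simply by tagging
 * functions with NOKPROBE_SYMBOL(); the module loader collects them,
 * and kprobes_module_callback() above feeds them through
 * add_module_kprobe_blacklist() at MODULE_STATE_COMING:
 *
 *	static void my_mod_func(void)
 *	{
 *		// must never be probed, e.g. called from a kprobe handler
 *	}
 *	NOKPROBE_SYMBOL(my_mod_func);
 */
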
2364 */ 2365 kill_kprobe(p); 2366 } 2367 } 2368 if (val == MODULE_STATE_GOING) 2369 remove_module_kprobe_blacklist(mod); 2370 mutex_unlock(&kprobe_mutex); 2371 return NOTIFY_DONE; 2372 } 2373 2374 static struct notifier_block kprobe_module_nb = { 2375 .notifier_call = kprobes_module_callback, 2376 .priority = 0 2377 }; 2378 2379 /* Markers of _kprobe_blacklist section */ 2380 extern unsigned long __start_kprobe_blacklist[]; 2381 extern unsigned long __stop_kprobe_blacklist[]; 2382 2383 static int __init init_kprobes(void) 2384 { 2385 int i, err = 0; 2386 2387 /* FIXME allocate the probe table, currently defined statically */ 2388 /* initialize all list heads */ 2389 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2390 INIT_HLIST_HEAD(&kprobe_table[i]); 2391 INIT_HLIST_HEAD(&kretprobe_inst_table[i]); 2392 raw_spin_lock_init(&(kretprobe_table_locks[i].lock)); 2393 } 2394 2395 err = populate_kprobe_blacklist(__start_kprobe_blacklist, 2396 __stop_kprobe_blacklist); 2397 if (err) { 2398 pr_err("kprobes: failed to populate blacklist: %d\n", err); 2399 pr_err("Please take care of using kprobes.\n"); 2400 } 2401 2402 if (kretprobe_blacklist_size) { 2403 /* lookup the function address from its name */ 2404 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 2405 kretprobe_blacklist[i].addr = 2406 kprobe_lookup_name(kretprobe_blacklist[i].name, 0); 2407 if (!kretprobe_blacklist[i].addr) 2408 printk("kretprobe: lookup failed: %s\n", 2409 kretprobe_blacklist[i].name); 2410 } 2411 } 2412 2413 #if defined(CONFIG_OPTPROBES) 2414 #if defined(__ARCH_WANT_KPROBES_INSN_SLOT) 2415 /* Init kprobe_optinsn_slots */ 2416 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; 2417 #endif 2418 /* By default, kprobes can be optimized */ 2419 kprobes_allow_optimization = true; 2420 #endif 2421 2422 /* By default, kprobes are armed */ 2423 kprobes_all_disarmed = false; 2424 2425 err = arch_init_kprobes(); 2426 if (!err) 2427 err = register_die_notifier(&kprobe_exceptions_nb); 2428 if (!err) 2429 err = register_module_notifier(&kprobe_module_nb); 2430 2431 kprobes_initialized = (err == 0); 2432 2433 if (!err) 2434 init_test_probes(); 2435 return err; 2436 } 2437 subsys_initcall(init_kprobes); 2438 2439 #ifdef CONFIG_DEBUG_FS 2440 static void report_probe(struct seq_file *pi, struct kprobe *p, 2441 const char *sym, int offset, char *modname, struct kprobe *pp) 2442 { 2443 char *kprobe_type; 2444 void *addr = p->addr; 2445 2446 if (p->pre_handler == pre_handler_kretprobe) 2447 kprobe_type = "r"; 2448 else 2449 kprobe_type = "k"; 2450 2451 if (!kallsyms_show_value()) 2452 addr = NULL; 2453 2454 if (sym) 2455 seq_printf(pi, "%px %s %s+0x%x %s ", 2456 addr, kprobe_type, sym, offset, 2457 (modname ? modname : " ")); 2458 else /* try to use %pS */ 2459 seq_printf(pi, "%px %s %pS ", 2460 addr, kprobe_type, p->addr); 2461 2462 if (!pp) 2463 pp = p; 2464 seq_printf(pi, "%s%s%s%s\n", 2465 (kprobe_gone(p) ? "[GONE]" : ""), 2466 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), 2467 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), 2468 (kprobe_ftrace(pp) ? "[FTRACE]" : "")); 2469 } 2470 2471 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) 2472 { 2473 return (*pos < KPROBE_TABLE_SIZE) ? 
#ifdef CONFIG_DEBUG_FS
static void report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;
	void *addr = p->addr;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else
		kprobe_type = "k";

	if (!kallsyms_show_value())
		addr = NULL;

	if (sym)
		seq_printf(pi, "%px %s %s+0x%x %s ",
			addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else	/* try to use %pS */
		seq_printf(pi, "%px %s %pS ",
			addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
		(kprobe_ftrace(pp) ? "[FTRACE]" : ""));
}

static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[KSYM_NAME_LEN];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_sops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

DEFINE_SEQ_ATTRIBUTE(kprobes);

/* kprobes/blacklist -- shows which functions cannot be probed */
static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&kprobe_mutex);
	return seq_list_start(&kprobe_blacklist, *pos);
}

static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &kprobe_blacklist, pos);
}

static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
{
	struct kprobe_blacklist_entry *ent =
		list_entry(v, struct kprobe_blacklist_entry, list);

	/*
	 * If /proc/kallsyms is not showing kernel addresses, we won't
	 * show them here either.
	 */
	if (!kallsyms_show_value())
		seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
			   (void *)ent->start_addr);
	else
		seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
			   (void *)ent->end_addr, (void *)ent->start_addr);
	return 0;
}

static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
{
	mutex_unlock(&kprobe_mutex);
}

static const struct seq_operations kprobe_blacklist_sops = {
	.start = kprobe_blacklist_seq_start,
	.next  = kprobe_blacklist_seq_next,
	.stop  = kprobe_blacklist_seq_stop,
	.show  = kprobe_blacklist_seq_show,
};
DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
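
/*
 * Example output of /sys/kernel/debug/kprobes/list (illustrative only;
 * the addresses and symbols depend entirely on the registered probes,
 * and addresses are zeroed unless kallsyms_show_value() allows them):
 *
 *	ffffffffb2a05aa0 k vfs_read+0x0 [FTRACE]
 *	ffffffffb29f60d0 r kernel_clone+0x0
 *
 * The second column is "k" for a kprobe and "r" for a kretprobe, as
 * chosen in report_probe() above; the bracketed flags follow.
 */
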
2582 */ 2583 kprobes_all_disarmed = false; 2584 /* Arming kprobes doesn't optimize kprobe itself */ 2585 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2586 head = &kprobe_table[i]; 2587 /* Arm all kprobes on a best-effort basis */ 2588 hlist_for_each_entry(p, head, hlist) { 2589 if (!kprobe_disabled(p)) { 2590 err = arm_kprobe(p); 2591 if (err) { 2592 errors++; 2593 ret = err; 2594 } 2595 total++; 2596 } 2597 } 2598 } 2599 2600 if (errors) 2601 pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n", 2602 errors, total); 2603 else 2604 pr_info("Kprobes globally enabled\n"); 2605 2606 already_enabled: 2607 mutex_unlock(&kprobe_mutex); 2608 return ret; 2609 } 2610 2611 static int disarm_all_kprobes(void) 2612 { 2613 struct hlist_head *head; 2614 struct kprobe *p; 2615 unsigned int i, total = 0, errors = 0; 2616 int err, ret = 0; 2617 2618 mutex_lock(&kprobe_mutex); 2619 2620 /* If kprobes are already disarmed, just return */ 2621 if (kprobes_all_disarmed) { 2622 mutex_unlock(&kprobe_mutex); 2623 return 0; 2624 } 2625 2626 kprobes_all_disarmed = true; 2627 2628 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2629 head = &kprobe_table[i]; 2630 /* Disarm all kprobes on a best-effort basis */ 2631 hlist_for_each_entry(p, head, hlist) { 2632 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { 2633 err = disarm_kprobe(p, false); 2634 if (err) { 2635 errors++; 2636 ret = err; 2637 } 2638 total++; 2639 } 2640 } 2641 } 2642 2643 if (errors) 2644 pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n", 2645 errors, total); 2646 else 2647 pr_info("Kprobes globally disabled\n"); 2648 2649 mutex_unlock(&kprobe_mutex); 2650 2651 /* Wait for disarming all kprobes by optimizer */ 2652 wait_for_kprobe_optimizer(); 2653 2654 return ret; 2655 } 2656 2657 /* 2658 * XXX: The debugfs bool file interface doesn't allow for callbacks 2659 * when the bool state is switched. 
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility once
 * it becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	size_t buf_size;
	int ret = 0;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		ret = arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		ret = disarm_all_kprobes();
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	return count;
}

static const struct file_operations fops_kp = {
	.read	= read_enabled_file_bool,
	.write	= write_enabled_file_bool,
	.llseek	= default_llseek,
};

static int __init debugfs_kprobe_init(void)
{
	struct dentry *dir;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);

	debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);

	debugfs_create_file("enabled", 0600, dir, &value, &fops_kp);

	debugfs_create_file("blacklist", 0400, dir, NULL,
			    &kprobe_blacklist_fops);

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */