1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Kernel Probes (KProbes) 4 * 5 * Copyright (C) IBM Corporation, 2002, 2004 6 * 7 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel 8 * Probes initial implementation (includes suggestions from 9 * Rusty Russell). 10 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with 11 * hlists and exceptions notifier as suggested by Andi Kleen. 12 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes 13 * interface to access function arguments. 14 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes 15 * exceptions notifier to be first on the priority list. 16 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston 17 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi 18 * <prasanna@in.ibm.com> added function-return probes. 19 */ 20 21 #define pr_fmt(fmt) "kprobes: " fmt 22 23 #include <linux/kprobes.h> 24 #include <linux/hash.h> 25 #include <linux/init.h> 26 #include <linux/slab.h> 27 #include <linux/stddef.h> 28 #include <linux/export.h> 29 #include <linux/moduleloader.h> 30 #include <linux/kallsyms.h> 31 #include <linux/freezer.h> 32 #include <linux/seq_file.h> 33 #include <linux/debugfs.h> 34 #include <linux/sysctl.h> 35 #include <linux/kdebug.h> 36 #include <linux/memory.h> 37 #include <linux/ftrace.h> 38 #include <linux/cpu.h> 39 #include <linux/jump_label.h> 40 #include <linux/static_call.h> 41 #include <linux/perf_event.h> 42 43 #include <asm/sections.h> 44 #include <asm/cacheflush.h> 45 #include <asm/errno.h> 46 #include <linux/uaccess.h> 47 48 #define KPROBE_HASH_BITS 6 49 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS) 50 51 #if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL) 52 #define kprobe_sysctls_init() do { } while (0) 53 #endif 54 55 static int kprobes_initialized; 56 /* kprobe_table can be accessed by 57 * - Normal hlist traversal and RCU add/del under 'kprobe_mutex' is held. 58 * Or 59 * - RCU hlist traversal under disabling preempt (breakpoint handlers) 60 */ 61 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE]; 62 63 /* NOTE: change this value only with 'kprobe_mutex' held */ 64 static bool kprobes_all_disarmed; 65 66 /* This protects 'kprobe_table' and 'optimizing_list' */ 67 static DEFINE_MUTEX(kprobe_mutex); 68 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance); 69 70 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name, 71 unsigned int __unused) 72 { 73 return ((kprobe_opcode_t *)(kallsyms_lookup_name(name))); 74 } 75 76 /* 77 * Blacklist -- list of 'struct kprobe_blacklist_entry' to store info where 78 * kprobes can not probe. 79 */ 80 static LIST_HEAD(kprobe_blacklist); 81 82 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT 83 /* 84 * 'kprobe::ainsn.insn' points to the copy of the instruction to be 85 * single-stepped. x86_64, POWER4 and above have no-exec support and 86 * stepping on the instruction on a vmalloced/kmalloced/data page 87 * is a recipe for disaster 88 */ 89 struct kprobe_insn_page { 90 struct list_head list; 91 kprobe_opcode_t *insns; /* Page of instruction slots */ 92 struct kprobe_insn_cache *cache; 93 int nused; 94 int ngarbage; 95 char slot_used[]; 96 }; 97 98 #define KPROBE_INSN_PAGE_SIZE(slots) \ 99 (offsetof(struct kprobe_insn_page, slot_used) + \ 100 (sizeof(char) * (slots))) 101 102 static int slots_per_page(struct kprobe_insn_cache *c) 103 { 104 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t)); 105 } 106 107 enum kprobe_slot_state { 108 SLOT_CLEAN = 0, 109 SLOT_DIRTY = 1, 110 SLOT_USED = 2, 111 }; 112 113 void __weak *alloc_insn_page(void) 114 { 115 /* 116 * Use module_alloc() so this page is within +/- 2GB of where the 117 * kernel image and loaded module images reside. This is required 118 * for most of the architectures. 119 * (e.g. x86-64 needs this to handle the %rip-relative fixups.) 120 */ 121 return module_alloc(PAGE_SIZE); 122 } 123 124 static void free_insn_page(void *page) 125 { 126 module_memfree(page); 127 } 128 129 struct kprobe_insn_cache kprobe_insn_slots = { 130 .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex), 131 .alloc = alloc_insn_page, 132 .free = free_insn_page, 133 .sym = KPROBE_INSN_PAGE_SYM, 134 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages), 135 .insn_size = MAX_INSN_SIZE, 136 .nr_garbage = 0, 137 }; 138 static int collect_garbage_slots(struct kprobe_insn_cache *c); 139 140 /** 141 * __get_insn_slot() - Find a slot on an executable page for an instruction. 142 * We allocate an executable page if there's no room on existing ones. 143 */ 144 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c) 145 { 146 struct kprobe_insn_page *kip; 147 kprobe_opcode_t *slot = NULL; 148 149 /* Since the slot array is not protected by rcu, we need a mutex */ 150 mutex_lock(&c->mutex); 151 retry: 152 rcu_read_lock(); 153 list_for_each_entry_rcu(kip, &c->pages, list) { 154 if (kip->nused < slots_per_page(c)) { 155 int i; 156 157 for (i = 0; i < slots_per_page(c); i++) { 158 if (kip->slot_used[i] == SLOT_CLEAN) { 159 kip->slot_used[i] = SLOT_USED; 160 kip->nused++; 161 slot = kip->insns + (i * c->insn_size); 162 rcu_read_unlock(); 163 goto out; 164 } 165 } 166 /* kip->nused is broken. Fix it. */ 167 kip->nused = slots_per_page(c); 168 WARN_ON(1); 169 } 170 } 171 rcu_read_unlock(); 172 173 /* If there are any garbage slots, collect it and try again. */ 174 if (c->nr_garbage && collect_garbage_slots(c) == 0) 175 goto retry; 176 177 /* All out of space. Need to allocate a new page. */ 178 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL); 179 if (!kip) 180 goto out; 181 182 kip->insns = c->alloc(); 183 if (!kip->insns) { 184 kfree(kip); 185 goto out; 186 } 187 INIT_LIST_HEAD(&kip->list); 188 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c)); 189 kip->slot_used[0] = SLOT_USED; 190 kip->nused = 1; 191 kip->ngarbage = 0; 192 kip->cache = c; 193 list_add_rcu(&kip->list, &c->pages); 194 slot = kip->insns; 195 196 /* Record the perf ksymbol register event after adding the page */ 197 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns, 198 PAGE_SIZE, false, c->sym); 199 out: 200 mutex_unlock(&c->mutex); 201 return slot; 202 } 203 204 /* Return true if all garbages are collected, otherwise false. */ 205 static bool collect_one_slot(struct kprobe_insn_page *kip, int idx) 206 { 207 kip->slot_used[idx] = SLOT_CLEAN; 208 kip->nused--; 209 if (kip->nused == 0) { 210 /* 211 * Page is no longer in use. Free it unless 212 * it's the last one. We keep the last one 213 * so as not to have to set it up again the 214 * next time somebody inserts a probe. 215 */ 216 if (!list_is_singular(&kip->list)) { 217 /* 218 * Record perf ksymbol unregister event before removing 219 * the page. 220 */ 221 perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, 222 (unsigned long)kip->insns, PAGE_SIZE, true, 223 kip->cache->sym); 224 list_del_rcu(&kip->list); 225 synchronize_rcu(); 226 kip->cache->free(kip->insns); 227 kfree(kip); 228 } 229 return true; 230 } 231 return false; 232 } 233 234 static int collect_garbage_slots(struct kprobe_insn_cache *c) 235 { 236 struct kprobe_insn_page *kip, *next; 237 238 /* Ensure no-one is interrupted on the garbages */ 239 synchronize_rcu(); 240 241 list_for_each_entry_safe(kip, next, &c->pages, list) { 242 int i; 243 244 if (kip->ngarbage == 0) 245 continue; 246 kip->ngarbage = 0; /* we will collect all garbages */ 247 for (i = 0; i < slots_per_page(c); i++) { 248 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i)) 249 break; 250 } 251 } 252 c->nr_garbage = 0; 253 return 0; 254 } 255 256 void __free_insn_slot(struct kprobe_insn_cache *c, 257 kprobe_opcode_t *slot, int dirty) 258 { 259 struct kprobe_insn_page *kip; 260 long idx; 261 262 mutex_lock(&c->mutex); 263 rcu_read_lock(); 264 list_for_each_entry_rcu(kip, &c->pages, list) { 265 idx = ((long)slot - (long)kip->insns) / 266 (c->insn_size * sizeof(kprobe_opcode_t)); 267 if (idx >= 0 && idx < slots_per_page(c)) 268 goto out; 269 } 270 /* Could not find this slot. */ 271 WARN_ON(1); 272 kip = NULL; 273 out: 274 rcu_read_unlock(); 275 /* Mark and sweep: this may sleep */ 276 if (kip) { 277 /* Check double free */ 278 WARN_ON(kip->slot_used[idx] != SLOT_USED); 279 if (dirty) { 280 kip->slot_used[idx] = SLOT_DIRTY; 281 kip->ngarbage++; 282 if (++c->nr_garbage > slots_per_page(c)) 283 collect_garbage_slots(c); 284 } else { 285 collect_one_slot(kip, idx); 286 } 287 } 288 mutex_unlock(&c->mutex); 289 } 290 291 /* 292 * Check given address is on the page of kprobe instruction slots. 293 * This will be used for checking whether the address on a stack 294 * is on a text area or not. 295 */ 296 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr) 297 { 298 struct kprobe_insn_page *kip; 299 bool ret = false; 300 301 rcu_read_lock(); 302 list_for_each_entry_rcu(kip, &c->pages, list) { 303 if (addr >= (unsigned long)kip->insns && 304 addr < (unsigned long)kip->insns + PAGE_SIZE) { 305 ret = true; 306 break; 307 } 308 } 309 rcu_read_unlock(); 310 311 return ret; 312 } 313 314 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum, 315 unsigned long *value, char *type, char *sym) 316 { 317 struct kprobe_insn_page *kip; 318 int ret = -ERANGE; 319 320 rcu_read_lock(); 321 list_for_each_entry_rcu(kip, &c->pages, list) { 322 if ((*symnum)--) 323 continue; 324 strscpy(sym, c->sym, KSYM_NAME_LEN); 325 *type = 't'; 326 *value = (unsigned long)kip->insns; 327 ret = 0; 328 break; 329 } 330 rcu_read_unlock(); 331 332 return ret; 333 } 334 335 #ifdef CONFIG_OPTPROBES 336 void __weak *alloc_optinsn_page(void) 337 { 338 return alloc_insn_page(); 339 } 340 341 void __weak free_optinsn_page(void *page) 342 { 343 free_insn_page(page); 344 } 345 346 /* For optimized_kprobe buffer */ 347 struct kprobe_insn_cache kprobe_optinsn_slots = { 348 .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex), 349 .alloc = alloc_optinsn_page, 350 .free = free_optinsn_page, 351 .sym = KPROBE_OPTINSN_PAGE_SYM, 352 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages), 353 /* .insn_size is initialized later */ 354 .nr_garbage = 0, 355 }; 356 #endif 357 #endif 358 359 /* We have preemption disabled.. so it is safe to use __ versions */ 360 static inline void set_kprobe_instance(struct kprobe *kp) 361 { 362 __this_cpu_write(kprobe_instance, kp); 363 } 364 365 static inline void reset_kprobe_instance(void) 366 { 367 __this_cpu_write(kprobe_instance, NULL); 368 } 369 370 /* 371 * This routine is called either: 372 * - under the 'kprobe_mutex' - during kprobe_[un]register(). 373 * OR 374 * - with preemption disabled - from architecture specific code. 375 */ 376 struct kprobe *get_kprobe(void *addr) 377 { 378 struct hlist_head *head; 379 struct kprobe *p; 380 381 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)]; 382 hlist_for_each_entry_rcu(p, head, hlist, 383 lockdep_is_held(&kprobe_mutex)) { 384 if (p->addr == addr) 385 return p; 386 } 387 388 return NULL; 389 } 390 NOKPROBE_SYMBOL(get_kprobe); 391 392 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs); 393 394 /* Return true if 'p' is an aggregator */ 395 static inline bool kprobe_aggrprobe(struct kprobe *p) 396 { 397 return p->pre_handler == aggr_pre_handler; 398 } 399 400 /* Return true if 'p' is unused */ 401 static inline bool kprobe_unused(struct kprobe *p) 402 { 403 return kprobe_aggrprobe(p) && kprobe_disabled(p) && 404 list_empty(&p->list); 405 } 406 407 /* Keep all fields in the kprobe consistent. */ 408 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p) 409 { 410 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t)); 411 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn)); 412 } 413 414 #ifdef CONFIG_OPTPROBES 415 /* NOTE: This is protected by 'kprobe_mutex'. */ 416 static bool kprobes_allow_optimization; 417 418 /* 419 * Call all 'kprobe::pre_handler' on the list, but ignores its return value. 420 * This must be called from arch-dep optimized caller. 421 */ 422 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs) 423 { 424 struct kprobe *kp; 425 426 list_for_each_entry_rcu(kp, &p->list, list) { 427 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 428 set_kprobe_instance(kp); 429 kp->pre_handler(kp, regs); 430 } 431 reset_kprobe_instance(); 432 } 433 } 434 NOKPROBE_SYMBOL(opt_pre_handler); 435 436 /* Free optimized instructions and optimized_kprobe */ 437 static void free_aggr_kprobe(struct kprobe *p) 438 { 439 struct optimized_kprobe *op; 440 441 op = container_of(p, struct optimized_kprobe, kp); 442 arch_remove_optimized_kprobe(op); 443 arch_remove_kprobe(p); 444 kfree(op); 445 } 446 447 /* Return true if the kprobe is ready for optimization. */ 448 static inline int kprobe_optready(struct kprobe *p) 449 { 450 struct optimized_kprobe *op; 451 452 if (kprobe_aggrprobe(p)) { 453 op = container_of(p, struct optimized_kprobe, kp); 454 return arch_prepared_optinsn(&op->optinsn); 455 } 456 457 return 0; 458 } 459 460 /* Return true if the kprobe is disarmed. Note: p must be on hash list */ 461 bool kprobe_disarmed(struct kprobe *p) 462 { 463 struct optimized_kprobe *op; 464 465 /* If kprobe is not aggr/opt probe, just return kprobe is disabled */ 466 if (!kprobe_aggrprobe(p)) 467 return kprobe_disabled(p); 468 469 op = container_of(p, struct optimized_kprobe, kp); 470 471 return kprobe_disabled(p) && list_empty(&op->list); 472 } 473 474 /* Return true if the probe is queued on (un)optimizing lists */ 475 static bool kprobe_queued(struct kprobe *p) 476 { 477 struct optimized_kprobe *op; 478 479 if (kprobe_aggrprobe(p)) { 480 op = container_of(p, struct optimized_kprobe, kp); 481 if (!list_empty(&op->list)) 482 return true; 483 } 484 return false; 485 } 486 487 /* 488 * Return an optimized kprobe whose optimizing code replaces 489 * instructions including 'addr' (exclude breakpoint). 490 */ 491 static struct kprobe *get_optimized_kprobe(kprobe_opcode_t *addr) 492 { 493 int i; 494 struct kprobe *p = NULL; 495 struct optimized_kprobe *op; 496 497 /* Don't check i == 0, since that is a breakpoint case. */ 498 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH / sizeof(kprobe_opcode_t); i++) 499 p = get_kprobe(addr - i); 500 501 if (p && kprobe_optready(p)) { 502 op = container_of(p, struct optimized_kprobe, kp); 503 if (arch_within_optimized_kprobe(op, addr)) 504 return p; 505 } 506 507 return NULL; 508 } 509 510 /* Optimization staging list, protected by 'kprobe_mutex' */ 511 static LIST_HEAD(optimizing_list); 512 static LIST_HEAD(unoptimizing_list); 513 static LIST_HEAD(freeing_list); 514 515 static void kprobe_optimizer(struct work_struct *work); 516 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer); 517 #define OPTIMIZE_DELAY 5 518 519 /* 520 * Optimize (replace a breakpoint with a jump) kprobes listed on 521 * 'optimizing_list'. 522 */ 523 static void do_optimize_kprobes(void) 524 { 525 lockdep_assert_held(&text_mutex); 526 /* 527 * The optimization/unoptimization refers 'online_cpus' via 528 * stop_machine() and cpu-hotplug modifies the 'online_cpus'. 529 * And same time, 'text_mutex' will be held in cpu-hotplug and here. 530 * This combination can cause a deadlock (cpu-hotplug tries to lock 531 * 'text_mutex' but stop_machine() can not be done because 532 * the 'online_cpus' has been changed) 533 * To avoid this deadlock, caller must have locked cpu-hotplug 534 * for preventing cpu-hotplug outside of 'text_mutex' locking. 535 */ 536 lockdep_assert_cpus_held(); 537 538 /* Optimization never be done when disarmed */ 539 if (kprobes_all_disarmed || !kprobes_allow_optimization || 540 list_empty(&optimizing_list)) 541 return; 542 543 arch_optimize_kprobes(&optimizing_list); 544 } 545 546 /* 547 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint 548 * if need) kprobes listed on 'unoptimizing_list'. 549 */ 550 static void do_unoptimize_kprobes(void) 551 { 552 struct optimized_kprobe *op, *tmp; 553 554 lockdep_assert_held(&text_mutex); 555 /* See comment in do_optimize_kprobes() */ 556 lockdep_assert_cpus_held(); 557 558 if (!list_empty(&unoptimizing_list)) 559 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list); 560 561 /* Loop on 'freeing_list' for disarming and removing from kprobe hash list */ 562 list_for_each_entry_safe(op, tmp, &freeing_list, list) { 563 /* Switching from detour code to origin */ 564 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 565 /* Disarm probes if marked disabled and not gone */ 566 if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp)) 567 arch_disarm_kprobe(&op->kp); 568 if (kprobe_unused(&op->kp)) { 569 /* 570 * Remove unused probes from hash list. After waiting 571 * for synchronization, these probes are reclaimed. 572 * (reclaiming is done by do_free_cleaned_kprobes().) 573 */ 574 hlist_del_rcu(&op->kp.hlist); 575 } else 576 list_del_init(&op->list); 577 } 578 } 579 580 /* Reclaim all kprobes on the 'freeing_list' */ 581 static void do_free_cleaned_kprobes(void) 582 { 583 struct optimized_kprobe *op, *tmp; 584 585 list_for_each_entry_safe(op, tmp, &freeing_list, list) { 586 list_del_init(&op->list); 587 if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) { 588 /* 589 * This must not happen, but if there is a kprobe 590 * still in use, keep it on kprobes hash list. 591 */ 592 continue; 593 } 594 free_aggr_kprobe(&op->kp); 595 } 596 } 597 598 /* Start optimizer after OPTIMIZE_DELAY passed */ 599 static void kick_kprobe_optimizer(void) 600 { 601 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY); 602 } 603 604 /* Kprobe jump optimizer */ 605 static void kprobe_optimizer(struct work_struct *work) 606 { 607 mutex_lock(&kprobe_mutex); 608 cpus_read_lock(); 609 mutex_lock(&text_mutex); 610 611 /* 612 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) 613 * kprobes before waiting for quiesence period. 614 */ 615 do_unoptimize_kprobes(); 616 617 /* 618 * Step 2: Wait for quiesence period to ensure all potentially 619 * preempted tasks to have normally scheduled. Because optprobe 620 * may modify multiple instructions, there is a chance that Nth 621 * instruction is preempted. In that case, such tasks can return 622 * to 2nd-Nth byte of jump instruction. This wait is for avoiding it. 623 * Note that on non-preemptive kernel, this is transparently converted 624 * to synchronoze_sched() to wait for all interrupts to have completed. 625 */ 626 synchronize_rcu_tasks(); 627 628 /* Step 3: Optimize kprobes after quiesence period */ 629 do_optimize_kprobes(); 630 631 /* Step 4: Free cleaned kprobes after quiesence period */ 632 do_free_cleaned_kprobes(); 633 634 mutex_unlock(&text_mutex); 635 cpus_read_unlock(); 636 637 /* Step 5: Kick optimizer again if needed */ 638 if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) 639 kick_kprobe_optimizer(); 640 641 mutex_unlock(&kprobe_mutex); 642 } 643 644 /* Wait for completing optimization and unoptimization */ 645 void wait_for_kprobe_optimizer(void) 646 { 647 mutex_lock(&kprobe_mutex); 648 649 while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) { 650 mutex_unlock(&kprobe_mutex); 651 652 /* This will also make 'optimizing_work' execute immmediately */ 653 flush_delayed_work(&optimizing_work); 654 /* 'optimizing_work' might not have been queued yet, relax */ 655 cpu_relax(); 656 657 mutex_lock(&kprobe_mutex); 658 } 659 660 mutex_unlock(&kprobe_mutex); 661 } 662 663 bool optprobe_queued_unopt(struct optimized_kprobe *op) 664 { 665 struct optimized_kprobe *_op; 666 667 list_for_each_entry(_op, &unoptimizing_list, list) { 668 if (op == _op) 669 return true; 670 } 671 672 return false; 673 } 674 675 /* Optimize kprobe if p is ready to be optimized */ 676 static void optimize_kprobe(struct kprobe *p) 677 { 678 struct optimized_kprobe *op; 679 680 /* Check if the kprobe is disabled or not ready for optimization. */ 681 if (!kprobe_optready(p) || !kprobes_allow_optimization || 682 (kprobe_disabled(p) || kprobes_all_disarmed)) 683 return; 684 685 /* kprobes with 'post_handler' can not be optimized */ 686 if (p->post_handler) 687 return; 688 689 op = container_of(p, struct optimized_kprobe, kp); 690 691 /* Check there is no other kprobes at the optimized instructions */ 692 if (arch_check_optimized_kprobe(op) < 0) 693 return; 694 695 /* Check if it is already optimized. */ 696 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) { 697 if (optprobe_queued_unopt(op)) { 698 /* This is under unoptimizing. Just dequeue the probe */ 699 list_del_init(&op->list); 700 } 701 return; 702 } 703 op->kp.flags |= KPROBE_FLAG_OPTIMIZED; 704 705 /* 706 * On the 'unoptimizing_list' and 'optimizing_list', 707 * 'op' must have OPTIMIZED flag 708 */ 709 if (WARN_ON_ONCE(!list_empty(&op->list))) 710 return; 711 712 list_add(&op->list, &optimizing_list); 713 kick_kprobe_optimizer(); 714 } 715 716 /* Short cut to direct unoptimizing */ 717 static void force_unoptimize_kprobe(struct optimized_kprobe *op) 718 { 719 lockdep_assert_cpus_held(); 720 arch_unoptimize_kprobe(op); 721 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 722 } 723 724 /* Unoptimize a kprobe if p is optimized */ 725 static void unoptimize_kprobe(struct kprobe *p, bool force) 726 { 727 struct optimized_kprobe *op; 728 729 if (!kprobe_aggrprobe(p) || kprobe_disarmed(p)) 730 return; /* This is not an optprobe nor optimized */ 731 732 op = container_of(p, struct optimized_kprobe, kp); 733 if (!kprobe_optimized(p)) 734 return; 735 736 if (!list_empty(&op->list)) { 737 if (optprobe_queued_unopt(op)) { 738 /* Queued in unoptimizing queue */ 739 if (force) { 740 /* 741 * Forcibly unoptimize the kprobe here, and queue it 742 * in the freeing list for release afterwards. 743 */ 744 force_unoptimize_kprobe(op); 745 list_move(&op->list, &freeing_list); 746 } 747 } else { 748 /* Dequeue from the optimizing queue */ 749 list_del_init(&op->list); 750 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 751 } 752 return; 753 } 754 755 /* Optimized kprobe case */ 756 if (force) { 757 /* Forcibly update the code: this is a special case */ 758 force_unoptimize_kprobe(op); 759 } else { 760 list_add(&op->list, &unoptimizing_list); 761 kick_kprobe_optimizer(); 762 } 763 } 764 765 /* Cancel unoptimizing for reusing */ 766 static int reuse_unused_kprobe(struct kprobe *ap) 767 { 768 struct optimized_kprobe *op; 769 770 /* 771 * Unused kprobe MUST be on the way of delayed unoptimizing (means 772 * there is still a relative jump) and disabled. 773 */ 774 op = container_of(ap, struct optimized_kprobe, kp); 775 WARN_ON_ONCE(list_empty(&op->list)); 776 /* Enable the probe again */ 777 ap->flags &= ~KPROBE_FLAG_DISABLED; 778 /* Optimize it again. (remove from 'op->list') */ 779 if (!kprobe_optready(ap)) 780 return -EINVAL; 781 782 optimize_kprobe(ap); 783 return 0; 784 } 785 786 /* Remove optimized instructions */ 787 static void kill_optimized_kprobe(struct kprobe *p) 788 { 789 struct optimized_kprobe *op; 790 791 op = container_of(p, struct optimized_kprobe, kp); 792 if (!list_empty(&op->list)) 793 /* Dequeue from the (un)optimization queue */ 794 list_del_init(&op->list); 795 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED; 796 797 if (kprobe_unused(p)) { 798 /* 799 * Unused kprobe is on unoptimizing or freeing list. We move it 800 * to freeing_list and let the kprobe_optimizer() remove it from 801 * the kprobe hash list and free it. 802 */ 803 if (optprobe_queued_unopt(op)) 804 list_move(&op->list, &freeing_list); 805 } 806 807 /* Don't touch the code, because it is already freed. */ 808 arch_remove_optimized_kprobe(op); 809 } 810 811 static inline 812 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p) 813 { 814 if (!kprobe_ftrace(p)) 815 arch_prepare_optimized_kprobe(op, p); 816 } 817 818 /* Try to prepare optimized instructions */ 819 static void prepare_optimized_kprobe(struct kprobe *p) 820 { 821 struct optimized_kprobe *op; 822 823 op = container_of(p, struct optimized_kprobe, kp); 824 __prepare_optimized_kprobe(op, p); 825 } 826 827 /* Allocate new optimized_kprobe and try to prepare optimized instructions. */ 828 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 829 { 830 struct optimized_kprobe *op; 831 832 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL); 833 if (!op) 834 return NULL; 835 836 INIT_LIST_HEAD(&op->list); 837 op->kp.addr = p->addr; 838 __prepare_optimized_kprobe(op, p); 839 840 return &op->kp; 841 } 842 843 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p); 844 845 /* 846 * Prepare an optimized_kprobe and optimize it. 847 * NOTE: 'p' must be a normal registered kprobe. 848 */ 849 static void try_to_optimize_kprobe(struct kprobe *p) 850 { 851 struct kprobe *ap; 852 struct optimized_kprobe *op; 853 854 /* Impossible to optimize ftrace-based kprobe. */ 855 if (kprobe_ftrace(p)) 856 return; 857 858 /* For preparing optimization, jump_label_text_reserved() is called. */ 859 cpus_read_lock(); 860 jump_label_lock(); 861 mutex_lock(&text_mutex); 862 863 ap = alloc_aggr_kprobe(p); 864 if (!ap) 865 goto out; 866 867 op = container_of(ap, struct optimized_kprobe, kp); 868 if (!arch_prepared_optinsn(&op->optinsn)) { 869 /* If failed to setup optimizing, fallback to kprobe. */ 870 arch_remove_optimized_kprobe(op); 871 kfree(op); 872 goto out; 873 } 874 875 init_aggr_kprobe(ap, p); 876 optimize_kprobe(ap); /* This just kicks optimizer thread. */ 877 878 out: 879 mutex_unlock(&text_mutex); 880 jump_label_unlock(); 881 cpus_read_unlock(); 882 } 883 884 static void optimize_all_kprobes(void) 885 { 886 struct hlist_head *head; 887 struct kprobe *p; 888 unsigned int i; 889 890 mutex_lock(&kprobe_mutex); 891 /* If optimization is already allowed, just return. */ 892 if (kprobes_allow_optimization) 893 goto out; 894 895 cpus_read_lock(); 896 kprobes_allow_optimization = true; 897 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 898 head = &kprobe_table[i]; 899 hlist_for_each_entry(p, head, hlist) 900 if (!kprobe_disabled(p)) 901 optimize_kprobe(p); 902 } 903 cpus_read_unlock(); 904 pr_info("kprobe jump-optimization is enabled. All kprobes are optimized if possible.\n"); 905 out: 906 mutex_unlock(&kprobe_mutex); 907 } 908 909 #ifdef CONFIG_SYSCTL 910 static void unoptimize_all_kprobes(void) 911 { 912 struct hlist_head *head; 913 struct kprobe *p; 914 unsigned int i; 915 916 mutex_lock(&kprobe_mutex); 917 /* If optimization is already prohibited, just return. */ 918 if (!kprobes_allow_optimization) { 919 mutex_unlock(&kprobe_mutex); 920 return; 921 } 922 923 cpus_read_lock(); 924 kprobes_allow_optimization = false; 925 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 926 head = &kprobe_table[i]; 927 hlist_for_each_entry(p, head, hlist) { 928 if (!kprobe_disabled(p)) 929 unoptimize_kprobe(p, false); 930 } 931 } 932 cpus_read_unlock(); 933 mutex_unlock(&kprobe_mutex); 934 935 /* Wait for unoptimizing completion. */ 936 wait_for_kprobe_optimizer(); 937 pr_info("kprobe jump-optimization is disabled. All kprobes are based on software breakpoint.\n"); 938 } 939 940 static DEFINE_MUTEX(kprobe_sysctl_mutex); 941 static int sysctl_kprobes_optimization; 942 static int proc_kprobes_optimization_handler(struct ctl_table *table, 943 int write, void *buffer, 944 size_t *length, loff_t *ppos) 945 { 946 int ret; 947 948 mutex_lock(&kprobe_sysctl_mutex); 949 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0; 950 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 951 952 if (sysctl_kprobes_optimization) 953 optimize_all_kprobes(); 954 else 955 unoptimize_all_kprobes(); 956 mutex_unlock(&kprobe_sysctl_mutex); 957 958 return ret; 959 } 960 961 static struct ctl_table kprobe_sysctls[] = { 962 { 963 .procname = "kprobes-optimization", 964 .data = &sysctl_kprobes_optimization, 965 .maxlen = sizeof(int), 966 .mode = 0644, 967 .proc_handler = proc_kprobes_optimization_handler, 968 .extra1 = SYSCTL_ZERO, 969 .extra2 = SYSCTL_ONE, 970 }, 971 {} 972 }; 973 974 static void __init kprobe_sysctls_init(void) 975 { 976 register_sysctl_init("debug", kprobe_sysctls); 977 } 978 #endif /* CONFIG_SYSCTL */ 979 980 /* Put a breakpoint for a probe. */ 981 static void __arm_kprobe(struct kprobe *p) 982 { 983 struct kprobe *_p; 984 985 lockdep_assert_held(&text_mutex); 986 987 /* Find the overlapping optimized kprobes. */ 988 _p = get_optimized_kprobe(p->addr); 989 if (unlikely(_p)) 990 /* Fallback to unoptimized kprobe */ 991 unoptimize_kprobe(_p, true); 992 993 arch_arm_kprobe(p); 994 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */ 995 } 996 997 /* Remove the breakpoint of a probe. */ 998 static void __disarm_kprobe(struct kprobe *p, bool reopt) 999 { 1000 struct kprobe *_p; 1001 1002 lockdep_assert_held(&text_mutex); 1003 1004 /* Try to unoptimize */ 1005 unoptimize_kprobe(p, kprobes_all_disarmed); 1006 1007 if (!kprobe_queued(p)) { 1008 arch_disarm_kprobe(p); 1009 /* If another kprobe was blocked, re-optimize it. */ 1010 _p = get_optimized_kprobe(p->addr); 1011 if (unlikely(_p) && reopt) 1012 optimize_kprobe(_p); 1013 } 1014 /* 1015 * TODO: Since unoptimization and real disarming will be done by 1016 * the worker thread, we can not check whether another probe are 1017 * unoptimized because of this probe here. It should be re-optimized 1018 * by the worker thread. 1019 */ 1020 } 1021 1022 #else /* !CONFIG_OPTPROBES */ 1023 1024 #define optimize_kprobe(p) do {} while (0) 1025 #define unoptimize_kprobe(p, f) do {} while (0) 1026 #define kill_optimized_kprobe(p) do {} while (0) 1027 #define prepare_optimized_kprobe(p) do {} while (0) 1028 #define try_to_optimize_kprobe(p) do {} while (0) 1029 #define __arm_kprobe(p) arch_arm_kprobe(p) 1030 #define __disarm_kprobe(p, o) arch_disarm_kprobe(p) 1031 #define kprobe_disarmed(p) kprobe_disabled(p) 1032 #define wait_for_kprobe_optimizer() do {} while (0) 1033 1034 static int reuse_unused_kprobe(struct kprobe *ap) 1035 { 1036 /* 1037 * If the optimized kprobe is NOT supported, the aggr kprobe is 1038 * released at the same time that the last aggregated kprobe is 1039 * unregistered. 1040 * Thus there should be no chance to reuse unused kprobe. 1041 */ 1042 WARN_ON_ONCE(1); 1043 return -EINVAL; 1044 } 1045 1046 static void free_aggr_kprobe(struct kprobe *p) 1047 { 1048 arch_remove_kprobe(p); 1049 kfree(p); 1050 } 1051 1052 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) 1053 { 1054 return kzalloc(sizeof(struct kprobe), GFP_KERNEL); 1055 } 1056 #endif /* CONFIG_OPTPROBES */ 1057 1058 #ifdef CONFIG_KPROBES_ON_FTRACE 1059 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { 1060 .func = kprobe_ftrace_handler, 1061 .flags = FTRACE_OPS_FL_SAVE_REGS, 1062 }; 1063 1064 static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = { 1065 .func = kprobe_ftrace_handler, 1066 .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY, 1067 }; 1068 1069 static int kprobe_ipmodify_enabled; 1070 static int kprobe_ftrace_enabled; 1071 bool kprobe_ftrace_disabled; 1072 1073 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, 1074 int *cnt) 1075 { 1076 int ret; 1077 1078 lockdep_assert_held(&kprobe_mutex); 1079 1080 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0); 1081 if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret)) 1082 return ret; 1083 1084 if (*cnt == 0) { 1085 ret = register_ftrace_function(ops); 1086 if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) 1087 goto err_ftrace; 1088 } 1089 1090 (*cnt)++; 1091 return ret; 1092 1093 err_ftrace: 1094 /* 1095 * At this point, sinec ops is not registered, we should be sefe from 1096 * registering empty filter. 1097 */ 1098 ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); 1099 return ret; 1100 } 1101 1102 static int arm_kprobe_ftrace(struct kprobe *p) 1103 { 1104 bool ipmodify = (p->post_handler != NULL); 1105 1106 return __arm_kprobe_ftrace(p, 1107 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops, 1108 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); 1109 } 1110 1111 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops, 1112 int *cnt) 1113 { 1114 int ret; 1115 1116 lockdep_assert_held(&kprobe_mutex); 1117 1118 if (*cnt == 1) { 1119 ret = unregister_ftrace_function(ops); 1120 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (error %d)\n", ret)) 1121 return ret; 1122 } 1123 1124 (*cnt)--; 1125 1126 ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0); 1127 WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (error %d)\n", 1128 p->addr, ret); 1129 return ret; 1130 } 1131 1132 static int disarm_kprobe_ftrace(struct kprobe *p) 1133 { 1134 bool ipmodify = (p->post_handler != NULL); 1135 1136 return __disarm_kprobe_ftrace(p, 1137 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops, 1138 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled); 1139 } 1140 1141 void kprobe_ftrace_kill(void) 1142 { 1143 kprobe_ftrace_disabled = true; 1144 } 1145 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1146 static inline int arm_kprobe_ftrace(struct kprobe *p) 1147 { 1148 return -ENODEV; 1149 } 1150 1151 static inline int disarm_kprobe_ftrace(struct kprobe *p) 1152 { 1153 return -ENODEV; 1154 } 1155 #endif 1156 1157 static int prepare_kprobe(struct kprobe *p) 1158 { 1159 /* Must ensure p->addr is really on ftrace */ 1160 if (kprobe_ftrace(p)) 1161 return arch_prepare_kprobe_ftrace(p); 1162 1163 return arch_prepare_kprobe(p); 1164 } 1165 1166 static int arm_kprobe(struct kprobe *kp) 1167 { 1168 if (unlikely(kprobe_ftrace(kp))) 1169 return arm_kprobe_ftrace(kp); 1170 1171 cpus_read_lock(); 1172 mutex_lock(&text_mutex); 1173 __arm_kprobe(kp); 1174 mutex_unlock(&text_mutex); 1175 cpus_read_unlock(); 1176 1177 return 0; 1178 } 1179 1180 static int disarm_kprobe(struct kprobe *kp, bool reopt) 1181 { 1182 if (unlikely(kprobe_ftrace(kp))) 1183 return disarm_kprobe_ftrace(kp); 1184 1185 cpus_read_lock(); 1186 mutex_lock(&text_mutex); 1187 __disarm_kprobe(kp, reopt); 1188 mutex_unlock(&text_mutex); 1189 cpus_read_unlock(); 1190 1191 return 0; 1192 } 1193 1194 /* 1195 * Aggregate handlers for multiple kprobes support - these handlers 1196 * take care of invoking the individual kprobe handlers on p->list 1197 */ 1198 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs) 1199 { 1200 struct kprobe *kp; 1201 1202 list_for_each_entry_rcu(kp, &p->list, list) { 1203 if (kp->pre_handler && likely(!kprobe_disabled(kp))) { 1204 set_kprobe_instance(kp); 1205 if (kp->pre_handler(kp, regs)) 1206 return 1; 1207 } 1208 reset_kprobe_instance(); 1209 } 1210 return 0; 1211 } 1212 NOKPROBE_SYMBOL(aggr_pre_handler); 1213 1214 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs, 1215 unsigned long flags) 1216 { 1217 struct kprobe *kp; 1218 1219 list_for_each_entry_rcu(kp, &p->list, list) { 1220 if (kp->post_handler && likely(!kprobe_disabled(kp))) { 1221 set_kprobe_instance(kp); 1222 kp->post_handler(kp, regs, flags); 1223 reset_kprobe_instance(); 1224 } 1225 } 1226 } 1227 NOKPROBE_SYMBOL(aggr_post_handler); 1228 1229 /* Walks the list and increments 'nmissed' if 'p' has child probes. */ 1230 void kprobes_inc_nmissed_count(struct kprobe *p) 1231 { 1232 struct kprobe *kp; 1233 1234 if (!kprobe_aggrprobe(p)) { 1235 p->nmissed++; 1236 } else { 1237 list_for_each_entry_rcu(kp, &p->list, list) 1238 kp->nmissed++; 1239 } 1240 } 1241 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count); 1242 1243 static struct kprobe kprobe_busy = { 1244 .addr = (void *) get_kprobe, 1245 }; 1246 1247 void kprobe_busy_begin(void) 1248 { 1249 struct kprobe_ctlblk *kcb; 1250 1251 preempt_disable(); 1252 __this_cpu_write(current_kprobe, &kprobe_busy); 1253 kcb = get_kprobe_ctlblk(); 1254 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 1255 } 1256 1257 void kprobe_busy_end(void) 1258 { 1259 __this_cpu_write(current_kprobe, NULL); 1260 preempt_enable(); 1261 } 1262 1263 /* Add the new probe to 'ap->list'. */ 1264 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p) 1265 { 1266 if (p->post_handler) 1267 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */ 1268 1269 list_add_rcu(&p->list, &ap->list); 1270 if (p->post_handler && !ap->post_handler) 1271 ap->post_handler = aggr_post_handler; 1272 1273 return 0; 1274 } 1275 1276 /* 1277 * Fill in the required fields of the aggregator kprobe. Replace the 1278 * earlier kprobe in the hlist with the aggregator kprobe. 1279 */ 1280 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p) 1281 { 1282 /* Copy the insn slot of 'p' to 'ap'. */ 1283 copy_kprobe(p, ap); 1284 flush_insn_slot(ap); 1285 ap->addr = p->addr; 1286 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED; 1287 ap->pre_handler = aggr_pre_handler; 1288 /* We don't care the kprobe which has gone. */ 1289 if (p->post_handler && !kprobe_gone(p)) 1290 ap->post_handler = aggr_post_handler; 1291 1292 INIT_LIST_HEAD(&ap->list); 1293 INIT_HLIST_NODE(&ap->hlist); 1294 1295 list_add_rcu(&p->list, &ap->list); 1296 hlist_replace_rcu(&p->hlist, &ap->hlist); 1297 } 1298 1299 /* 1300 * This registers the second or subsequent kprobe at the same address. 1301 */ 1302 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p) 1303 { 1304 int ret = 0; 1305 struct kprobe *ap = orig_p; 1306 1307 cpus_read_lock(); 1308 1309 /* For preparing optimization, jump_label_text_reserved() is called */ 1310 jump_label_lock(); 1311 mutex_lock(&text_mutex); 1312 1313 if (!kprobe_aggrprobe(orig_p)) { 1314 /* If 'orig_p' is not an 'aggr_kprobe', create new one. */ 1315 ap = alloc_aggr_kprobe(orig_p); 1316 if (!ap) { 1317 ret = -ENOMEM; 1318 goto out; 1319 } 1320 init_aggr_kprobe(ap, orig_p); 1321 } else if (kprobe_unused(ap)) { 1322 /* This probe is going to die. Rescue it */ 1323 ret = reuse_unused_kprobe(ap); 1324 if (ret) 1325 goto out; 1326 } 1327 1328 if (kprobe_gone(ap)) { 1329 /* 1330 * Attempting to insert new probe at the same location that 1331 * had a probe in the module vaddr area which already 1332 * freed. So, the instruction slot has already been 1333 * released. We need a new slot for the new probe. 1334 */ 1335 ret = arch_prepare_kprobe(ap); 1336 if (ret) 1337 /* 1338 * Even if fail to allocate new slot, don't need to 1339 * free the 'ap'. It will be used next time, or 1340 * freed by unregister_kprobe(). 1341 */ 1342 goto out; 1343 1344 /* Prepare optimized instructions if possible. */ 1345 prepare_optimized_kprobe(ap); 1346 1347 /* 1348 * Clear gone flag to prevent allocating new slot again, and 1349 * set disabled flag because it is not armed yet. 1350 */ 1351 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE) 1352 | KPROBE_FLAG_DISABLED; 1353 } 1354 1355 /* Copy the insn slot of 'p' to 'ap'. */ 1356 copy_kprobe(ap, p); 1357 ret = add_new_kprobe(ap, p); 1358 1359 out: 1360 mutex_unlock(&text_mutex); 1361 jump_label_unlock(); 1362 cpus_read_unlock(); 1363 1364 if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { 1365 ap->flags &= ~KPROBE_FLAG_DISABLED; 1366 if (!kprobes_all_disarmed) { 1367 /* Arm the breakpoint again. */ 1368 ret = arm_kprobe(ap); 1369 if (ret) { 1370 ap->flags |= KPROBE_FLAG_DISABLED; 1371 list_del_rcu(&p->list); 1372 synchronize_rcu(); 1373 } 1374 } 1375 } 1376 return ret; 1377 } 1378 1379 bool __weak arch_within_kprobe_blacklist(unsigned long addr) 1380 { 1381 /* The '__kprobes' functions and entry code must not be probed. */ 1382 return addr >= (unsigned long)__kprobes_text_start && 1383 addr < (unsigned long)__kprobes_text_end; 1384 } 1385 1386 static bool __within_kprobe_blacklist(unsigned long addr) 1387 { 1388 struct kprobe_blacklist_entry *ent; 1389 1390 if (arch_within_kprobe_blacklist(addr)) 1391 return true; 1392 /* 1393 * If 'kprobe_blacklist' is defined, check the address and 1394 * reject any probe registration in the prohibited area. 1395 */ 1396 list_for_each_entry(ent, &kprobe_blacklist, list) { 1397 if (addr >= ent->start_addr && addr < ent->end_addr) 1398 return true; 1399 } 1400 return false; 1401 } 1402 1403 bool within_kprobe_blacklist(unsigned long addr) 1404 { 1405 char symname[KSYM_NAME_LEN], *p; 1406 1407 if (__within_kprobe_blacklist(addr)) 1408 return true; 1409 1410 /* Check if the address is on a suffixed-symbol */ 1411 if (!lookup_symbol_name(addr, symname)) { 1412 p = strchr(symname, '.'); 1413 if (!p) 1414 return false; 1415 *p = '\0'; 1416 addr = (unsigned long)kprobe_lookup_name(symname, 0); 1417 if (addr) 1418 return __within_kprobe_blacklist(addr); 1419 } 1420 return false; 1421 } 1422 1423 /* 1424 * arch_adjust_kprobe_addr - adjust the address 1425 * @addr: symbol base address 1426 * @offset: offset within the symbol 1427 * @on_func_entry: was this @addr+@offset on the function entry 1428 * 1429 * Typically returns @addr + @offset, except for special cases where the 1430 * function might be prefixed by a CFI landing pad, in that case any offset 1431 * inside the landing pad is mapped to the first 'real' instruction of the 1432 * symbol. 1433 * 1434 * Specifically, for things like IBT/BTI, skip the resp. ENDBR/BTI.C 1435 * instruction at +0. 1436 */ 1437 kprobe_opcode_t *__weak arch_adjust_kprobe_addr(unsigned long addr, 1438 unsigned long offset, 1439 bool *on_func_entry) 1440 { 1441 *on_func_entry = !offset; 1442 return (kprobe_opcode_t *)(addr + offset); 1443 } 1444 1445 /* 1446 * If 'symbol_name' is specified, look it up and add the 'offset' 1447 * to it. This way, we can specify a relative address to a symbol. 1448 * This returns encoded errors if it fails to look up symbol or invalid 1449 * combination of parameters. 1450 */ 1451 static kprobe_opcode_t * 1452 _kprobe_addr(kprobe_opcode_t *addr, const char *symbol_name, 1453 unsigned long offset, bool *on_func_entry) 1454 { 1455 if ((symbol_name && addr) || (!symbol_name && !addr)) 1456 goto invalid; 1457 1458 if (symbol_name) { 1459 /* 1460 * Input: @sym + @offset 1461 * Output: @addr + @offset 1462 * 1463 * NOTE: kprobe_lookup_name() does *NOT* fold the offset 1464 * argument into it's output! 1465 */ 1466 addr = kprobe_lookup_name(symbol_name, offset); 1467 if (!addr) 1468 return ERR_PTR(-ENOENT); 1469 } 1470 1471 /* 1472 * So here we have @addr + @offset, displace it into a new 1473 * @addr' + @offset' where @addr' is the symbol start address. 1474 */ 1475 addr = (void *)addr + offset; 1476 if (!kallsyms_lookup_size_offset((unsigned long)addr, NULL, &offset)) 1477 return ERR_PTR(-ENOENT); 1478 addr = (void *)addr - offset; 1479 1480 /* 1481 * Then ask the architecture to re-combine them, taking care of 1482 * magical function entry details while telling us if this was indeed 1483 * at the start of the function. 1484 */ 1485 addr = arch_adjust_kprobe_addr((unsigned long)addr, offset, on_func_entry); 1486 if (addr) 1487 return addr; 1488 1489 invalid: 1490 return ERR_PTR(-EINVAL); 1491 } 1492 1493 static kprobe_opcode_t *kprobe_addr(struct kprobe *p) 1494 { 1495 bool on_func_entry; 1496 return _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); 1497 } 1498 1499 /* 1500 * Check the 'p' is valid and return the aggregator kprobe 1501 * at the same address. 1502 */ 1503 static struct kprobe *__get_valid_kprobe(struct kprobe *p) 1504 { 1505 struct kprobe *ap, *list_p; 1506 1507 lockdep_assert_held(&kprobe_mutex); 1508 1509 ap = get_kprobe(p->addr); 1510 if (unlikely(!ap)) 1511 return NULL; 1512 1513 if (p != ap) { 1514 list_for_each_entry(list_p, &ap->list, list) 1515 if (list_p == p) 1516 /* kprobe p is a valid probe */ 1517 goto valid; 1518 return NULL; 1519 } 1520 valid: 1521 return ap; 1522 } 1523 1524 /* 1525 * Warn and return error if the kprobe is being re-registered since 1526 * there must be a software bug. 1527 */ 1528 static inline int warn_kprobe_rereg(struct kprobe *p) 1529 { 1530 int ret = 0; 1531 1532 mutex_lock(&kprobe_mutex); 1533 if (WARN_ON_ONCE(__get_valid_kprobe(p))) 1534 ret = -EINVAL; 1535 mutex_unlock(&kprobe_mutex); 1536 1537 return ret; 1538 } 1539 1540 static int check_ftrace_location(struct kprobe *p) 1541 { 1542 unsigned long addr = (unsigned long)p->addr; 1543 1544 if (ftrace_location(addr) == addr) { 1545 #ifdef CONFIG_KPROBES_ON_FTRACE 1546 p->flags |= KPROBE_FLAG_FTRACE; 1547 #else /* !CONFIG_KPROBES_ON_FTRACE */ 1548 return -EINVAL; 1549 #endif 1550 } 1551 return 0; 1552 } 1553 1554 static bool is_cfi_preamble_symbol(unsigned long addr) 1555 { 1556 char symbuf[KSYM_NAME_LEN]; 1557 1558 if (lookup_symbol_name(addr, symbuf)) 1559 return false; 1560 1561 return str_has_prefix(symbuf, "__cfi_") || 1562 str_has_prefix(symbuf, "__pfx_"); 1563 } 1564 1565 static int check_kprobe_address_safe(struct kprobe *p, 1566 struct module **probed_mod) 1567 { 1568 int ret; 1569 1570 ret = check_ftrace_location(p); 1571 if (ret) 1572 return ret; 1573 jump_label_lock(); 1574 preempt_disable(); 1575 1576 /* Ensure the address is in a text area, and find a module if exists. */ 1577 *probed_mod = NULL; 1578 if (!core_kernel_text((unsigned long) p->addr)) { 1579 *probed_mod = __module_text_address((unsigned long) p->addr); 1580 if (!(*probed_mod)) { 1581 ret = -EINVAL; 1582 goto out; 1583 } 1584 } 1585 /* Ensure it is not in reserved area. */ 1586 if (in_gate_area_no_mm((unsigned long) p->addr) || 1587 within_kprobe_blacklist((unsigned long) p->addr) || 1588 jump_label_text_reserved(p->addr, p->addr) || 1589 static_call_text_reserved(p->addr, p->addr) || 1590 find_bug((unsigned long)p->addr) || 1591 is_cfi_preamble_symbol((unsigned long)p->addr)) { 1592 ret = -EINVAL; 1593 goto out; 1594 } 1595 1596 /* Get module refcount and reject __init functions for loaded modules. */ 1597 if (*probed_mod) { 1598 /* 1599 * We must hold a refcount of the probed module while updating 1600 * its code to prohibit unexpected unloading. 1601 */ 1602 if (unlikely(!try_module_get(*probed_mod))) { 1603 ret = -ENOENT; 1604 goto out; 1605 } 1606 1607 /* 1608 * If the module freed '.init.text', we couldn't insert 1609 * kprobes in there. 1610 */ 1611 if (within_module_init((unsigned long)p->addr, *probed_mod) && 1612 (*probed_mod)->state != MODULE_STATE_COMING) { 1613 module_put(*probed_mod); 1614 *probed_mod = NULL; 1615 ret = -ENOENT; 1616 } 1617 } 1618 out: 1619 preempt_enable(); 1620 jump_label_unlock(); 1621 1622 return ret; 1623 } 1624 1625 int register_kprobe(struct kprobe *p) 1626 { 1627 int ret; 1628 struct kprobe *old_p; 1629 struct module *probed_mod; 1630 kprobe_opcode_t *addr; 1631 bool on_func_entry; 1632 1633 /* Adjust probe address from symbol */ 1634 addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry); 1635 if (IS_ERR(addr)) 1636 return PTR_ERR(addr); 1637 p->addr = addr; 1638 1639 ret = warn_kprobe_rereg(p); 1640 if (ret) 1641 return ret; 1642 1643 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ 1644 p->flags &= KPROBE_FLAG_DISABLED; 1645 p->nmissed = 0; 1646 INIT_LIST_HEAD(&p->list); 1647 1648 ret = check_kprobe_address_safe(p, &probed_mod); 1649 if (ret) 1650 return ret; 1651 1652 mutex_lock(&kprobe_mutex); 1653 1654 if (on_func_entry) 1655 p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY; 1656 1657 old_p = get_kprobe(p->addr); 1658 if (old_p) { 1659 /* Since this may unoptimize 'old_p', locking 'text_mutex'. */ 1660 ret = register_aggr_kprobe(old_p, p); 1661 goto out; 1662 } 1663 1664 cpus_read_lock(); 1665 /* Prevent text modification */ 1666 mutex_lock(&text_mutex); 1667 ret = prepare_kprobe(p); 1668 mutex_unlock(&text_mutex); 1669 cpus_read_unlock(); 1670 if (ret) 1671 goto out; 1672 1673 INIT_HLIST_NODE(&p->hlist); 1674 hlist_add_head_rcu(&p->hlist, 1675 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); 1676 1677 if (!kprobes_all_disarmed && !kprobe_disabled(p)) { 1678 ret = arm_kprobe(p); 1679 if (ret) { 1680 hlist_del_rcu(&p->hlist); 1681 synchronize_rcu(); 1682 goto out; 1683 } 1684 } 1685 1686 /* Try to optimize kprobe */ 1687 try_to_optimize_kprobe(p); 1688 out: 1689 mutex_unlock(&kprobe_mutex); 1690 1691 if (probed_mod) 1692 module_put(probed_mod); 1693 1694 return ret; 1695 } 1696 EXPORT_SYMBOL_GPL(register_kprobe); 1697 1698 /* Check if all probes on the 'ap' are disabled. */ 1699 static bool aggr_kprobe_disabled(struct kprobe *ap) 1700 { 1701 struct kprobe *kp; 1702 1703 lockdep_assert_held(&kprobe_mutex); 1704 1705 list_for_each_entry(kp, &ap->list, list) 1706 if (!kprobe_disabled(kp)) 1707 /* 1708 * Since there is an active probe on the list, 1709 * we can't disable this 'ap'. 1710 */ 1711 return false; 1712 1713 return true; 1714 } 1715 1716 static struct kprobe *__disable_kprobe(struct kprobe *p) 1717 { 1718 struct kprobe *orig_p; 1719 int ret; 1720 1721 lockdep_assert_held(&kprobe_mutex); 1722 1723 /* Get an original kprobe for return */ 1724 orig_p = __get_valid_kprobe(p); 1725 if (unlikely(orig_p == NULL)) 1726 return ERR_PTR(-EINVAL); 1727 1728 if (!kprobe_disabled(p)) { 1729 /* Disable probe if it is a child probe */ 1730 if (p != orig_p) 1731 p->flags |= KPROBE_FLAG_DISABLED; 1732 1733 /* Try to disarm and disable this/parent probe */ 1734 if (p == orig_p || aggr_kprobe_disabled(orig_p)) { 1735 /* 1736 * Don't be lazy here. Even if 'kprobes_all_disarmed' 1737 * is false, 'orig_p' might not have been armed yet. 1738 * Note arm_all_kprobes() __tries__ to arm all kprobes 1739 * on the best effort basis. 1740 */ 1741 if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) { 1742 ret = disarm_kprobe(orig_p, true); 1743 if (ret) { 1744 p->flags &= ~KPROBE_FLAG_DISABLED; 1745 return ERR_PTR(ret); 1746 } 1747 } 1748 orig_p->flags |= KPROBE_FLAG_DISABLED; 1749 } 1750 } 1751 1752 return orig_p; 1753 } 1754 1755 /* 1756 * Unregister a kprobe without a scheduler synchronization. 1757 */ 1758 static int __unregister_kprobe_top(struct kprobe *p) 1759 { 1760 struct kprobe *ap, *list_p; 1761 1762 /* Disable kprobe. This will disarm it if needed. */ 1763 ap = __disable_kprobe(p); 1764 if (IS_ERR(ap)) 1765 return PTR_ERR(ap); 1766 1767 if (ap == p) 1768 /* 1769 * This probe is an independent(and non-optimized) kprobe 1770 * (not an aggrprobe). Remove from the hash list. 1771 */ 1772 goto disarmed; 1773 1774 /* Following process expects this probe is an aggrprobe */ 1775 WARN_ON(!kprobe_aggrprobe(ap)); 1776 1777 if (list_is_singular(&ap->list) && kprobe_disarmed(ap)) 1778 /* 1779 * !disarmed could be happen if the probe is under delayed 1780 * unoptimizing. 1781 */ 1782 goto disarmed; 1783 else { 1784 /* If disabling probe has special handlers, update aggrprobe */ 1785 if (p->post_handler && !kprobe_gone(p)) { 1786 list_for_each_entry(list_p, &ap->list, list) { 1787 if ((list_p != p) && (list_p->post_handler)) 1788 goto noclean; 1789 } 1790 /* 1791 * For the kprobe-on-ftrace case, we keep the 1792 * post_handler setting to identify this aggrprobe 1793 * armed with kprobe_ipmodify_ops. 1794 */ 1795 if (!kprobe_ftrace(ap)) 1796 ap->post_handler = NULL; 1797 } 1798 noclean: 1799 /* 1800 * Remove from the aggrprobe: this path will do nothing in 1801 * __unregister_kprobe_bottom(). 1802 */ 1803 list_del_rcu(&p->list); 1804 if (!kprobe_disabled(ap) && !kprobes_all_disarmed) 1805 /* 1806 * Try to optimize this probe again, because post 1807 * handler may have been changed. 1808 */ 1809 optimize_kprobe(ap); 1810 } 1811 return 0; 1812 1813 disarmed: 1814 hlist_del_rcu(&ap->hlist); 1815 return 0; 1816 } 1817 1818 static void __unregister_kprobe_bottom(struct kprobe *p) 1819 { 1820 struct kprobe *ap; 1821 1822 if (list_empty(&p->list)) 1823 /* This is an independent kprobe */ 1824 arch_remove_kprobe(p); 1825 else if (list_is_singular(&p->list)) { 1826 /* This is the last child of an aggrprobe */ 1827 ap = list_entry(p->list.next, struct kprobe, list); 1828 list_del(&p->list); 1829 free_aggr_kprobe(ap); 1830 } 1831 /* Otherwise, do nothing. */ 1832 } 1833 1834 int register_kprobes(struct kprobe **kps, int num) 1835 { 1836 int i, ret = 0; 1837 1838 if (num <= 0) 1839 return -EINVAL; 1840 for (i = 0; i < num; i++) { 1841 ret = register_kprobe(kps[i]); 1842 if (ret < 0) { 1843 if (i > 0) 1844 unregister_kprobes(kps, i); 1845 break; 1846 } 1847 } 1848 return ret; 1849 } 1850 EXPORT_SYMBOL_GPL(register_kprobes); 1851 1852 void unregister_kprobe(struct kprobe *p) 1853 { 1854 unregister_kprobes(&p, 1); 1855 } 1856 EXPORT_SYMBOL_GPL(unregister_kprobe); 1857 1858 void unregister_kprobes(struct kprobe **kps, int num) 1859 { 1860 int i; 1861 1862 if (num <= 0) 1863 return; 1864 mutex_lock(&kprobe_mutex); 1865 for (i = 0; i < num; i++) 1866 if (__unregister_kprobe_top(kps[i]) < 0) 1867 kps[i]->addr = NULL; 1868 mutex_unlock(&kprobe_mutex); 1869 1870 synchronize_rcu(); 1871 for (i = 0; i < num; i++) 1872 if (kps[i]->addr) 1873 __unregister_kprobe_bottom(kps[i]); 1874 } 1875 EXPORT_SYMBOL_GPL(unregister_kprobes); 1876 1877 int __weak kprobe_exceptions_notify(struct notifier_block *self, 1878 unsigned long val, void *data) 1879 { 1880 return NOTIFY_DONE; 1881 } 1882 NOKPROBE_SYMBOL(kprobe_exceptions_notify); 1883 1884 static struct notifier_block kprobe_exceptions_nb = { 1885 .notifier_call = kprobe_exceptions_notify, 1886 .priority = 0x7fffffff /* we need to be notified first */ 1887 }; 1888 1889 #ifdef CONFIG_KRETPROBES 1890 1891 #if !defined(CONFIG_KRETPROBE_ON_RETHOOK) 1892 static void free_rp_inst_rcu(struct rcu_head *head) 1893 { 1894 struct kretprobe_instance *ri = container_of(head, struct kretprobe_instance, rcu); 1895 1896 if (refcount_dec_and_test(&ri->rph->ref)) 1897 kfree(ri->rph); 1898 kfree(ri); 1899 } 1900 NOKPROBE_SYMBOL(free_rp_inst_rcu); 1901 1902 static void recycle_rp_inst(struct kretprobe_instance *ri) 1903 { 1904 struct kretprobe *rp = get_kretprobe(ri); 1905 1906 if (likely(rp)) 1907 freelist_add(&ri->freelist, &rp->freelist); 1908 else 1909 call_rcu(&ri->rcu, free_rp_inst_rcu); 1910 } 1911 NOKPROBE_SYMBOL(recycle_rp_inst); 1912 1913 /* 1914 * This function is called from delayed_put_task_struct() when a task is 1915 * dead and cleaned up to recycle any kretprobe instances associated with 1916 * this task. These left over instances represent probed functions that 1917 * have been called but will never return. 1918 */ 1919 void kprobe_flush_task(struct task_struct *tk) 1920 { 1921 struct kretprobe_instance *ri; 1922 struct llist_node *node; 1923 1924 /* Early boot, not yet initialized. */ 1925 if (unlikely(!kprobes_initialized)) 1926 return; 1927 1928 kprobe_busy_begin(); 1929 1930 node = __llist_del_all(&tk->kretprobe_instances); 1931 while (node) { 1932 ri = container_of(node, struct kretprobe_instance, llist); 1933 node = node->next; 1934 1935 recycle_rp_inst(ri); 1936 } 1937 1938 kprobe_busy_end(); 1939 } 1940 NOKPROBE_SYMBOL(kprobe_flush_task); 1941 1942 static inline void free_rp_inst(struct kretprobe *rp) 1943 { 1944 struct kretprobe_instance *ri; 1945 struct freelist_node *node; 1946 int count = 0; 1947 1948 node = rp->freelist.head; 1949 while (node) { 1950 ri = container_of(node, struct kretprobe_instance, freelist); 1951 node = node->next; 1952 1953 kfree(ri); 1954 count++; 1955 } 1956 1957 if (refcount_sub_and_test(count, &rp->rph->ref)) { 1958 kfree(rp->rph); 1959 rp->rph = NULL; 1960 } 1961 } 1962 1963 /* This assumes the 'tsk' is the current task or the is not running. */ 1964 static kprobe_opcode_t *__kretprobe_find_ret_addr(struct task_struct *tsk, 1965 struct llist_node **cur) 1966 { 1967 struct kretprobe_instance *ri = NULL; 1968 struct llist_node *node = *cur; 1969 1970 if (!node) 1971 node = tsk->kretprobe_instances.first; 1972 else 1973 node = node->next; 1974 1975 while (node) { 1976 ri = container_of(node, struct kretprobe_instance, llist); 1977 if (ri->ret_addr != kretprobe_trampoline_addr()) { 1978 *cur = node; 1979 return ri->ret_addr; 1980 } 1981 node = node->next; 1982 } 1983 return NULL; 1984 } 1985 NOKPROBE_SYMBOL(__kretprobe_find_ret_addr); 1986 1987 /** 1988 * kretprobe_find_ret_addr -- Find correct return address modified by kretprobe 1989 * @tsk: Target task 1990 * @fp: A frame pointer 1991 * @cur: a storage of the loop cursor llist_node pointer for next call 1992 * 1993 * Find the correct return address modified by a kretprobe on @tsk in unsigned 1994 * long type. If it finds the return address, this returns that address value, 1995 * or this returns 0. 1996 * The @tsk must be 'current' or a task which is not running. @fp is a hint 1997 * to get the currect return address - which is compared with the 1998 * kretprobe_instance::fp field. The @cur is a loop cursor for searching the 1999 * kretprobe return addresses on the @tsk. The '*@cur' should be NULL at the 2000 * first call, but '@cur' itself must NOT NULL. 2001 */ 2002 unsigned long kretprobe_find_ret_addr(struct task_struct *tsk, void *fp, 2003 struct llist_node **cur) 2004 { 2005 struct kretprobe_instance *ri = NULL; 2006 kprobe_opcode_t *ret; 2007 2008 if (WARN_ON_ONCE(!cur)) 2009 return 0; 2010 2011 do { 2012 ret = __kretprobe_find_ret_addr(tsk, cur); 2013 if (!ret) 2014 break; 2015 ri = container_of(*cur, struct kretprobe_instance, llist); 2016 } while (ri->fp != fp); 2017 2018 return (unsigned long)ret; 2019 } 2020 NOKPROBE_SYMBOL(kretprobe_find_ret_addr); 2021 2022 void __weak arch_kretprobe_fixup_return(struct pt_regs *regs, 2023 kprobe_opcode_t *correct_ret_addr) 2024 { 2025 /* 2026 * Do nothing by default. Please fill this to update the fake return 2027 * address on the stack with the correct one on each arch if possible. 2028 */ 2029 } 2030 2031 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs, 2032 void *frame_pointer) 2033 { 2034 struct kretprobe_instance *ri = NULL; 2035 struct llist_node *first, *node = NULL; 2036 kprobe_opcode_t *correct_ret_addr; 2037 struct kretprobe *rp; 2038 2039 /* Find correct address and all nodes for this frame. */ 2040 correct_ret_addr = __kretprobe_find_ret_addr(current, &node); 2041 if (!correct_ret_addr) { 2042 pr_err("kretprobe: Return address not found, not execute handler. Maybe there is a bug in the kernel.\n"); 2043 BUG_ON(1); 2044 } 2045 2046 /* 2047 * Set the return address as the instruction pointer, because if the 2048 * user handler calls stack_trace_save_regs() with this 'regs', 2049 * the stack trace will start from the instruction pointer. 2050 */ 2051 instruction_pointer_set(regs, (unsigned long)correct_ret_addr); 2052 2053 /* Run the user handler of the nodes. */ 2054 first = current->kretprobe_instances.first; 2055 while (first) { 2056 ri = container_of(first, struct kretprobe_instance, llist); 2057 2058 if (WARN_ON_ONCE(ri->fp != frame_pointer)) 2059 break; 2060 2061 rp = get_kretprobe(ri); 2062 if (rp && rp->handler) { 2063 struct kprobe *prev = kprobe_running(); 2064 2065 __this_cpu_write(current_kprobe, &rp->kp); 2066 ri->ret_addr = correct_ret_addr; 2067 rp->handler(ri, regs); 2068 __this_cpu_write(current_kprobe, prev); 2069 } 2070 if (first == node) 2071 break; 2072 2073 first = first->next; 2074 } 2075 2076 arch_kretprobe_fixup_return(regs, correct_ret_addr); 2077 2078 /* Unlink all nodes for this frame. */ 2079 first = current->kretprobe_instances.first; 2080 current->kretprobe_instances.first = node->next; 2081 node->next = NULL; 2082 2083 /* Recycle free instances. */ 2084 while (first) { 2085 ri = container_of(first, struct kretprobe_instance, llist); 2086 first = first->next; 2087 2088 recycle_rp_inst(ri); 2089 } 2090 2091 return (unsigned long)correct_ret_addr; 2092 } 2093 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler) 2094 2095 /* 2096 * This kprobe pre_handler is registered with every kretprobe. When probe 2097 * hits it will set up the return probe. 2098 */ 2099 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 2100 { 2101 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 2102 struct kretprobe_instance *ri; 2103 struct freelist_node *fn; 2104 2105 fn = freelist_try_get(&rp->freelist); 2106 if (!fn) { 2107 rp->nmissed++; 2108 return 0; 2109 } 2110 2111 ri = container_of(fn, struct kretprobe_instance, freelist); 2112 2113 if (rp->entry_handler && rp->entry_handler(ri, regs)) { 2114 freelist_add(&ri->freelist, &rp->freelist); 2115 return 0; 2116 } 2117 2118 arch_prepare_kretprobe(ri, regs); 2119 2120 __llist_add(&ri->llist, ¤t->kretprobe_instances); 2121 2122 return 0; 2123 } 2124 NOKPROBE_SYMBOL(pre_handler_kretprobe); 2125 #else /* CONFIG_KRETPROBE_ON_RETHOOK */ 2126 /* 2127 * This kprobe pre_handler is registered with every kretprobe. When probe 2128 * hits it will set up the return probe. 2129 */ 2130 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 2131 { 2132 struct kretprobe *rp = container_of(p, struct kretprobe, kp); 2133 struct kretprobe_instance *ri; 2134 struct rethook_node *rhn; 2135 2136 rhn = rethook_try_get(rp->rh); 2137 if (!rhn) { 2138 rp->nmissed++; 2139 return 0; 2140 } 2141 2142 ri = container_of(rhn, struct kretprobe_instance, node); 2143 2144 if (rp->entry_handler && rp->entry_handler(ri, regs)) 2145 rethook_recycle(rhn); 2146 else 2147 rethook_hook(rhn, regs, kprobe_ftrace(p)); 2148 2149 return 0; 2150 } 2151 NOKPROBE_SYMBOL(pre_handler_kretprobe); 2152 2153 static void kretprobe_rethook_handler(struct rethook_node *rh, void *data, 2154 unsigned long ret_addr, 2155 struct pt_regs *regs) 2156 { 2157 struct kretprobe *rp = (struct kretprobe *)data; 2158 struct kretprobe_instance *ri; 2159 struct kprobe_ctlblk *kcb; 2160 2161 /* The data must NOT be null. This means rethook data structure is broken. */ 2162 if (WARN_ON_ONCE(!data) || !rp->handler) 2163 return; 2164 2165 __this_cpu_write(current_kprobe, &rp->kp); 2166 kcb = get_kprobe_ctlblk(); 2167 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 2168 2169 ri = container_of(rh, struct kretprobe_instance, node); 2170 rp->handler(ri, regs); 2171 2172 __this_cpu_write(current_kprobe, NULL); 2173 } 2174 NOKPROBE_SYMBOL(kretprobe_rethook_handler); 2175 2176 #endif /* !CONFIG_KRETPROBE_ON_RETHOOK */ 2177 2178 /** 2179 * kprobe_on_func_entry() -- check whether given address is function entry 2180 * @addr: Target address 2181 * @sym: Target symbol name 2182 * @offset: The offset from the symbol or the address 2183 * 2184 * This checks whether the given @addr+@offset or @sym+@offset is on the 2185 * function entry address or not. 2186 * This returns 0 if it is the function entry, or -EINVAL if it is not. 2187 * And also it returns -ENOENT if it fails the symbol or address lookup. 2188 * Caller must pass @addr or @sym (either one must be NULL), or this 2189 * returns -EINVAL. 2190 */ 2191 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset) 2192 { 2193 bool on_func_entry; 2194 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset, &on_func_entry); 2195 2196 if (IS_ERR(kp_addr)) 2197 return PTR_ERR(kp_addr); 2198 2199 if (!on_func_entry) 2200 return -EINVAL; 2201 2202 return 0; 2203 } 2204 2205 int register_kretprobe(struct kretprobe *rp) 2206 { 2207 int ret; 2208 struct kretprobe_instance *inst; 2209 int i; 2210 void *addr; 2211 2212 ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset); 2213 if (ret) 2214 return ret; 2215 2216 /* If only 'rp->kp.addr' is specified, check reregistering kprobes */ 2217 if (rp->kp.addr && warn_kprobe_rereg(&rp->kp)) 2218 return -EINVAL; 2219 2220 if (kretprobe_blacklist_size) { 2221 addr = kprobe_addr(&rp->kp); 2222 if (IS_ERR(addr)) 2223 return PTR_ERR(addr); 2224 2225 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 2226 if (kretprobe_blacklist[i].addr == addr) 2227 return -EINVAL; 2228 } 2229 } 2230 2231 if (rp->data_size > KRETPROBE_MAX_DATA_SIZE) 2232 return -E2BIG; 2233 2234 rp->kp.pre_handler = pre_handler_kretprobe; 2235 rp->kp.post_handler = NULL; 2236 2237 /* Pre-allocate memory for max kretprobe instances */ 2238 if (rp->maxactive <= 0) 2239 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus()); 2240 2241 #ifdef CONFIG_KRETPROBE_ON_RETHOOK 2242 rp->rh = rethook_alloc((void *)rp, kretprobe_rethook_handler); 2243 if (!rp->rh) 2244 return -ENOMEM; 2245 2246 for (i = 0; i < rp->maxactive; i++) { 2247 inst = kzalloc(struct_size(inst, data, rp->data_size), GFP_KERNEL); 2248 if (inst == NULL) { 2249 rethook_free(rp->rh); 2250 rp->rh = NULL; 2251 return -ENOMEM; 2252 } 2253 rethook_add_node(rp->rh, &inst->node); 2254 } 2255 rp->nmissed = 0; 2256 /* Establish function entry probe point */ 2257 ret = register_kprobe(&rp->kp); 2258 if (ret != 0) { 2259 rethook_free(rp->rh); 2260 rp->rh = NULL; 2261 } 2262 #else /* !CONFIG_KRETPROBE_ON_RETHOOK */ 2263 rp->freelist.head = NULL; 2264 rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL); 2265 if (!rp->rph) 2266 return -ENOMEM; 2267 2268 rcu_assign_pointer(rp->rph->rp, rp); 2269 for (i = 0; i < rp->maxactive; i++) { 2270 inst = kzalloc(struct_size(inst, data, rp->data_size), GFP_KERNEL); 2271 if (inst == NULL) { 2272 refcount_set(&rp->rph->ref, i); 2273 free_rp_inst(rp); 2274 return -ENOMEM; 2275 } 2276 inst->rph = rp->rph; 2277 freelist_add(&inst->freelist, &rp->freelist); 2278 } 2279 refcount_set(&rp->rph->ref, i); 2280 2281 rp->nmissed = 0; 2282 /* Establish function entry probe point */ 2283 ret = register_kprobe(&rp->kp); 2284 if (ret != 0) 2285 free_rp_inst(rp); 2286 #endif 2287 return ret; 2288 } 2289 EXPORT_SYMBOL_GPL(register_kretprobe); 2290 2291 int register_kretprobes(struct kretprobe **rps, int num) 2292 { 2293 int ret = 0, i; 2294 2295 if (num <= 0) 2296 return -EINVAL; 2297 for (i = 0; i < num; i++) { 2298 ret = register_kretprobe(rps[i]); 2299 if (ret < 0) { 2300 if (i > 0) 2301 unregister_kretprobes(rps, i); 2302 break; 2303 } 2304 } 2305 return ret; 2306 } 2307 EXPORT_SYMBOL_GPL(register_kretprobes); 2308 2309 void unregister_kretprobe(struct kretprobe *rp) 2310 { 2311 unregister_kretprobes(&rp, 1); 2312 } 2313 EXPORT_SYMBOL_GPL(unregister_kretprobe); 2314 2315 void unregister_kretprobes(struct kretprobe **rps, int num) 2316 { 2317 int i; 2318 2319 if (num <= 0) 2320 return; 2321 mutex_lock(&kprobe_mutex); 2322 for (i = 0; i < num; i++) { 2323 if (__unregister_kprobe_top(&rps[i]->kp) < 0) 2324 rps[i]->kp.addr = NULL; 2325 #ifdef CONFIG_KRETPROBE_ON_RETHOOK 2326 rethook_free(rps[i]->rh); 2327 #else 2328 rcu_assign_pointer(rps[i]->rph->rp, NULL); 2329 #endif 2330 } 2331 mutex_unlock(&kprobe_mutex); 2332 2333 synchronize_rcu(); 2334 for (i = 0; i < num; i++) { 2335 if (rps[i]->kp.addr) { 2336 __unregister_kprobe_bottom(&rps[i]->kp); 2337 #ifndef CONFIG_KRETPROBE_ON_RETHOOK 2338 free_rp_inst(rps[i]); 2339 #endif 2340 } 2341 } 2342 } 2343 EXPORT_SYMBOL_GPL(unregister_kretprobes); 2344 2345 #else /* CONFIG_KRETPROBES */ 2346 int register_kretprobe(struct kretprobe *rp) 2347 { 2348 return -EOPNOTSUPP; 2349 } 2350 EXPORT_SYMBOL_GPL(register_kretprobe); 2351 2352 int register_kretprobes(struct kretprobe **rps, int num) 2353 { 2354 return -EOPNOTSUPP; 2355 } 2356 EXPORT_SYMBOL_GPL(register_kretprobes); 2357 2358 void unregister_kretprobe(struct kretprobe *rp) 2359 { 2360 } 2361 EXPORT_SYMBOL_GPL(unregister_kretprobe); 2362 2363 void unregister_kretprobes(struct kretprobe **rps, int num) 2364 { 2365 } 2366 EXPORT_SYMBOL_GPL(unregister_kretprobes); 2367 2368 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) 2369 { 2370 return 0; 2371 } 2372 NOKPROBE_SYMBOL(pre_handler_kretprobe); 2373 2374 #endif /* CONFIG_KRETPROBES */ 2375 2376 /* Set the kprobe gone and remove its instruction buffer. */ 2377 static void kill_kprobe(struct kprobe *p) 2378 { 2379 struct kprobe *kp; 2380 2381 lockdep_assert_held(&kprobe_mutex); 2382 2383 /* 2384 * The module is going away. We should disarm the kprobe which 2385 * is using ftrace, because ftrace framework is still available at 2386 * 'MODULE_STATE_GOING' notification. 2387 */ 2388 if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) 2389 disarm_kprobe_ftrace(p); 2390 2391 p->flags |= KPROBE_FLAG_GONE; 2392 if (kprobe_aggrprobe(p)) { 2393 /* 2394 * If this is an aggr_kprobe, we have to list all the 2395 * chained probes and mark them GONE. 2396 */ 2397 list_for_each_entry(kp, &p->list, list) 2398 kp->flags |= KPROBE_FLAG_GONE; 2399 p->post_handler = NULL; 2400 kill_optimized_kprobe(p); 2401 } 2402 /* 2403 * Here, we can remove insn_slot safely, because no thread calls 2404 * the original probed function (which will be freed soon) any more. 2405 */ 2406 arch_remove_kprobe(p); 2407 } 2408 2409 /* Disable one kprobe */ 2410 int disable_kprobe(struct kprobe *kp) 2411 { 2412 int ret = 0; 2413 struct kprobe *p; 2414 2415 mutex_lock(&kprobe_mutex); 2416 2417 /* Disable this kprobe */ 2418 p = __disable_kprobe(kp); 2419 if (IS_ERR(p)) 2420 ret = PTR_ERR(p); 2421 2422 mutex_unlock(&kprobe_mutex); 2423 return ret; 2424 } 2425 EXPORT_SYMBOL_GPL(disable_kprobe); 2426 2427 /* Enable one kprobe */ 2428 int enable_kprobe(struct kprobe *kp) 2429 { 2430 int ret = 0; 2431 struct kprobe *p; 2432 2433 mutex_lock(&kprobe_mutex); 2434 2435 /* Check whether specified probe is valid. */ 2436 p = __get_valid_kprobe(kp); 2437 if (unlikely(p == NULL)) { 2438 ret = -EINVAL; 2439 goto out; 2440 } 2441 2442 if (kprobe_gone(kp)) { 2443 /* This kprobe has gone, we couldn't enable it. */ 2444 ret = -EINVAL; 2445 goto out; 2446 } 2447 2448 if (p != kp) 2449 kp->flags &= ~KPROBE_FLAG_DISABLED; 2450 2451 if (!kprobes_all_disarmed && kprobe_disabled(p)) { 2452 p->flags &= ~KPROBE_FLAG_DISABLED; 2453 ret = arm_kprobe(p); 2454 if (ret) { 2455 p->flags |= KPROBE_FLAG_DISABLED; 2456 if (p != kp) 2457 kp->flags |= KPROBE_FLAG_DISABLED; 2458 } 2459 } 2460 out: 2461 mutex_unlock(&kprobe_mutex); 2462 return ret; 2463 } 2464 EXPORT_SYMBOL_GPL(enable_kprobe); 2465 2466 /* Caller must NOT call this in usual path. This is only for critical case */ 2467 void dump_kprobe(struct kprobe *kp) 2468 { 2469 pr_err("Dump kprobe:\n.symbol_name = %s, .offset = %x, .addr = %pS\n", 2470 kp->symbol_name, kp->offset, kp->addr); 2471 } 2472 NOKPROBE_SYMBOL(dump_kprobe); 2473 2474 int kprobe_add_ksym_blacklist(unsigned long entry) 2475 { 2476 struct kprobe_blacklist_entry *ent; 2477 unsigned long offset = 0, size = 0; 2478 2479 if (!kernel_text_address(entry) || 2480 !kallsyms_lookup_size_offset(entry, &size, &offset)) 2481 return -EINVAL; 2482 2483 ent = kmalloc(sizeof(*ent), GFP_KERNEL); 2484 if (!ent) 2485 return -ENOMEM; 2486 ent->start_addr = entry; 2487 ent->end_addr = entry + size; 2488 INIT_LIST_HEAD(&ent->list); 2489 list_add_tail(&ent->list, &kprobe_blacklist); 2490 2491 return (int)size; 2492 } 2493 2494 /* Add all symbols in given area into kprobe blacklist */ 2495 int kprobe_add_area_blacklist(unsigned long start, unsigned long end) 2496 { 2497 unsigned long entry; 2498 int ret = 0; 2499 2500 for (entry = start; entry < end; entry += ret) { 2501 ret = kprobe_add_ksym_blacklist(entry); 2502 if (ret < 0) 2503 return ret; 2504 if (ret == 0) /* In case of alias symbol */ 2505 ret = 1; 2506 } 2507 return 0; 2508 } 2509 2510 /* Remove all symbols in given area from kprobe blacklist */ 2511 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end) 2512 { 2513 struct kprobe_blacklist_entry *ent, *n; 2514 2515 list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) { 2516 if (ent->start_addr < start || ent->start_addr >= end) 2517 continue; 2518 list_del(&ent->list); 2519 kfree(ent); 2520 } 2521 } 2522 2523 static void kprobe_remove_ksym_blacklist(unsigned long entry) 2524 { 2525 kprobe_remove_area_blacklist(entry, entry + 1); 2526 } 2527 2528 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value, 2529 char *type, char *sym) 2530 { 2531 return -ERANGE; 2532 } 2533 2534 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 2535 char *sym) 2536 { 2537 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT 2538 if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym)) 2539 return 0; 2540 #ifdef CONFIG_OPTPROBES 2541 if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym)) 2542 return 0; 2543 #endif 2544 #endif 2545 if (!arch_kprobe_get_kallsym(&symnum, value, type, sym)) 2546 return 0; 2547 return -ERANGE; 2548 } 2549 2550 int __init __weak arch_populate_kprobe_blacklist(void) 2551 { 2552 return 0; 2553 } 2554 2555 /* 2556 * Lookup and populate the kprobe_blacklist. 2557 * 2558 * Unlike the kretprobe blacklist, we'll need to determine 2559 * the range of addresses that belong to the said functions, 2560 * since a kprobe need not necessarily be at the beginning 2561 * of a function. 2562 */ 2563 static int __init populate_kprobe_blacklist(unsigned long *start, 2564 unsigned long *end) 2565 { 2566 unsigned long entry; 2567 unsigned long *iter; 2568 int ret; 2569 2570 for (iter = start; iter < end; iter++) { 2571 entry = (unsigned long)dereference_symbol_descriptor((void *)*iter); 2572 ret = kprobe_add_ksym_blacklist(entry); 2573 if (ret == -EINVAL) 2574 continue; 2575 if (ret < 0) 2576 return ret; 2577 } 2578 2579 /* Symbols in '__kprobes_text' are blacklisted */ 2580 ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start, 2581 (unsigned long)__kprobes_text_end); 2582 if (ret) 2583 return ret; 2584 2585 /* Symbols in 'noinstr' section are blacklisted */ 2586 ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start, 2587 (unsigned long)__noinstr_text_end); 2588 2589 return ret ? : arch_populate_kprobe_blacklist(); 2590 } 2591 2592 static void add_module_kprobe_blacklist(struct module *mod) 2593 { 2594 unsigned long start, end; 2595 int i; 2596 2597 if (mod->kprobe_blacklist) { 2598 for (i = 0; i < mod->num_kprobe_blacklist; i++) 2599 kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]); 2600 } 2601 2602 start = (unsigned long)mod->kprobes_text_start; 2603 if (start) { 2604 end = start + mod->kprobes_text_size; 2605 kprobe_add_area_blacklist(start, end); 2606 } 2607 2608 start = (unsigned long)mod->noinstr_text_start; 2609 if (start) { 2610 end = start + mod->noinstr_text_size; 2611 kprobe_add_area_blacklist(start, end); 2612 } 2613 } 2614 2615 static void remove_module_kprobe_blacklist(struct module *mod) 2616 { 2617 unsigned long start, end; 2618 int i; 2619 2620 if (mod->kprobe_blacklist) { 2621 for (i = 0; i < mod->num_kprobe_blacklist; i++) 2622 kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]); 2623 } 2624 2625 start = (unsigned long)mod->kprobes_text_start; 2626 if (start) { 2627 end = start + mod->kprobes_text_size; 2628 kprobe_remove_area_blacklist(start, end); 2629 } 2630 2631 start = (unsigned long)mod->noinstr_text_start; 2632 if (start) { 2633 end = start + mod->noinstr_text_size; 2634 kprobe_remove_area_blacklist(start, end); 2635 } 2636 } 2637 2638 /* Module notifier call back, checking kprobes on the module */ 2639 static int kprobes_module_callback(struct notifier_block *nb, 2640 unsigned long val, void *data) 2641 { 2642 struct module *mod = data; 2643 struct hlist_head *head; 2644 struct kprobe *p; 2645 unsigned int i; 2646 int checkcore = (val == MODULE_STATE_GOING); 2647 2648 if (val == MODULE_STATE_COMING) { 2649 mutex_lock(&kprobe_mutex); 2650 add_module_kprobe_blacklist(mod); 2651 mutex_unlock(&kprobe_mutex); 2652 } 2653 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE) 2654 return NOTIFY_DONE; 2655 2656 /* 2657 * When 'MODULE_STATE_GOING' was notified, both of module '.text' and 2658 * '.init.text' sections would be freed. When 'MODULE_STATE_LIVE' was 2659 * notified, only '.init.text' section would be freed. We need to 2660 * disable kprobes which have been inserted in the sections. 2661 */ 2662 mutex_lock(&kprobe_mutex); 2663 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2664 head = &kprobe_table[i]; 2665 hlist_for_each_entry(p, head, hlist) 2666 if (within_module_init((unsigned long)p->addr, mod) || 2667 (checkcore && 2668 within_module_core((unsigned long)p->addr, mod))) { 2669 /* 2670 * The vaddr this probe is installed will soon 2671 * be vfreed buy not synced to disk. Hence, 2672 * disarming the breakpoint isn't needed. 2673 * 2674 * Note, this will also move any optimized probes 2675 * that are pending to be removed from their 2676 * corresponding lists to the 'freeing_list' and 2677 * will not be touched by the delayed 2678 * kprobe_optimizer() work handler. 2679 */ 2680 kill_kprobe(p); 2681 } 2682 } 2683 if (val == MODULE_STATE_GOING) 2684 remove_module_kprobe_blacklist(mod); 2685 mutex_unlock(&kprobe_mutex); 2686 return NOTIFY_DONE; 2687 } 2688 2689 static struct notifier_block kprobe_module_nb = { 2690 .notifier_call = kprobes_module_callback, 2691 .priority = 0 2692 }; 2693 2694 void kprobe_free_init_mem(void) 2695 { 2696 void *start = (void *)(&__init_begin); 2697 void *end = (void *)(&__init_end); 2698 struct hlist_head *head; 2699 struct kprobe *p; 2700 int i; 2701 2702 mutex_lock(&kprobe_mutex); 2703 2704 /* Kill all kprobes on initmem because the target code has been freed. */ 2705 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2706 head = &kprobe_table[i]; 2707 hlist_for_each_entry(p, head, hlist) { 2708 if (start <= (void *)p->addr && (void *)p->addr < end) 2709 kill_kprobe(p); 2710 } 2711 } 2712 2713 mutex_unlock(&kprobe_mutex); 2714 } 2715 2716 static int __init init_kprobes(void) 2717 { 2718 int i, err; 2719 2720 /* FIXME allocate the probe table, currently defined statically */ 2721 /* initialize all list heads */ 2722 for (i = 0; i < KPROBE_TABLE_SIZE; i++) 2723 INIT_HLIST_HEAD(&kprobe_table[i]); 2724 2725 err = populate_kprobe_blacklist(__start_kprobe_blacklist, 2726 __stop_kprobe_blacklist); 2727 if (err) 2728 pr_err("Failed to populate blacklist (error %d), kprobes not restricted, be careful using them!\n", err); 2729 2730 if (kretprobe_blacklist_size) { 2731 /* lookup the function address from its name */ 2732 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) { 2733 kretprobe_blacklist[i].addr = 2734 kprobe_lookup_name(kretprobe_blacklist[i].name, 0); 2735 if (!kretprobe_blacklist[i].addr) 2736 pr_err("Failed to lookup symbol '%s' for kretprobe blacklist. Maybe the target function is removed or renamed.\n", 2737 kretprobe_blacklist[i].name); 2738 } 2739 } 2740 2741 /* By default, kprobes are armed */ 2742 kprobes_all_disarmed = false; 2743 2744 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT) 2745 /* Init 'kprobe_optinsn_slots' for allocation */ 2746 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE; 2747 #endif 2748 2749 err = arch_init_kprobes(); 2750 if (!err) 2751 err = register_die_notifier(&kprobe_exceptions_nb); 2752 if (!err) 2753 err = register_module_notifier(&kprobe_module_nb); 2754 2755 kprobes_initialized = (err == 0); 2756 kprobe_sysctls_init(); 2757 return err; 2758 } 2759 early_initcall(init_kprobes); 2760 2761 #if defined(CONFIG_OPTPROBES) 2762 static int __init init_optprobes(void) 2763 { 2764 /* 2765 * Enable kprobe optimization - this kicks the optimizer which 2766 * depends on synchronize_rcu_tasks() and ksoftirqd, that is 2767 * not spawned in early initcall. So delay the optimization. 2768 */ 2769 optimize_all_kprobes(); 2770 2771 return 0; 2772 } 2773 subsys_initcall(init_optprobes); 2774 #endif 2775 2776 #ifdef CONFIG_DEBUG_FS 2777 static void report_probe(struct seq_file *pi, struct kprobe *p, 2778 const char *sym, int offset, char *modname, struct kprobe *pp) 2779 { 2780 char *kprobe_type; 2781 void *addr = p->addr; 2782 2783 if (p->pre_handler == pre_handler_kretprobe) 2784 kprobe_type = "r"; 2785 else 2786 kprobe_type = "k"; 2787 2788 if (!kallsyms_show_value(pi->file->f_cred)) 2789 addr = NULL; 2790 2791 if (sym) 2792 seq_printf(pi, "%px %s %s+0x%x %s ", 2793 addr, kprobe_type, sym, offset, 2794 (modname ? modname : " ")); 2795 else /* try to use %pS */ 2796 seq_printf(pi, "%px %s %pS ", 2797 addr, kprobe_type, p->addr); 2798 2799 if (!pp) 2800 pp = p; 2801 seq_printf(pi, "%s%s%s%s\n", 2802 (kprobe_gone(p) ? "[GONE]" : ""), 2803 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), 2804 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), 2805 (kprobe_ftrace(pp) ? "[FTRACE]" : "")); 2806 } 2807 2808 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos) 2809 { 2810 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL; 2811 } 2812 2813 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos) 2814 { 2815 (*pos)++; 2816 if (*pos >= KPROBE_TABLE_SIZE) 2817 return NULL; 2818 return pos; 2819 } 2820 2821 static void kprobe_seq_stop(struct seq_file *f, void *v) 2822 { 2823 /* Nothing to do */ 2824 } 2825 2826 static int show_kprobe_addr(struct seq_file *pi, void *v) 2827 { 2828 struct hlist_head *head; 2829 struct kprobe *p, *kp; 2830 const char *sym = NULL; 2831 unsigned int i = *(loff_t *) v; 2832 unsigned long offset = 0; 2833 char *modname, namebuf[KSYM_NAME_LEN]; 2834 2835 head = &kprobe_table[i]; 2836 preempt_disable(); 2837 hlist_for_each_entry_rcu(p, head, hlist) { 2838 sym = kallsyms_lookup((unsigned long)p->addr, NULL, 2839 &offset, &modname, namebuf); 2840 if (kprobe_aggrprobe(p)) { 2841 list_for_each_entry_rcu(kp, &p->list, list) 2842 report_probe(pi, kp, sym, offset, modname, p); 2843 } else 2844 report_probe(pi, p, sym, offset, modname, NULL); 2845 } 2846 preempt_enable(); 2847 return 0; 2848 } 2849 2850 static const struct seq_operations kprobes_sops = { 2851 .start = kprobe_seq_start, 2852 .next = kprobe_seq_next, 2853 .stop = kprobe_seq_stop, 2854 .show = show_kprobe_addr 2855 }; 2856 2857 DEFINE_SEQ_ATTRIBUTE(kprobes); 2858 2859 /* kprobes/blacklist -- shows which functions can not be probed */ 2860 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos) 2861 { 2862 mutex_lock(&kprobe_mutex); 2863 return seq_list_start(&kprobe_blacklist, *pos); 2864 } 2865 2866 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos) 2867 { 2868 return seq_list_next(v, &kprobe_blacklist, pos); 2869 } 2870 2871 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) 2872 { 2873 struct kprobe_blacklist_entry *ent = 2874 list_entry(v, struct kprobe_blacklist_entry, list); 2875 2876 /* 2877 * If '/proc/kallsyms' is not showing kernel address, we won't 2878 * show them here either. 2879 */ 2880 if (!kallsyms_show_value(m->file->f_cred)) 2881 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL, 2882 (void *)ent->start_addr); 2883 else 2884 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, 2885 (void *)ent->end_addr, (void *)ent->start_addr); 2886 return 0; 2887 } 2888 2889 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v) 2890 { 2891 mutex_unlock(&kprobe_mutex); 2892 } 2893 2894 static const struct seq_operations kprobe_blacklist_sops = { 2895 .start = kprobe_blacklist_seq_start, 2896 .next = kprobe_blacklist_seq_next, 2897 .stop = kprobe_blacklist_seq_stop, 2898 .show = kprobe_blacklist_seq_show, 2899 }; 2900 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist); 2901 2902 static int arm_all_kprobes(void) 2903 { 2904 struct hlist_head *head; 2905 struct kprobe *p; 2906 unsigned int i, total = 0, errors = 0; 2907 int err, ret = 0; 2908 2909 mutex_lock(&kprobe_mutex); 2910 2911 /* If kprobes are armed, just return */ 2912 if (!kprobes_all_disarmed) 2913 goto already_enabled; 2914 2915 /* 2916 * optimize_kprobe() called by arm_kprobe() checks 2917 * kprobes_all_disarmed, so set kprobes_all_disarmed before 2918 * arm_kprobe. 2919 */ 2920 kprobes_all_disarmed = false; 2921 /* Arming kprobes doesn't optimize kprobe itself */ 2922 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2923 head = &kprobe_table[i]; 2924 /* Arm all kprobes on a best-effort basis */ 2925 hlist_for_each_entry(p, head, hlist) { 2926 if (!kprobe_disabled(p)) { 2927 err = arm_kprobe(p); 2928 if (err) { 2929 errors++; 2930 ret = err; 2931 } 2932 total++; 2933 } 2934 } 2935 } 2936 2937 if (errors) 2938 pr_warn("Kprobes globally enabled, but failed to enable %d out of %d probes. Please check which kprobes are kept disabled via debugfs.\n", 2939 errors, total); 2940 else 2941 pr_info("Kprobes globally enabled\n"); 2942 2943 already_enabled: 2944 mutex_unlock(&kprobe_mutex); 2945 return ret; 2946 } 2947 2948 static int disarm_all_kprobes(void) 2949 { 2950 struct hlist_head *head; 2951 struct kprobe *p; 2952 unsigned int i, total = 0, errors = 0; 2953 int err, ret = 0; 2954 2955 mutex_lock(&kprobe_mutex); 2956 2957 /* If kprobes are already disarmed, just return */ 2958 if (kprobes_all_disarmed) { 2959 mutex_unlock(&kprobe_mutex); 2960 return 0; 2961 } 2962 2963 kprobes_all_disarmed = true; 2964 2965 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 2966 head = &kprobe_table[i]; 2967 /* Disarm all kprobes on a best-effort basis */ 2968 hlist_for_each_entry(p, head, hlist) { 2969 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) { 2970 err = disarm_kprobe(p, false); 2971 if (err) { 2972 errors++; 2973 ret = err; 2974 } 2975 total++; 2976 } 2977 } 2978 } 2979 2980 if (errors) 2981 pr_warn("Kprobes globally disabled, but failed to disable %d out of %d probes. Please check which kprobes are kept enabled via debugfs.\n", 2982 errors, total); 2983 else 2984 pr_info("Kprobes globally disabled\n"); 2985 2986 mutex_unlock(&kprobe_mutex); 2987 2988 /* Wait for disarming all kprobes by optimizer */ 2989 wait_for_kprobe_optimizer(); 2990 2991 return ret; 2992 } 2993 2994 /* 2995 * XXX: The debugfs bool file interface doesn't allow for callbacks 2996 * when the bool state is switched. We can reuse that facility when 2997 * available 2998 */ 2999 static ssize_t read_enabled_file_bool(struct file *file, 3000 char __user *user_buf, size_t count, loff_t *ppos) 3001 { 3002 char buf[3]; 3003 3004 if (!kprobes_all_disarmed) 3005 buf[0] = '1'; 3006 else 3007 buf[0] = '0'; 3008 buf[1] = '\n'; 3009 buf[2] = 0x00; 3010 return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 3011 } 3012 3013 static ssize_t write_enabled_file_bool(struct file *file, 3014 const char __user *user_buf, size_t count, loff_t *ppos) 3015 { 3016 bool enable; 3017 int ret; 3018 3019 ret = kstrtobool_from_user(user_buf, count, &enable); 3020 if (ret) 3021 return ret; 3022 3023 ret = enable ? arm_all_kprobes() : disarm_all_kprobes(); 3024 if (ret) 3025 return ret; 3026 3027 return count; 3028 } 3029 3030 static const struct file_operations fops_kp = { 3031 .read = read_enabled_file_bool, 3032 .write = write_enabled_file_bool, 3033 .llseek = default_llseek, 3034 }; 3035 3036 static int __init debugfs_kprobe_init(void) 3037 { 3038 struct dentry *dir; 3039 3040 dir = debugfs_create_dir("kprobes", NULL); 3041 3042 debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops); 3043 3044 debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp); 3045 3046 debugfs_create_file("blacklist", 0400, dir, NULL, 3047 &kprobe_blacklist_fops); 3048 3049 return 0; 3050 } 3051 3052 late_initcall(debugfs_kprobe_init); 3053 #endif /* CONFIG_DEBUG_FS */ 3054