1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * KFENCE guarded object allocator and fault handling. 4 * 5 * Copyright (C) 2020, Google LLC. 6 */ 7 8 #define pr_fmt(fmt) "kfence: " fmt 9 10 #include <linux/atomic.h> 11 #include <linux/bug.h> 12 #include <linux/debugfs.h> 13 #include <linux/hash.h> 14 #include <linux/irq_work.h> 15 #include <linux/jhash.h> 16 #include <linux/kcsan-checks.h> 17 #include <linux/kfence.h> 18 #include <linux/kmemleak.h> 19 #include <linux/list.h> 20 #include <linux/lockdep.h> 21 #include <linux/log2.h> 22 #include <linux/memblock.h> 23 #include <linux/moduleparam.h> 24 #include <linux/random.h> 25 #include <linux/rcupdate.h> 26 #include <linux/sched/clock.h> 27 #include <linux/sched/sysctl.h> 28 #include <linux/seq_file.h> 29 #include <linux/slab.h> 30 #include <linux/spinlock.h> 31 #include <linux/string.h> 32 33 #include <asm/kfence.h> 34 35 #include "kfence.h" 36 37 /* Disables KFENCE on the first warning assuming an irrecoverable error. */ 38 #define KFENCE_WARN_ON(cond) \ 39 ({ \ 40 const bool __cond = WARN_ON(cond); \ 41 if (unlikely(__cond)) { \ 42 WRITE_ONCE(kfence_enabled, false); \ 43 disabled_by_warn = true; \ 44 } \ 45 __cond; \ 46 }) 47 48 /* === Data ================================================================= */ 49 50 static bool kfence_enabled __read_mostly; 51 static bool disabled_by_warn __read_mostly; 52 53 unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL; 54 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */ 55 56 #ifdef MODULE_PARAM_PREFIX 57 #undef MODULE_PARAM_PREFIX 58 #endif 59 #define MODULE_PARAM_PREFIX "kfence." 60 61 static int kfence_enable_late(void); 62 static int param_set_sample_interval(const char *val, const struct kernel_param *kp) 63 { 64 unsigned long num; 65 int ret = kstrtoul(val, 0, &num); 66 67 if (ret < 0) 68 return ret; 69 70 if (!num) /* Using 0 to indicate KFENCE is disabled. */ 71 WRITE_ONCE(kfence_enabled, false); 72 73 *((unsigned long *)kp->arg) = num; 74 75 if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING) 76 return disabled_by_warn ? -EINVAL : kfence_enable_late(); 77 return 0; 78 } 79 80 static int param_get_sample_interval(char *buffer, const struct kernel_param *kp) 81 { 82 if (!READ_ONCE(kfence_enabled)) 83 return sprintf(buffer, "0\n"); 84 85 return param_get_ulong(buffer, kp); 86 } 87 88 static const struct kernel_param_ops sample_interval_param_ops = { 89 .set = param_set_sample_interval, 90 .get = param_get_sample_interval, 91 }; 92 module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600); 93 94 /* Pool usage% threshold when currently covered allocations are skipped. */ 95 static unsigned long kfence_skip_covered_thresh __read_mostly = 75; 96 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644); 97 98 /* If true, use a deferrable timer. */ 99 static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE); 100 module_param_named(deferrable, kfence_deferrable, bool, 0444); 101 102 /* The pool of pages used for guard pages and objects. */ 103 char *__kfence_pool __read_mostly; 104 EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */ 105 106 /* 107 * Per-object metadata, with one-to-one mapping of object metadata to 108 * backing pages (in __kfence_pool). 109 */ 110 static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0); 111 struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS]; 112 113 /* Freelist with available objects. */ 114 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist); 115 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */ 116 117 /* 118 * The static key to set up a KFENCE allocation; or if static keys are not used 119 * to gate allocations, to avoid a load and compare if KFENCE is disabled. 120 */ 121 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key); 122 123 /* Gates the allocation, ensuring only one succeeds in a given period. */ 124 atomic_t kfence_allocation_gate = ATOMIC_INIT(1); 125 126 /* 127 * A Counting Bloom filter of allocation coverage: limits currently covered 128 * allocations of the same source filling up the pool. 129 * 130 * Assuming a range of 15%-85% unique allocations in the pool at any point in 131 * time, the below parameters provide a probablity of 0.02-0.33 for false 132 * positive hits respectively: 133 * 134 * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)) ^ HNUM 135 */ 136 #define ALLOC_COVERED_HNUM 2 137 #define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2) 138 #define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER) 139 #define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER) 140 #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1) 141 static atomic_t alloc_covered[ALLOC_COVERED_SIZE]; 142 143 /* Stack depth used to determine uniqueness of an allocation. */ 144 #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8) 145 146 /* 147 * Randomness for stack hashes, making the same collisions across reboots and 148 * different machines less likely. 149 */ 150 static u32 stack_hash_seed __ro_after_init; 151 152 /* Statistics counters for debugfs. */ 153 enum kfence_counter_id { 154 KFENCE_COUNTER_ALLOCATED, 155 KFENCE_COUNTER_ALLOCS, 156 KFENCE_COUNTER_FREES, 157 KFENCE_COUNTER_ZOMBIES, 158 KFENCE_COUNTER_BUGS, 159 KFENCE_COUNTER_SKIP_INCOMPAT, 160 KFENCE_COUNTER_SKIP_CAPACITY, 161 KFENCE_COUNTER_SKIP_COVERED, 162 KFENCE_COUNTER_COUNT, 163 }; 164 static atomic_long_t counters[KFENCE_COUNTER_COUNT]; 165 static const char *const counter_names[] = { 166 [KFENCE_COUNTER_ALLOCATED] = "currently allocated", 167 [KFENCE_COUNTER_ALLOCS] = "total allocations", 168 [KFENCE_COUNTER_FREES] = "total frees", 169 [KFENCE_COUNTER_ZOMBIES] = "zombie allocations", 170 [KFENCE_COUNTER_BUGS] = "total bugs", 171 [KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)", 172 [KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)", 173 [KFENCE_COUNTER_SKIP_COVERED] = "skipped allocations (covered)", 174 }; 175 static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT); 176 177 /* === Internals ============================================================ */ 178 179 static inline bool should_skip_covered(void) 180 { 181 unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100; 182 183 return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh; 184 } 185 186 static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries) 187 { 188 num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH); 189 num_entries = filter_irq_stacks(stack_entries, num_entries); 190 return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed); 191 } 192 193 /* 194 * Adds (or subtracts) count @val for allocation stack trace hash 195 * @alloc_stack_hash from Counting Bloom filter. 196 */ 197 static void alloc_covered_add(u32 alloc_stack_hash, int val) 198 { 199 int i; 200 201 for (i = 0; i < ALLOC_COVERED_HNUM; i++) { 202 atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]); 203 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash); 204 } 205 } 206 207 /* 208 * Returns true if the allocation stack trace hash @alloc_stack_hash is 209 * currently contained (non-zero count) in Counting Bloom filter. 210 */ 211 static bool alloc_covered_contains(u32 alloc_stack_hash) 212 { 213 int i; 214 215 for (i = 0; i < ALLOC_COVERED_HNUM; i++) { 216 if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK])) 217 return false; 218 alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash); 219 } 220 221 return true; 222 } 223 224 static bool kfence_protect(unsigned long addr) 225 { 226 return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true)); 227 } 228 229 static bool kfence_unprotect(unsigned long addr) 230 { 231 return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false)); 232 } 233 234 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta) 235 { 236 unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2; 237 unsigned long pageaddr = (unsigned long)&__kfence_pool[offset]; 238 239 /* The checks do not affect performance; only called from slow-paths. */ 240 241 /* Only call with a pointer into kfence_metadata. */ 242 if (KFENCE_WARN_ON(meta < kfence_metadata || 243 meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS)) 244 return 0; 245 246 /* 247 * This metadata object only ever maps to 1 page; verify that the stored 248 * address is in the expected range. 249 */ 250 if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr)) 251 return 0; 252 253 return pageaddr; 254 } 255 256 /* 257 * Update the object's metadata state, including updating the alloc/free stacks 258 * depending on the state transition. 259 */ 260 static noinline void 261 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next, 262 unsigned long *stack_entries, size_t num_stack_entries) 263 { 264 struct kfence_track *track = 265 next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track; 266 267 lockdep_assert_held(&meta->lock); 268 269 if (stack_entries) { 270 memcpy(track->stack_entries, stack_entries, 271 num_stack_entries * sizeof(stack_entries[0])); 272 } else { 273 /* 274 * Skip over 1 (this) functions; noinline ensures we do not 275 * accidentally skip over the caller by never inlining. 276 */ 277 num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1); 278 } 279 track->num_stack_entries = num_stack_entries; 280 track->pid = task_pid_nr(current); 281 track->cpu = raw_smp_processor_id(); 282 track->ts_nsec = local_clock(); /* Same source as printk timestamps. */ 283 284 /* 285 * Pairs with READ_ONCE() in 286 * kfence_shutdown_cache(), 287 * kfence_handle_page_fault(). 288 */ 289 WRITE_ONCE(meta->state, next); 290 } 291 292 /* Write canary byte to @addr. */ 293 static inline bool set_canary_byte(u8 *addr) 294 { 295 *addr = KFENCE_CANARY_PATTERN(addr); 296 return true; 297 } 298 299 /* Check canary byte at @addr. */ 300 static inline bool check_canary_byte(u8 *addr) 301 { 302 struct kfence_metadata *meta; 303 unsigned long flags; 304 305 if (likely(*addr == KFENCE_CANARY_PATTERN(addr))) 306 return true; 307 308 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); 309 310 meta = addr_to_metadata((unsigned long)addr); 311 raw_spin_lock_irqsave(&meta->lock, flags); 312 kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION); 313 raw_spin_unlock_irqrestore(&meta->lock, flags); 314 315 return false; 316 } 317 318 /* __always_inline this to ensure we won't do an indirect call to fn. */ 319 static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *)) 320 { 321 const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE); 322 unsigned long addr; 323 324 /* 325 * We'll iterate over each canary byte per-side until fn() returns 326 * false. However, we'll still iterate over the canary bytes to the 327 * right of the object even if there was an error in the canary bytes to 328 * the left of the object. Specifically, if check_canary_byte() 329 * generates an error, showing both sides might give more clues as to 330 * what the error is about when displaying which bytes were corrupted. 331 */ 332 333 /* Apply to left of object. */ 334 for (addr = pageaddr; addr < meta->addr; addr++) { 335 if (!fn((u8 *)addr)) 336 break; 337 } 338 339 /* Apply to right of object. */ 340 for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) { 341 if (!fn((u8 *)addr)) 342 break; 343 } 344 } 345 346 static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp, 347 unsigned long *stack_entries, size_t num_stack_entries, 348 u32 alloc_stack_hash) 349 { 350 struct kfence_metadata *meta = NULL; 351 unsigned long flags; 352 struct slab *slab; 353 void *addr; 354 355 /* Try to obtain a free object. */ 356 raw_spin_lock_irqsave(&kfence_freelist_lock, flags); 357 if (!list_empty(&kfence_freelist)) { 358 meta = list_entry(kfence_freelist.next, struct kfence_metadata, list); 359 list_del_init(&meta->list); 360 } 361 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); 362 if (!meta) { 363 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]); 364 return NULL; 365 } 366 367 if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { 368 /* 369 * This is extremely unlikely -- we are reporting on a 370 * use-after-free, which locked meta->lock, and the reporting 371 * code via printk calls kmalloc() which ends up in 372 * kfence_alloc() and tries to grab the same object that we're 373 * reporting on. While it has never been observed, lockdep does 374 * report that there is a possibility of deadlock. Fix it by 375 * using trylock and bailing out gracefully. 376 */ 377 raw_spin_lock_irqsave(&kfence_freelist_lock, flags); 378 /* Put the object back on the freelist. */ 379 list_add_tail(&meta->list, &kfence_freelist); 380 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); 381 382 return NULL; 383 } 384 385 meta->addr = metadata_to_pageaddr(meta); 386 /* Unprotect if we're reusing this page. */ 387 if (meta->state == KFENCE_OBJECT_FREED) 388 kfence_unprotect(meta->addr); 389 390 /* 391 * Note: for allocations made before RNG initialization, will always 392 * return zero. We still benefit from enabling KFENCE as early as 393 * possible, even when the RNG is not yet available, as this will allow 394 * KFENCE to detect bugs due to earlier allocations. The only downside 395 * is that the out-of-bounds accesses detected are deterministic for 396 * such allocations. 397 */ 398 if (prandom_u32_max(2)) { 399 /* Allocate on the "right" side, re-calculate address. */ 400 meta->addr += PAGE_SIZE - size; 401 meta->addr = ALIGN_DOWN(meta->addr, cache->align); 402 } 403 404 addr = (void *)meta->addr; 405 406 /* Update remaining metadata. */ 407 metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries); 408 /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */ 409 WRITE_ONCE(meta->cache, cache); 410 meta->size = size; 411 meta->alloc_stack_hash = alloc_stack_hash; 412 raw_spin_unlock_irqrestore(&meta->lock, flags); 413 414 alloc_covered_add(alloc_stack_hash, 1); 415 416 /* Set required slab fields. */ 417 slab = virt_to_slab((void *)meta->addr); 418 slab->slab_cache = cache; 419 #if defined(CONFIG_SLUB) 420 slab->objects = 1; 421 #elif defined(CONFIG_SLAB) 422 slab->s_mem = addr; 423 #endif 424 425 /* Memory initialization. */ 426 for_each_canary(meta, set_canary_byte); 427 428 /* 429 * We check slab_want_init_on_alloc() ourselves, rather than letting 430 * SL*B do the initialization, as otherwise we might overwrite KFENCE's 431 * redzone. 432 */ 433 if (unlikely(slab_want_init_on_alloc(gfp, cache))) 434 memzero_explicit(addr, size); 435 if (cache->ctor) 436 cache->ctor(addr); 437 438 if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS)) 439 kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ 440 441 atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]); 442 atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]); 443 444 return addr; 445 } 446 447 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie) 448 { 449 struct kcsan_scoped_access assert_page_exclusive; 450 unsigned long flags; 451 bool init; 452 453 raw_spin_lock_irqsave(&meta->lock, flags); 454 455 if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { 456 /* Invalid or double-free, bail out. */ 457 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); 458 kfence_report_error((unsigned long)addr, false, NULL, meta, 459 KFENCE_ERROR_INVALID_FREE); 460 raw_spin_unlock_irqrestore(&meta->lock, flags); 461 return; 462 } 463 464 /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ 465 kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE, 466 KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, 467 &assert_page_exclusive); 468 469 if (CONFIG_KFENCE_STRESS_TEST_FAULTS) 470 kfence_unprotect((unsigned long)addr); /* To check canary bytes. */ 471 472 /* Restore page protection if there was an OOB access. */ 473 if (meta->unprotected_page) { 474 memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE); 475 kfence_protect(meta->unprotected_page); 476 meta->unprotected_page = 0; 477 } 478 479 /* Mark the object as freed. */ 480 metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0); 481 init = slab_want_init_on_free(meta->cache); 482 raw_spin_unlock_irqrestore(&meta->lock, flags); 483 484 alloc_covered_add(meta->alloc_stack_hash, -1); 485 486 /* Check canary bytes for memory corruption. */ 487 for_each_canary(meta, check_canary_byte); 488 489 /* 490 * Clear memory if init-on-free is set. While we protect the page, the 491 * data is still there, and after a use-after-free is detected, we 492 * unprotect the page, so the data is still accessible. 493 */ 494 if (!zombie && unlikely(init)) 495 memzero_explicit(addr, meta->size); 496 497 /* Protect to detect use-after-frees. */ 498 kfence_protect((unsigned long)addr); 499 500 kcsan_end_scoped_access(&assert_page_exclusive); 501 if (!zombie) { 502 /* Add it to the tail of the freelist for reuse. */ 503 raw_spin_lock_irqsave(&kfence_freelist_lock, flags); 504 KFENCE_WARN_ON(!list_empty(&meta->list)); 505 list_add_tail(&meta->list, &kfence_freelist); 506 raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); 507 508 atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]); 509 atomic_long_inc(&counters[KFENCE_COUNTER_FREES]); 510 } else { 511 /* See kfence_shutdown_cache(). */ 512 atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]); 513 } 514 } 515 516 static void rcu_guarded_free(struct rcu_head *h) 517 { 518 struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head); 519 520 kfence_guarded_free((void *)meta->addr, meta, false); 521 } 522 523 /* 524 * Initialization of the KFENCE pool after its allocation. 525 * Returns 0 on success; otherwise returns the address up to 526 * which partial initialization succeeded. 527 */ 528 static unsigned long kfence_init_pool(void) 529 { 530 unsigned long addr = (unsigned long)__kfence_pool; 531 struct page *pages; 532 int i; 533 534 if (!arch_kfence_init_pool()) 535 return addr; 536 537 pages = virt_to_page(addr); 538 539 /* 540 * Set up object pages: they must have PG_slab set, to avoid freeing 541 * these as real pages. 542 * 543 * We also want to avoid inserting kfence_free() in the kfree() 544 * fast-path in SLUB, and therefore need to ensure kfree() correctly 545 * enters __slab_free() slow-path. 546 */ 547 for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { 548 struct slab *slab = page_slab(&pages[i]); 549 550 if (!i || (i % 2)) 551 continue; 552 553 /* Verify we do not have a compound head page. */ 554 if (WARN_ON(compound_head(&pages[i]) != &pages[i])) 555 return addr; 556 557 __folio_set_slab(slab_folio(slab)); 558 #ifdef CONFIG_MEMCG 559 slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg | 560 MEMCG_DATA_OBJCGS; 561 #endif 562 } 563 564 /* 565 * Protect the first 2 pages. The first page is mostly unnecessary, and 566 * merely serves as an extended guard page. However, adding one 567 * additional page in the beginning gives us an even number of pages, 568 * which simplifies the mapping of address to metadata index. 569 */ 570 for (i = 0; i < 2; i++) { 571 if (unlikely(!kfence_protect(addr))) 572 return addr; 573 574 addr += PAGE_SIZE; 575 } 576 577 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { 578 struct kfence_metadata *meta = &kfence_metadata[i]; 579 580 /* Initialize metadata. */ 581 INIT_LIST_HEAD(&meta->list); 582 raw_spin_lock_init(&meta->lock); 583 meta->state = KFENCE_OBJECT_UNUSED; 584 meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ 585 list_add_tail(&meta->list, &kfence_freelist); 586 587 /* Protect the right redzone. */ 588 if (unlikely(!kfence_protect(addr + PAGE_SIZE))) 589 return addr; 590 591 addr += 2 * PAGE_SIZE; 592 } 593 594 /* 595 * The pool is live and will never be deallocated from this point on. 596 * Remove the pool object from the kmemleak object tree, as it would 597 * otherwise overlap with allocations returned by kfence_alloc(), which 598 * are registered with kmemleak through the slab post-alloc hook. 599 */ 600 kmemleak_free(__kfence_pool); 601 602 return 0; 603 } 604 605 static bool __init kfence_init_pool_early(void) 606 { 607 unsigned long addr; 608 609 if (!__kfence_pool) 610 return false; 611 612 addr = kfence_init_pool(); 613 614 if (!addr) 615 return true; 616 617 /* 618 * Only release unprotected pages, and do not try to go back and change 619 * page attributes due to risk of failing to do so as well. If changing 620 * page attributes for some pages fails, it is very likely that it also 621 * fails for the first page, and therefore expect addr==__kfence_pool in 622 * most failure cases. 623 */ 624 for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) { 625 struct slab *slab = virt_to_slab(p); 626 627 if (!slab) 628 continue; 629 #ifdef CONFIG_MEMCG 630 slab->memcg_data = 0; 631 #endif 632 __folio_clear_slab(slab_folio(slab)); 633 } 634 memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); 635 __kfence_pool = NULL; 636 return false; 637 } 638 639 static bool kfence_init_pool_late(void) 640 { 641 unsigned long addr, free_size; 642 643 addr = kfence_init_pool(); 644 645 if (!addr) 646 return true; 647 648 /* Same as above. */ 649 free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool); 650 #ifdef CONFIG_CONTIG_ALLOC 651 free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE); 652 #else 653 free_pages_exact((void *)addr, free_size); 654 #endif 655 __kfence_pool = NULL; 656 return false; 657 } 658 659 /* === DebugFS Interface ==================================================== */ 660 661 static int stats_show(struct seq_file *seq, void *v) 662 { 663 int i; 664 665 seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled)); 666 for (i = 0; i < KFENCE_COUNTER_COUNT; i++) 667 seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i])); 668 669 return 0; 670 } 671 DEFINE_SHOW_ATTRIBUTE(stats); 672 673 /* 674 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects. 675 * start_object() and next_object() return the object index + 1, because NULL is used 676 * to stop iteration. 677 */ 678 static void *start_object(struct seq_file *seq, loff_t *pos) 679 { 680 if (*pos < CONFIG_KFENCE_NUM_OBJECTS) 681 return (void *)((long)*pos + 1); 682 return NULL; 683 } 684 685 static void stop_object(struct seq_file *seq, void *v) 686 { 687 } 688 689 static void *next_object(struct seq_file *seq, void *v, loff_t *pos) 690 { 691 ++*pos; 692 if (*pos < CONFIG_KFENCE_NUM_OBJECTS) 693 return (void *)((long)*pos + 1); 694 return NULL; 695 } 696 697 static int show_object(struct seq_file *seq, void *v) 698 { 699 struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; 700 unsigned long flags; 701 702 raw_spin_lock_irqsave(&meta->lock, flags); 703 kfence_print_object(seq, meta); 704 raw_spin_unlock_irqrestore(&meta->lock, flags); 705 seq_puts(seq, "---------------------------------\n"); 706 707 return 0; 708 } 709 710 static const struct seq_operations object_seqops = { 711 .start = start_object, 712 .next = next_object, 713 .stop = stop_object, 714 .show = show_object, 715 }; 716 717 static int open_objects(struct inode *inode, struct file *file) 718 { 719 return seq_open(file, &object_seqops); 720 } 721 722 static const struct file_operations objects_fops = { 723 .open = open_objects, 724 .read = seq_read, 725 .llseek = seq_lseek, 726 .release = seq_release, 727 }; 728 729 static int __init kfence_debugfs_init(void) 730 { 731 struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL); 732 733 debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops); 734 debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); 735 return 0; 736 } 737 738 late_initcall(kfence_debugfs_init); 739 740 /* === Allocation Gate Timer ================================================ */ 741 742 static struct delayed_work kfence_timer; 743 744 #ifdef CONFIG_KFENCE_STATIC_KEYS 745 /* Wait queue to wake up allocation-gate timer task. */ 746 static DECLARE_WAIT_QUEUE_HEAD(allocation_wait); 747 748 static void wake_up_kfence_timer(struct irq_work *work) 749 { 750 wake_up(&allocation_wait); 751 } 752 static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer); 753 #endif 754 755 /* 756 * Set up delayed work, which will enable and disable the static key. We need to 757 * use a work queue (rather than a simple timer), since enabling and disabling a 758 * static key cannot be done from an interrupt. 759 * 760 * Note: Toggling a static branch currently causes IPIs, and here we'll end up 761 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with 762 * more aggressive sampling intervals), we could get away with a variant that 763 * avoids IPIs, at the cost of not immediately capturing allocations if the 764 * instructions remain cached. 765 */ 766 static void toggle_allocation_gate(struct work_struct *work) 767 { 768 if (!READ_ONCE(kfence_enabled)) 769 return; 770 771 atomic_set(&kfence_allocation_gate, 0); 772 #ifdef CONFIG_KFENCE_STATIC_KEYS 773 /* Enable static key, and await allocation to happen. */ 774 static_branch_enable(&kfence_allocation_key); 775 776 if (sysctl_hung_task_timeout_secs) { 777 /* 778 * During low activity with no allocations we might wait a 779 * while; let's avoid the hung task warning. 780 */ 781 wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), 782 sysctl_hung_task_timeout_secs * HZ / 2); 783 } else { 784 wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate)); 785 } 786 787 /* Disable static key and reset timer. */ 788 static_branch_disable(&kfence_allocation_key); 789 #endif 790 queue_delayed_work(system_unbound_wq, &kfence_timer, 791 msecs_to_jiffies(kfence_sample_interval)); 792 } 793 794 /* === Public interface ===================================================== */ 795 796 void __init kfence_alloc_pool(void) 797 { 798 if (!kfence_sample_interval) 799 return; 800 801 __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); 802 803 if (!__kfence_pool) 804 pr_err("failed to allocate pool\n"); 805 } 806 807 static void kfence_init_enable(void) 808 { 809 if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS)) 810 static_branch_enable(&kfence_allocation_key); 811 812 if (kfence_deferrable) 813 INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate); 814 else 815 INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate); 816 817 WRITE_ONCE(kfence_enabled, true); 818 queue_delayed_work(system_unbound_wq, &kfence_timer, 0); 819 820 pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, 821 CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool, 822 (void *)(__kfence_pool + KFENCE_POOL_SIZE)); 823 } 824 825 void __init kfence_init(void) 826 { 827 stack_hash_seed = (u32)random_get_entropy(); 828 829 /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */ 830 if (!kfence_sample_interval) 831 return; 832 833 if (!kfence_init_pool_early()) { 834 pr_err("%s failed\n", __func__); 835 return; 836 } 837 838 kfence_init_enable(); 839 } 840 841 static int kfence_init_late(void) 842 { 843 const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE; 844 #ifdef CONFIG_CONTIG_ALLOC 845 struct page *pages; 846 847 pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL); 848 if (!pages) 849 return -ENOMEM; 850 __kfence_pool = page_to_virt(pages); 851 #else 852 if (nr_pages > MAX_ORDER_NR_PAGES) { 853 pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n"); 854 return -EINVAL; 855 } 856 __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); 857 if (!__kfence_pool) 858 return -ENOMEM; 859 #endif 860 861 if (!kfence_init_pool_late()) { 862 pr_err("%s failed\n", __func__); 863 return -EBUSY; 864 } 865 866 kfence_init_enable(); 867 return 0; 868 } 869 870 static int kfence_enable_late(void) 871 { 872 if (!__kfence_pool) 873 return kfence_init_late(); 874 875 WRITE_ONCE(kfence_enabled, true); 876 queue_delayed_work(system_unbound_wq, &kfence_timer, 0); 877 return 0; 878 } 879 880 void kfence_shutdown_cache(struct kmem_cache *s) 881 { 882 unsigned long flags; 883 struct kfence_metadata *meta; 884 int i; 885 886 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { 887 bool in_use; 888 889 meta = &kfence_metadata[i]; 890 891 /* 892 * If we observe some inconsistent cache and state pair where we 893 * should have returned false here, cache destruction is racing 894 * with either kmem_cache_alloc() or kmem_cache_free(). Taking 895 * the lock will not help, as different critical section 896 * serialization will have the same outcome. 897 */ 898 if (READ_ONCE(meta->cache) != s || 899 READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED) 900 continue; 901 902 raw_spin_lock_irqsave(&meta->lock, flags); 903 in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED; 904 raw_spin_unlock_irqrestore(&meta->lock, flags); 905 906 if (in_use) { 907 /* 908 * This cache still has allocations, and we should not 909 * release them back into the freelist so they can still 910 * safely be used and retain the kernel's default 911 * behaviour of keeping the allocations alive (leak the 912 * cache); however, they effectively become "zombie 913 * allocations" as the KFENCE objects are the only ones 914 * still in use and the owning cache is being destroyed. 915 * 916 * We mark them freed, so that any subsequent use shows 917 * more useful error messages that will include stack 918 * traces of the user of the object, the original 919 * allocation, and caller to shutdown_cache(). 920 */ 921 kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true); 922 } 923 } 924 925 for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { 926 meta = &kfence_metadata[i]; 927 928 /* See above. */ 929 if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED) 930 continue; 931 932 raw_spin_lock_irqsave(&meta->lock, flags); 933 if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED) 934 meta->cache = NULL; 935 raw_spin_unlock_irqrestore(&meta->lock, flags); 936 } 937 } 938 939 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) 940 { 941 unsigned long stack_entries[KFENCE_STACK_DEPTH]; 942 size_t num_stack_entries; 943 u32 alloc_stack_hash; 944 945 /* 946 * Perform size check before switching kfence_allocation_gate, so that 947 * we don't disable KFENCE without making an allocation. 948 */ 949 if (size > PAGE_SIZE) { 950 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); 951 return NULL; 952 } 953 954 /* 955 * Skip allocations from non-default zones, including DMA. We cannot 956 * guarantee that pages in the KFENCE pool will have the requested 957 * properties (e.g. reside in DMAable memory). 958 */ 959 if ((flags & GFP_ZONEMASK) || 960 (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) { 961 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); 962 return NULL; 963 } 964 965 if (atomic_inc_return(&kfence_allocation_gate) > 1) 966 return NULL; 967 #ifdef CONFIG_KFENCE_STATIC_KEYS 968 /* 969 * waitqueue_active() is fully ordered after the update of 970 * kfence_allocation_gate per atomic_inc_return(). 971 */ 972 if (waitqueue_active(&allocation_wait)) { 973 /* 974 * Calling wake_up() here may deadlock when allocations happen 975 * from within timer code. Use an irq_work to defer it. 976 */ 977 irq_work_queue(&wake_up_kfence_timer_work); 978 } 979 #endif 980 981 if (!READ_ONCE(kfence_enabled)) 982 return NULL; 983 984 num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0); 985 986 /* 987 * Do expensive check for coverage of allocation in slow-path after 988 * allocation_gate has already become non-zero, even though it might 989 * mean not making any allocation within a given sample interval. 990 * 991 * This ensures reasonable allocation coverage when the pool is almost 992 * full, including avoiding long-lived allocations of the same source 993 * filling up the pool (e.g. pagecache allocations). 994 */ 995 alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); 996 if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { 997 atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]); 998 return NULL; 999 } 1000 1001 return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries, 1002 alloc_stack_hash); 1003 } 1004 1005 size_t kfence_ksize(const void *addr) 1006 { 1007 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); 1008 1009 /* 1010 * Read locklessly -- if there is a race with __kfence_alloc(), this is 1011 * either a use-after-free or invalid access. 1012 */ 1013 return meta ? meta->size : 0; 1014 } 1015 1016 void *kfence_object_start(const void *addr) 1017 { 1018 const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); 1019 1020 /* 1021 * Read locklessly -- if there is a race with __kfence_alloc(), this is 1022 * either a use-after-free or invalid access. 1023 */ 1024 return meta ? (void *)meta->addr : NULL; 1025 } 1026 1027 void __kfence_free(void *addr) 1028 { 1029 struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); 1030 1031 #ifdef CONFIG_MEMCG 1032 KFENCE_WARN_ON(meta->objcg); 1033 #endif 1034 /* 1035 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing 1036 * the object, as the object page may be recycled for other-typed 1037 * objects once it has been freed. meta->cache may be NULL if the cache 1038 * was destroyed. 1039 */ 1040 if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU))) 1041 call_rcu(&meta->rcu_head, rcu_guarded_free); 1042 else 1043 kfence_guarded_free(addr, meta, false); 1044 } 1045 1046 bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) 1047 { 1048 const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; 1049 struct kfence_metadata *to_report = NULL; 1050 enum kfence_error_type error_type; 1051 unsigned long flags; 1052 1053 if (!is_kfence_address((void *)addr)) 1054 return false; 1055 1056 if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */ 1057 return kfence_unprotect(addr); /* ... unprotect and proceed. */ 1058 1059 atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); 1060 1061 if (page_index % 2) { 1062 /* This is a redzone, report a buffer overflow. */ 1063 struct kfence_metadata *meta; 1064 int distance = 0; 1065 1066 meta = addr_to_metadata(addr - PAGE_SIZE); 1067 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { 1068 to_report = meta; 1069 /* Data race ok; distance calculation approximate. */ 1070 distance = addr - data_race(meta->addr + meta->size); 1071 } 1072 1073 meta = addr_to_metadata(addr + PAGE_SIZE); 1074 if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) { 1075 /* Data race ok; distance calculation approximate. */ 1076 if (!to_report || distance > data_race(meta->addr) - addr) 1077 to_report = meta; 1078 } 1079 1080 if (!to_report) 1081 goto out; 1082 1083 raw_spin_lock_irqsave(&to_report->lock, flags); 1084 to_report->unprotected_page = addr; 1085 error_type = KFENCE_ERROR_OOB; 1086 1087 /* 1088 * If the object was freed before we took the look we can still 1089 * report this as an OOB -- the report will simply show the 1090 * stacktrace of the free as well. 1091 */ 1092 } else { 1093 to_report = addr_to_metadata(addr); 1094 if (!to_report) 1095 goto out; 1096 1097 raw_spin_lock_irqsave(&to_report->lock, flags); 1098 error_type = KFENCE_ERROR_UAF; 1099 /* 1100 * We may race with __kfence_alloc(), and it is possible that a 1101 * freed object may be reallocated. We simply report this as a 1102 * use-after-free, with the stack trace showing the place where 1103 * the object was re-allocated. 1104 */ 1105 } 1106 1107 out: 1108 if (to_report) { 1109 kfence_report_error(addr, is_write, regs, to_report, error_type); 1110 raw_spin_unlock_irqrestore(&to_report->lock, flags); 1111 } else { 1112 /* This may be a UAF or OOB access, but we can't be sure. */ 1113 kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID); 1114 } 1115 1116 return kfence_unprotect(addr); /* Unprotect and let access proceed. */ 1117 } 1118