xref: /openbmc/linux/mm/kfence/core.c (revision b625fe69)
// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning, assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond))                                          \
			WRITE_ONCE(kfence_enabled, false);                     \
		__cond;                                                        \
	})

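/*
 * Note: as a GNU statement expression, KFENCE_WARN_ON() also evaluates to the
 * warned-on condition, so callers can disable KFENCE and branch on the result
 * in one step, e.g. "return !KFENCE_WARN_ON(...)" in kfence_protect() below.
 */
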
/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

	*((unsigned long *)kp->arg) = num;
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);

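/*
 * Illustrative usage: with the "kfence." prefix above, the sample interval can
 * be set at boot via the kernel command line ("kfence.sample_interval=100"),
 * or read/written at runtime via
 * /sys/module/kfence/parameters/sample_interval. Writing 0 disables KFENCE;
 * re-enabling after boot is rejected with -EINVAL (see
 * param_set_sample_interval()).
 */
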
/* Pool usage threshold in percent, above which currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or, if static keys are not
 * used to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];

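/*
 * Worked example (illustrative, assuming the default CONFIG_KFENCE_NUM_OBJECTS
 * of 255): ALLOC_COVERED_ORDER = const_ilog2(255) + 2 = 9, so the filter has
 * ALLOC_COVERED_SIZE = 512 counters; each allocation's stack hash increments
 * ALLOC_COVERED_HNUM = 2 of them, the second index derived by re-hashing the
 * first with hash_32().
 */
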
/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making it less likely that the same stack
 * traces collide in the same way across reboots and different machines.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

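/*
 * Note: filter_irq_stacks() truncates the trace at the IRQ entry point, so
 * that allocations made in interrupt context hash identically regardless of
 * which task context was interrupted.
 */
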
/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}

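/*
 * Pool layout assumed by the index calculation above and by
 * metadata_to_pageaddr() below (illustrative): 2 leading guard pages, then
 * one data page plus one guard page per object:
 *
 *	page index:  0       1       2       3       4       5
 *	           [guard] [guard] [obj 0] [guard] [obj 1] [guard] ...
 *
 * E.g. an address in page 2 is at pool offset 2 * PAGE_SIZE, giving index
 * (2 * PAGE_SIZE) / (2 * PAGE_SIZE) - 1 == 0, i.e. kfence_metadata[0].
 */
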
static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	struct kfence_metadata *meta;
	unsigned long flags;

	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	meta = addr_to_metadata((unsigned long)addr);
	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return false;
}

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}

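/*
 * Note: KFENCE_CANARY_PATTERN() (see kfence.h) mixes the low bits of each
 * byte's own address into the pattern, so redzone data that is merely shifted
 * by a few bytes still fails the per-address check.
 */
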
static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct slab *slab;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization,
	 * prandom_u32_max() will always return zero. We still benefit from
	 * enabling KFENCE as early as possible, even when the RNG is not yet
	 * available, as this will allow KFENCE to detect bugs due to earlier
	 * allocations. The only downside is that the out-of-bounds accesses
	 * detected are deterministic for such allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Set required slab fields. */
	slab = virt_to_slab((void *)meta->addr);
	slab->slab_cache = cache;
#if defined(CONFIG_SLUB)
	slab->objects = 1;
#elif defined(CONFIG_SLAB)
	slab->s_mem = addr;
#endif

	/* Memory initialization. */
	for_each_canary(meta, set_canary_byte);

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

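/*
 * Design note: placing the object either at the start of the data page or
 * (subject to cache alignment) at its end means an out-of-bounds access in
 * the corresponding direction faults on a guard page immediately; the other
 * side is covered by the canary bytes checked on free.
 */
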
static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;
	bool init;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!__kfence_pool)
		return false;

	if (!arch_kfence_init_pool())
		goto err;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			goto err;

		__SetPageSlab(&pages[i]);
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			goto err;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto err;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove the pool object from the kmemleak object tree, as it would
	 * otherwise overlap with allocations returned by kfence_alloc(), which
	 * are registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return true;

err:
	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

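/*
 * Illustrative output of /sys/kernel/debug/kfence/stats (counter values are
 * hypothetical; the format follows stats_show() above):
 *
 *	enabled: 1
 *	currently allocated: 17
 *	total allocations: 4923
 *	total frees: 4906
 *	zombie allocations: 0
 *	total bugs: 1
 *	skipped allocations (incompatible): 12
 *	skipped allocations (capacity): 0
 *	skipped allocations (covered): 39
 */
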
/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);

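/*
 * E.g. with the default CONFIG_KFENCE_SAMPLE_INTERVAL of 100 (ms), the work
 * item above re-arms itself so that the allocation gate opens at most once
 * per ~100ms window, and thus at most one guarded allocation is set up per
 * window.
 */
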
/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	stack_hash_seed = (u32)random_get_entropy();
	if (!kfence_init_pool()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);
	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist, so that they can
			 * still safely be used and we retain the kernel's
			 * default behaviour of keeping the allocations alive
			 * (leak the cache); however, they effectively become
			 * "zombie allocations" as the KFENCE objects are the
			 * only ones still in use and the owning cache is being
			 * destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and the caller of kfence_shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}

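/*
 * Note: allocations normally reach __kfence_alloc() via the kfence_alloc()
 * inline wrapper in include/linux/kfence.h, which checks
 * kfence_allocation_key (or, without static keys, the allocation gate) first,
 * so the sampled-out fast path costs little more than a branch.
 */
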
size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock, we can
		 * still report this as an OOB -- the report will simply show
		 * the stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}