xref: /openbmc/linux/mm/kfence/core.c (revision 360823a09426347ea8f232b0b0b5156d0aed0302)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * KFENCE guarded object allocator and fault handling.
4  *
5  * Copyright (C) 2020, Google LLC.
6  */
7 
8 #define pr_fmt(fmt) "kfence: " fmt
9 
10 #include <linux/atomic.h>
11 #include <linux/bug.h>
12 #include <linux/debugfs.h>
13 #include <linux/hash.h>
14 #include <linux/irq_work.h>
15 #include <linux/jhash.h>
16 #include <linux/kcsan-checks.h>
17 #include <linux/kfence.h>
18 #include <linux/kmemleak.h>
19 #include <linux/list.h>
20 #include <linux/lockdep.h>
21 #include <linux/log2.h>
22 #include <linux/memblock.h>
23 #include <linux/moduleparam.h>
24 #include <linux/nodemask.h>
25 #include <linux/notifier.h>
26 #include <linux/panic_notifier.h>
27 #include <linux/random.h>
28 #include <linux/rcupdate.h>
29 #include <linux/sched/clock.h>
30 #include <linux/seq_file.h>
31 #include <linux/slab.h>
32 #include <linux/spinlock.h>
33 #include <linux/string.h>
34 
35 #include <asm/kfence.h>
36 
37 #include "kfence.h"
38 
39 /* Disables KFENCE on the first warning assuming an irrecoverable error. */
40 #define KFENCE_WARN_ON(cond)                                                   \
41 	({                                                                     \
42 		const bool __cond = WARN_ON(cond);                             \
43 		if (unlikely(__cond)) {                                        \
44 			WRITE_ONCE(kfence_enabled, false);                     \
45 			disabled_by_warn = true;                               \
46 		}                                                              \
47 		__cond;                                                        \
48 	})
49 
50 /* === Data ================================================================= */
51 
52 static bool kfence_enabled __read_mostly;
53 static bool disabled_by_warn __read_mostly;
54 
55 unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
56 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
57 
58 #ifdef MODULE_PARAM_PREFIX
59 #undef MODULE_PARAM_PREFIX
60 #endif
61 #define MODULE_PARAM_PREFIX "kfence."
62 
63 static int kfence_enable_late(void);
64 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
65 {
66 	unsigned long num;
67 	int ret = kstrtoul(val, 0, &num);
68 
69 	if (ret < 0)
70 		return ret;
71 
72 	/* Using 0 to indicate KFENCE is disabled. */
73 	if (!num && READ_ONCE(kfence_enabled)) {
74 		pr_info("disabled\n");
75 		WRITE_ONCE(kfence_enabled, false);
76 	}
77 
78 	*((unsigned long *)kp->arg) = num;
79 
80 	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
81 		return disabled_by_warn ? -EINVAL : kfence_enable_late();
82 	return 0;
83 }
84 
85 static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
86 {
87 	if (!READ_ONCE(kfence_enabled))
88 		return sprintf(buffer, "0\n");
89 
90 	return param_get_ulong(buffer, kp);
91 }
92 
93 static const struct kernel_param_ops sample_interval_param_ops = {
94 	.set = param_set_sample_interval,
95 	.get = param_get_sample_interval,
96 };
97 module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
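/*
 * Example usage: boot with "kfence.sample_interval=100" to sample an
 * allocation roughly every 100 milliseconds, or (as root) write to
 * /sys/module/kfence/parameters/sample_interval at runtime; writing 0
 * disables KFENCE (see param_set_sample_interval() above).
 */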
98 
99 /* Pool usage% threshold when currently covered allocations are skipped. */
100 static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
101 module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);
102 
103 /* If true, use a deferrable timer. */
104 static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
105 module_param_named(deferrable, kfence_deferrable, bool, 0444);
106 
107 /* If true, check all canary bytes on panic. */
108 static bool kfence_check_on_panic __read_mostly;
109 module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);
110 
111 /* The pool of pages used for guard pages and objects. */
112 char *__kfence_pool __read_mostly;
113 EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */
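/*
 * Pool layout established by kfence_init_pool() below ("G" pages are
 * protected guard pages, "objN" pages back kfence_metadata[N]):
 *
 *	+---+---+------+---+------+---+--
 *	| G | G | obj0 | G | obj1 | G | ...
 *	+---+---+------+---+------+---+--
 *	 pg0 pg1  pg2   pg3  pg4   pg5
 */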
114 
115 /*
116  * Per-object metadata, with one-to-one mapping of object metadata to
117  * backing pages (in __kfence_pool).
118  */
119 static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
120 struct kfence_metadata *kfence_metadata __read_mostly;
121 
122 /*
123  * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
124  * So introduce kfence_metadata_init to initialize metadata, and then make
125  * kfence_metadata visible after initialization is successful. This prevents
126  * potential UAF or access to uninitialized metadata.
127  */
128 static struct kfence_metadata *kfence_metadata_init __read_mostly;
129 
130 /* Freelist with available objects. */
131 static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
132 static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */
133 
134 /*
135  * The static key to set up a KFENCE allocation; or if static keys are not used
136  * to gate allocations, to avoid a load and compare if KFENCE is disabled.
137  */
138 DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
139 
140 /* Gates the allocation, ensuring only one succeeds in a given period. */
141 atomic_t kfence_allocation_gate = ATOMIC_INIT(1);
142 
143 /*
144  * A Counting Bloom filter of allocation coverage: limits currently covered
145  * allocations of the same source filling up the pool.
146  *
147  * Assuming a range of 15%-85% unique allocations in the pool at any point in
148  * time, the below parameters provide a probability of 0.02-0.33 for false
149  * positive hits respectively:
150  *
151  *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE))) ^ HNUM
152  */
153 #define ALLOC_COVERED_HNUM	2
154 #define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
155 #define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
156 #define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
157 #define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
158 static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
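/*
 * Example sizing, assuming the default CONFIG_KFENCE_NUM_OBJECTS == 255:
 * ALLOC_COVERED_ORDER == const_ilog2(255) + 2 == 9, giving SIZE == 512
 * counters, of which each allocation increments HNUM == 2. With 85% unique
 * allocations, alloc_traces == 217 and
 * P == (1 - e^(-2 * 217/512))^2 ~= 0.33, the upper bound quoted above.
 */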
159 
160 /* Stack depth used to determine uniqueness of an allocation. */
161 #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)
162 
163 /*
164  * Randomness for stack hashes, making the same collisions across reboots and
165  * different machines less likely.
166  */
167 static u32 stack_hash_seed __ro_after_init;
168 
169 /* Statistics counters for debugfs. */
170 enum kfence_counter_id {
171 	KFENCE_COUNTER_ALLOCATED,
172 	KFENCE_COUNTER_ALLOCS,
173 	KFENCE_COUNTER_FREES,
174 	KFENCE_COUNTER_ZOMBIES,
175 	KFENCE_COUNTER_BUGS,
176 	KFENCE_COUNTER_SKIP_INCOMPAT,
177 	KFENCE_COUNTER_SKIP_CAPACITY,
178 	KFENCE_COUNTER_SKIP_COVERED,
179 	KFENCE_COUNTER_COUNT,
180 };
181 static atomic_long_t counters[KFENCE_COUNTER_COUNT];
182 static const char *const counter_names[] = {
183 	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
184 	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
185 	[KFENCE_COUNTER_FREES]		= "total frees",
186 	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
187 	[KFENCE_COUNTER_BUGS]		= "total bugs",
188 	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
189 	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
190 	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
191 };
192 static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);
193 
194 /* === Internals ============================================================ */
195 
196 static inline bool should_skip_covered(void)
197 {
198 	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;
199 
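	/*
	 * E.g. with the default CONFIG_KFENCE_NUM_OBJECTS == 255 and
	 * skip_covered_thresh == 75, thresh == 191: covered allocations are
	 * skipped once more than 191 objects are currently allocated.
	 */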
200 	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
201 }
202 
203 static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
204 {
205 	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
206 	num_entries = filter_irq_stacks(stack_entries, num_entries);
207 	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
208 }
209 
210 /*
211  * Adds (or subtracts) count @val for allocation stack trace hash
212  * @alloc_stack_hash from Counting Bloom filter.
213  */
214 static void alloc_covered_add(u32 alloc_stack_hash, int val)
215 {
216 	int i;
217 
218 	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
219 		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
220 		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
221 	}
222 }
223 
224 /*
225  * Returns true if the allocation stack trace hash @alloc_stack_hash is
226  * currently contained (non-zero count) in Counting Bloom filter.
227  */
228 static bool alloc_covered_contains(u32 alloc_stack_hash)
229 {
230 	int i;
231 
232 	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
233 		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
234 			return false;
235 		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
236 	}
237 
238 	return true;
239 }
240 
241 static bool kfence_protect(unsigned long addr)
242 {
243 	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
244 }
245 
246 static bool kfence_unprotect(unsigned long addr)
247 {
248 	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
249 }
250 
251 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
252 {
253 	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
254 	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
255 
256 	/* The checks do not affect performance; only called from slow-paths. */
257 
258 	/* Only call with a pointer into kfence_metadata. */
259 	if (KFENCE_WARN_ON(meta < kfence_metadata ||
260 			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
261 		return 0;
262 
263 	/*
264 	 * This metadata object only ever maps to 1 page; verify that the stored
265 	 * address is in the expected range.
266 	 */
267 	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
268 		return 0;
269 
270 	return pageaddr;
271 }
272 
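/*
 * Example for metadata_to_pageaddr() above, assuming PAGE_SIZE == 4096:
 * kfence_metadata[0] maps to __kfence_pool + 8192, i.e. the third pool page;
 * pages 0 and 1 are the leading guard pages, and the object page for index i
 * (pool page 2*i + 2) is always followed by its own guard page (2*i + 3).
 */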
273 /*
274  * Update the object's metadata state, including updating the alloc/free stacks
275  * depending on the state transition.
276  */
277 static noinline void
278 metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
279 		      unsigned long *stack_entries, size_t num_stack_entries)
280 {
281 	struct kfence_track *track =
282 		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
283 
284 	lockdep_assert_held(&meta->lock);
285 
286 	if (stack_entries) {
287 		memcpy(track->stack_entries, stack_entries,
288 		       num_stack_entries * sizeof(stack_entries[0]));
289 	} else {
290 		/*
291 		 * Skip over 1 (this) function; noinline ensures we do not
292 		 * accidentally skip over the caller by never inlining.
293 		 */
294 		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
295 	}
296 	track->num_stack_entries = num_stack_entries;
297 	track->pid = task_pid_nr(current);
298 	track->cpu = raw_smp_processor_id();
299 	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */
300 
301 	/*
302 	 * Pairs with READ_ONCE() in
303 	 *	kfence_shutdown_cache(),
304 	 *	kfence_handle_page_fault().
305 	 */
306 	WRITE_ONCE(meta->state, next);
307 }
308 
309 /* Check canary byte at @addr. */
310 static inline bool check_canary_byte(u8 *addr)
311 {
312 	struct kfence_metadata *meta;
313 	unsigned long flags;
314 
315 	if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
316 		return true;
317 
318 	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
319 
320 	meta = addr_to_metadata((unsigned long)addr);
321 	raw_spin_lock_irqsave(&meta->lock, flags);
322 	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
323 	raw_spin_unlock_irqrestore(&meta->lock, flags);
324 
325 	return false;
326 }
327 
328 static inline void set_canary(const struct kfence_metadata *meta)
329 {
330 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
331 	unsigned long addr = pageaddr;
332 
333 	/*
334 	 * The canary may be written over part of the object memory, which is
335 	 * fine: the user is expected to initialize the object before use.
336 	 */
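	/*
	 * Example: for an 8-byte aligned meta->addr and meta->size == 13, the
	 * right-hand canary below starts at ALIGN_DOWN(meta->addr + 13, 8) ==
	 * meta->addr + 8, so the last 5 object bytes hold the canary pattern
	 * until the user writes them.
	 */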
337 	for (; addr < meta->addr; addr += sizeof(u64))
338 		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
339 
340 	addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
341 	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
342 		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
343 }
344 
345 static inline void check_canary(const struct kfence_metadata *meta)
346 {
347 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
348 	unsigned long addr = pageaddr;
349 
350 	/*
351 	 * We'll iterate over each canary byte per-side until a corrupted byte
352 	 * is found. However, we'll still iterate over the canary bytes to the
353 	 * right of the object even if there was an error in the canary bytes to
354 	 * the left of the object. Specifically, if check_canary_byte()
355 	 * generates an error, showing both sides might give more clues as to
356 	 * what the error is about when displaying which bytes were corrupted.
357 	 */
358 
359 	/* Apply to left of object. */
360 	for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
361 		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
362 			break;
363 	}
364 
365 	/*
366 	 * If the canary is corrupted within a u64 word, or the canary memory
367 	 * cannot be completely covered by whole u64 words, the remaining bytes
368 	 * need to be checked one by one.
369 	 */
370 	for (; addr < meta->addr; addr++) {
371 		if (unlikely(!check_canary_byte((u8 *)addr)))
372 			break;
373 	}
374 
375 	/* Apply to right of object. */
376 	for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
377 		if (unlikely(!check_canary_byte((u8 *)addr)))
378 			return;
379 	}
380 	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
381 		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
382 
383 			for (; addr - pageaddr < PAGE_SIZE; addr++) {
384 				if (!check_canary_byte((u8 *)addr))
385 					return;
386 			}
387 		}
388 	}
389 }
390 
391 static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
392 				  unsigned long *stack_entries, size_t num_stack_entries,
393 				  u32 alloc_stack_hash)
394 {
395 	struct kfence_metadata *meta = NULL;
396 	unsigned long flags;
397 	struct slab *slab;
398 	void *addr;
399 	const bool random_right_allocate = get_random_u32_below(2);
400 	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
401 				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
402 
403 	/* Try to obtain a free object. */
404 	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
405 	if (!list_empty(&kfence_freelist)) {
406 		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
407 		list_del_init(&meta->list);
408 	}
409 	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
410 	if (!meta) {
411 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
412 		return NULL;
413 	}
414 
415 	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
416 		/*
417 		 * This is extremely unlikely -- we are reporting on a
418 		 * use-after-free, which locked meta->lock, and the reporting
419 		 * code via printk calls kmalloc() which ends up in
420 		 * kfence_alloc() and tries to grab the same object that we're
421 		 * reporting on. While it has never been observed, lockdep does
422 		 * report that there is a possibility of deadlock. Fix it by
423 		 * using trylock and bailing out gracefully.
424 		 */
425 		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
426 		/* Put the object back on the freelist. */
427 		list_add_tail(&meta->list, &kfence_freelist);
428 		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
429 
430 		return NULL;
431 	}
432 
433 	meta->addr = metadata_to_pageaddr(meta);
434 	/* Unprotect if we're reusing this page. */
435 	if (meta->state == KFENCE_OBJECT_FREED)
436 		kfence_unprotect(meta->addr);
437 
438 	/*
439 	 * Note: for allocations made before RNG initialization, get_random_u32_below()
440 	 * will always return zero. We still benefit from enabling KFENCE as early as
441 	 * possible, even when the RNG is not yet available, as this will allow
442 	 * KFENCE to detect bugs due to earlier allocations. The only downside
443 	 * is that the out-of-bounds accesses detected are deterministic for
444 	 * such allocations.
445 	 */
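	/*
	 * Example: with PAGE_SIZE == 4096, size == 13 and cache->align == 8,
	 * the "right" placement below computes page + 4083 and rounds it down
	 * to page + 4080, leaving 3 unused bytes between the object's end and
	 * the guard page that follows it.
	 */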
446 	if (random_right_allocate) {
447 		/* Allocate on the "right" side, re-calculate address. */
448 		meta->addr += PAGE_SIZE - size;
449 		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
450 	}
451 
452 	addr = (void *)meta->addr;
453 
454 	/* Update remaining metadata. */
455 	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
456 	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
457 	WRITE_ONCE(meta->cache, cache);
458 	meta->size = size;
459 	meta->alloc_stack_hash = alloc_stack_hash;
460 	raw_spin_unlock_irqrestore(&meta->lock, flags);
461 
462 	alloc_covered_add(alloc_stack_hash, 1);
463 
464 	/* Set required slab fields. */
465 	slab = virt_to_slab((void *)meta->addr);
466 	slab->slab_cache = cache;
467 #if defined(CONFIG_SLUB)
468 	slab->objects = 1;
469 #elif defined(CONFIG_SLAB)
470 	slab->s_mem = addr;
471 #endif
472 
473 	/* Memory initialization. */
474 	set_canary(meta);
475 
476 	/*
477 	 * We check slab_want_init_on_alloc() ourselves, rather than letting
478 	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
479 	 * redzone.
480 	 */
481 	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
482 		memzero_explicit(addr, size);
483 	if (cache->ctor)
484 		cache->ctor(addr);
485 
486 	if (random_fault)
487 		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
488 
489 	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
490 	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);
491 
492 	return addr;
493 }
494 
495 static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
496 {
497 	struct kcsan_scoped_access assert_page_exclusive;
498 	unsigned long flags;
499 	bool init;
500 
501 	raw_spin_lock_irqsave(&meta->lock, flags);
502 
503 	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
504 		/* Invalid or double-free, bail out. */
505 		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
506 		kfence_report_error((unsigned long)addr, false, NULL, meta,
507 				    KFENCE_ERROR_INVALID_FREE);
508 		raw_spin_unlock_irqrestore(&meta->lock, flags);
509 		return;
510 	}
511 
512 	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
513 	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
514 				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
515 				  &assert_page_exclusive);
516 
517 	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
518 		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */
519 
520 	/* Restore page protection if there was an OOB access. */
521 	if (meta->unprotected_page) {
522 		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
523 		kfence_protect(meta->unprotected_page);
524 		meta->unprotected_page = 0;
525 	}
526 
527 	/* Mark the object as freed. */
528 	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
529 	init = slab_want_init_on_free(meta->cache);
530 	raw_spin_unlock_irqrestore(&meta->lock, flags);
531 
532 	alloc_covered_add(meta->alloc_stack_hash, -1);
533 
534 	/* Check canary bytes for memory corruption. */
535 	check_canary(meta);
536 
537 	/*
538 	 * Clear memory if init-on-free is set. While we protect the page, the
539 	 * data is still there, and after a use-after-free is detected, we
540 	 * unprotect the page, so the data is still accessible.
541 	 */
542 	if (!zombie && unlikely(init))
543 		memzero_explicit(addr, meta->size);
544 
545 	/* Protect to detect use-after-frees. */
546 	kfence_protect((unsigned long)addr);
547 
548 	kcsan_end_scoped_access(&assert_page_exclusive);
549 	if (!zombie) {
550 		/* Add it to the tail of the freelist for reuse. */
551 		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
552 		KFENCE_WARN_ON(!list_empty(&meta->list));
553 		list_add_tail(&meta->list, &kfence_freelist);
554 		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
555 
556 		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
557 		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
558 	} else {
559 		/* See kfence_shutdown_cache(). */
560 		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
561 	}
562 }
563 
564 static void rcu_guarded_free(struct rcu_head *h)
565 {
566 	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
567 
568 	kfence_guarded_free((void *)meta->addr, meta, false);
569 }
570 
571 /*
572  * Initialization of the KFENCE pool after its allocation.
573  * Returns 0 on success; otherwise returns the address up to
574  * which partial initialization succeeded.
575  */
576 static unsigned long kfence_init_pool(void)
577 {
578 	unsigned long addr;
579 	struct page *pages;
580 	int i;
581 
582 	if (!arch_kfence_init_pool())
583 		return (unsigned long)__kfence_pool;
584 
585 	addr = (unsigned long)__kfence_pool;
586 	pages = virt_to_page(__kfence_pool);
587 
588 	/*
589 	 * Set up object pages: they must have PG_slab set, to avoid freeing
590 	 * these as real pages.
591 	 *
592 	 * We also want to avoid inserting kfence_free() in the kfree()
593 	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
594 	 * enters __slab_free() slow-path.
595 	 */
596 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
597 		struct slab *slab = page_slab(nth_page(pages, i));
598 
599 		if (!i || (i % 2))
600 			continue;
601 
602 		__folio_set_slab(slab_folio(slab));
603 #ifdef CONFIG_MEMCG
604 		slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg |
605 				   MEMCG_DATA_OBJCGS;
606 #endif
607 	}
608 
609 	/*
610 	 * Protect the first 2 pages. The first page is mostly unnecessary, and
611 	 * merely serves as an extended guard page. However, adding one
612 	 * additional page in the beginning gives us an even number of pages,
613 	 * which simplifies the mapping of address to metadata index.
614 	 */
615 	for (i = 0; i < 2; i++) {
616 		if (unlikely(!kfence_protect(addr)))
617 			return addr;
618 
619 		addr += PAGE_SIZE;
620 	}
621 
622 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
623 		struct kfence_metadata *meta = &kfence_metadata_init[i];
624 
625 		/* Initialize metadata. */
626 		INIT_LIST_HEAD(&meta->list);
627 		raw_spin_lock_init(&meta->lock);
628 		meta->state = KFENCE_OBJECT_UNUSED;
629 		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
630 		list_add_tail(&meta->list, &kfence_freelist);
631 
632 		/* Protect the right redzone. */
633 		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
634 			goto reset_slab;
635 
636 		addr += 2 * PAGE_SIZE;
637 	}
638 
639 	/*
640 	 * Make kfence_metadata visible only when initialization is successful.
641 	 * Otherwise, if the initialization fails and kfence_metadata is freed,
642 	 * it may cause UAF in kfence_shutdown_cache().
643 	 */
644 	smp_store_release(&kfence_metadata, kfence_metadata_init);
645 	return 0;
646 
647 reset_slab:
648 	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
649 		struct slab *slab = page_slab(nth_page(pages, i));
650 
651 		if (!i || (i % 2))
652 			continue;
653 #ifdef CONFIG_MEMCG
654 		slab->memcg_data = 0;
655 #endif
656 		__folio_clear_slab(slab_folio(slab));
657 	}
658 
659 	return addr;
660 }
661 
662 static bool __init kfence_init_pool_early(void)
663 {
664 	unsigned long addr;
665 
666 	if (!__kfence_pool)
667 		return false;
668 
669 	addr = kfence_init_pool();
670 
671 	if (!addr) {
672 		/*
673 		 * The pool is live and will never be deallocated from this point on.
674 		 * Ignore the pool object from the kmemleak phys object tree, as it would
675 		 * otherwise overlap with allocations returned by kfence_alloc(), which
676 		 * are registered with kmemleak through the slab post-alloc hook.
677 		 */
678 		kmemleak_ignore_phys(__pa(__kfence_pool));
679 		return true;
680 	}
681 
682 	/*
683 	 * Only release unprotected pages, and do not try to go back and change
684 	 * page attributes due to risk of failing to do so as well. If changing
685 	 * page attributes for some pages fails, it is very likely that it also
686 	 * fails for the first page, and therefore expect addr==__kfence_pool in
687 	 * most failure cases.
688 	 */
689 	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
690 	__kfence_pool = NULL;
691 
692 	memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
693 	kfence_metadata_init = NULL;
694 
695 	return false;
696 }
697 
698 /* === DebugFS Interface ==================================================== */
699 
700 static int stats_show(struct seq_file *seq, void *v)
701 {
702 	int i;
703 
704 	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
705 	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
706 		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));
707 
708 	return 0;
709 }
710 DEFINE_SHOW_ATTRIBUTE(stats);
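/*
 * Example /sys/kernel/debug/kfence/stats output (illustrative numbers):
 *
 *	enabled: 1
 *	currently allocated: 42
 *	total allocations: 3128
 *	total frees: 3086
 *	zombie allocations: 0
 *	total bugs: 0
 *	skipped allocations (incompatible): 17
 *	skipped allocations (capacity): 0
 *	skipped allocations (covered): 5
 */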
711 
712 /*
713  * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
714  * start_object() and next_object() return the object index + 1, because NULL is used
715  * to stop iteration.
716  */
717 static void *start_object(struct seq_file *seq, loff_t *pos)
718 {
719 	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
720 		return (void *)((long)*pos + 1);
721 	return NULL;
722 }
723 
724 static void stop_object(struct seq_file *seq, void *v)
725 {
726 }
727 
728 static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
729 {
730 	++*pos;
731 	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
732 		return (void *)((long)*pos + 1);
733 	return NULL;
734 }
735 
736 static int show_object(struct seq_file *seq, void *v)
737 {
738 	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
739 	unsigned long flags;
740 
741 	raw_spin_lock_irqsave(&meta->lock, flags);
742 	kfence_print_object(seq, meta);
743 	raw_spin_unlock_irqrestore(&meta->lock, flags);
744 	seq_puts(seq, "---------------------------------\n");
745 
746 	return 0;
747 }
748 
749 static const struct seq_operations objects_sops = {
750 	.start = start_object,
751 	.next = next_object,
752 	.stop = stop_object,
753 	.show = show_object,
754 };
755 DEFINE_SEQ_ATTRIBUTE(objects);
756 
757 static int kfence_debugfs_init(void)
758 {
759 	struct dentry *kfence_dir;
760 
761 	if (!READ_ONCE(kfence_enabled))
762 		return 0;
763 
764 	kfence_dir = debugfs_create_dir("kfence", NULL);
765 	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
766 	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
767 	return 0;
768 }
769 
770 late_initcall(kfence_debugfs_init);
771 
772 /* === Panic Notifier ====================================================== */
773 
774 static void kfence_check_all_canary(void)
775 {
776 	int i;
777 
778 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
779 		struct kfence_metadata *meta = &kfence_metadata[i];
780 
781 		if (meta->state == KFENCE_OBJECT_ALLOCATED)
782 			check_canary(meta);
783 	}
784 }
785 
786 static int kfence_check_canary_callback(struct notifier_block *nb,
787 					unsigned long reason, void *arg)
788 {
789 	kfence_check_all_canary();
790 	return NOTIFY_OK;
791 }
792 
793 static struct notifier_block kfence_check_canary_notifier = {
794 	.notifier_call = kfence_check_canary_callback,
795 };
796 
797 /* === Allocation Gate Timer ================================================ */
798 
799 static struct delayed_work kfence_timer;
800 
801 #ifdef CONFIG_KFENCE_STATIC_KEYS
802 /* Wait queue to wake up allocation-gate timer task. */
803 static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);
804 
805 static void wake_up_kfence_timer(struct irq_work *work)
806 {
807 	wake_up(&allocation_wait);
808 }
809 static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
810 #endif
811 
812 /*
813  * Set up delayed work, which will enable and disable the static key. We need to
814  * use a work queue (rather than a simple timer), since enabling and disabling a
815  * static key cannot be done from an interrupt.
816  *
817  * Note: Toggling a static branch currently causes IPIs, and here we'll end up
818  * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
819  * more aggressive sampling intervals), we could get away with a variant that
820  * avoids IPIs, at the cost of not immediately capturing allocations if the
821  * instructions remain cached.
822  */
823 static void toggle_allocation_gate(struct work_struct *work)
824 {
825 	if (!READ_ONCE(kfence_enabled))
826 		return;
827 
828 	atomic_set(&kfence_allocation_gate, 0);
829 #ifdef CONFIG_KFENCE_STATIC_KEYS
830 	/* Enable static key, and await allocation to happen. */
831 	static_branch_enable(&kfence_allocation_key);
832 
833 	wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
834 
835 	/* Disable static key and reset timer. */
836 	static_branch_disable(&kfence_allocation_key);
837 #endif
838 	queue_delayed_work(system_unbound_wq, &kfence_timer,
839 			   msecs_to_jiffies(kfence_sample_interval));
840 }
841 
842 /* === Public interface ===================================================== */
843 
844 void __init kfence_alloc_pool_and_metadata(void)
845 {
846 	if (!kfence_sample_interval)
847 		return;
848 
849 	/*
850 	 * If the pool has already been initialized by arch, there is no need to
851 	 * re-allocate the memory pool.
852 	 */
853 	if (!__kfence_pool)
854 		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
855 
856 	if (!__kfence_pool) {
857 		pr_err("failed to allocate pool\n");
858 		return;
859 	}
860 
861 	/* The memory allocated by memblock has been zeroed out. */
862 	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
863 	if (!kfence_metadata_init) {
864 		pr_err("failed to allocate metadata\n");
865 		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
866 		__kfence_pool = NULL;
867 	}
868 }
869 
870 static void kfence_init_enable(void)
871 {
872 	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
873 		static_branch_enable(&kfence_allocation_key);
874 
875 	if (kfence_deferrable)
876 		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
877 	else
878 		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);
879 
880 	if (kfence_check_on_panic)
881 		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
882 
883 	WRITE_ONCE(kfence_enabled, true);
884 	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
885 
886 	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
887 		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
888 		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
889 }
890 
891 void __init kfence_init(void)
892 {
893 	stack_hash_seed = get_random_u32();
894 
895 	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
896 	if (!kfence_sample_interval)
897 		return;
898 
899 	if (!kfence_init_pool_early()) {
900 		pr_err("%s failed\n", __func__);
901 		return;
902 	}
903 
904 	kfence_init_enable();
905 }
906 
907 static int kfence_init_late(void)
908 {
909 	const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
910 	const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
911 	unsigned long addr = (unsigned long)__kfence_pool;
912 	unsigned long free_size = KFENCE_POOL_SIZE;
913 	int err = -ENOMEM;
914 
915 #ifdef CONFIG_CONTIG_ALLOC
916 	struct page *pages;
917 
918 	pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
919 				   NULL);
920 	if (!pages)
921 		return -ENOMEM;
922 
923 	__kfence_pool = page_to_virt(pages);
924 	pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
925 				   NULL);
926 	if (pages)
927 		kfence_metadata_init = page_to_virt(pages);
928 #else
929 	if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
930 	    nr_pages_meta > MAX_ORDER_NR_PAGES) {
931 		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
932 		return -EINVAL;
933 	}
934 
935 	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
936 	if (!__kfence_pool)
937 		return -ENOMEM;
938 
939 	kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
940 #endif
941 
942 	if (!kfence_metadata_init)
943 		goto free_pool;
944 
945 	memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
946 	addr = kfence_init_pool();
947 	if (!addr) {
948 		kfence_init_enable();
949 		kfence_debugfs_init();
950 		return 0;
951 	}
952 
953 	pr_err("%s failed\n", __func__);
954 	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
955 	err = -EBUSY;
956 
957 #ifdef CONFIG_CONTIG_ALLOC
958 	free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
959 			  nr_pages_meta);
960 free_pool:
961 	free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
962 			  free_size / PAGE_SIZE);
963 #else
964 	free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
965 free_pool:
966 	free_pages_exact((void *)addr, free_size);
967 #endif
968 
969 	kfence_metadata_init = NULL;
970 	__kfence_pool = NULL;
971 	return err;
972 }
973 
974 static int kfence_enable_late(void)
975 {
976 	if (!__kfence_pool)
977 		return kfence_init_late();
978 
979 	WRITE_ONCE(kfence_enabled, true);
980 	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
981 	pr_info("re-enabled\n");
982 	return 0;
983 }
984 
985 void kfence_shutdown_cache(struct kmem_cache *s)
986 {
987 	unsigned long flags;
988 	struct kfence_metadata *meta;
989 	int i;
990 
991 	/* Pairs with release in kfence_init_pool(). */
992 	if (!smp_load_acquire(&kfence_metadata))
993 		return;
994 
995 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
996 		bool in_use;
997 
998 		meta = &kfence_metadata[i];
999 
1000 		/*
1001 		 * If we observe some inconsistent cache and state pair where we
1002 		 * should have returned false here, cache destruction is racing
1003 		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
1004 		 * the lock will not help, as different critical section
1005 		 * serialization will have the same outcome.
1006 		 */
1007 		if (READ_ONCE(meta->cache) != s ||
1008 		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
1009 			continue;
1010 
1011 		raw_spin_lock_irqsave(&meta->lock, flags);
1012 		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
1013 		raw_spin_unlock_irqrestore(&meta->lock, flags);
1014 
1015 		if (in_use) {
1016 			/*
1017 			 * This cache still has allocations, and we should not
1018 			 * release them back into the freelist so they can still
1019 			 * safely be used and retain the kernel's default
1020 			 * behaviour of keeping the allocations alive (leak the
1021 			 * cache); however, they effectively become "zombie
1022 			 * allocations" as the KFENCE objects are the only ones
1023 			 * still in use and the owning cache is being destroyed.
1024 			 *
1025 			 * We mark them freed, so that any subsequent use shows
1026 			 * more useful error messages that will include stack
1027 			 * traces of the user of the object, the original
1028 			 * allocation, and caller to shutdown_cache().
1029 			 */
1030 			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
1031 		}
1032 	}
1033 
1034 	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
1035 		meta = &kfence_metadata[i];
1036 
1037 		/* See above. */
1038 		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
1039 			continue;
1040 
1041 		raw_spin_lock_irqsave(&meta->lock, flags);
1042 		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
1043 			meta->cache = NULL;
1044 		raw_spin_unlock_irqrestore(&meta->lock, flags);
1045 	}
1046 }
1047 
1048 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
1049 {
1050 	unsigned long stack_entries[KFENCE_STACK_DEPTH];
1051 	size_t num_stack_entries;
1052 	u32 alloc_stack_hash;
1053 
1054 	/*
1055 	 * Perform size check before switching kfence_allocation_gate, so that
1056 	 * we don't disable KFENCE without making an allocation.
1057 	 */
1058 	if (size > PAGE_SIZE) {
1059 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1060 		return NULL;
1061 	}
1062 
1063 	/*
1064 	 * Skip allocations from non-default zones, including DMA. We cannot
1065 	 * guarantee that pages in the KFENCE pool will have the requested
1066 	 * properties (e.g. reside in DMAable memory).
1067 	 */
1068 	if ((flags & GFP_ZONEMASK) ||
1069 	    ((flags & __GFP_THISNODE) && num_online_nodes() > 1) ||
1070 	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
1071 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
1072 		return NULL;
1073 	}
1074 
1075 	/*
1076 	 * Skip allocations for this slab, if KFENCE has been disabled for
1077 	 * this slab.
1078 	 */
1079 	if (s->flags & SLAB_SKIP_KFENCE)
1080 		return NULL;
1081 
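	/*
	 * Allocation gate: only the caller that increments the gate from 0 to
	 * 1 within a sample interval proceeds; everyone else bails out here
	 * until toggle_allocation_gate() resets the gate.
	 */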
1082 	if (atomic_inc_return(&kfence_allocation_gate) > 1)
1083 		return NULL;
1084 #ifdef CONFIG_KFENCE_STATIC_KEYS
1085 	/*
1086 	 * waitqueue_active() is fully ordered after the update of
1087 	 * kfence_allocation_gate per atomic_inc_return().
1088 	 */
1089 	if (waitqueue_active(&allocation_wait)) {
1090 		/*
1091 		 * Calling wake_up() here may deadlock when allocations happen
1092 		 * from within timer code. Use an irq_work to defer it.
1093 		 */
1094 		irq_work_queue(&wake_up_kfence_timer_work);
1095 	}
1096 #endif
1097 
1098 	if (!READ_ONCE(kfence_enabled))
1099 		return NULL;
1100 
1101 	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);
1102 
1103 	/*
1104 	 * Do expensive check for coverage of allocation in slow-path after
1105 	 * allocation_gate has already become non-zero, even though it might
1106 	 * mean not making any allocation within a given sample interval.
1107 	 *
1108 	 * This ensures reasonable allocation coverage when the pool is almost
1109 	 * full, including avoiding long-lived allocations of the same source
1110 	 * filling up the pool (e.g. pagecache allocations).
1111 	 */
1112 	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
1113 	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
1114 		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
1115 		return NULL;
1116 	}
1117 
1118 	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
1119 				    alloc_stack_hash);
1120 }
1121 
1122 size_t kfence_ksize(const void *addr)
1123 {
1124 	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1125 
1126 	/*
1127 	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1128 	 * either a use-after-free or invalid access.
1129 	 */
1130 	return meta ? meta->size : 0;
1131 }
1132 
1133 void *kfence_object_start(const void *addr)
1134 {
1135 	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1136 
1137 	/*
1138 	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
1139 	 * either a use-after-free or invalid access.
1140 	 */
1141 	return meta ? (void *)meta->addr : NULL;
1142 }
1143 
1144 void __kfence_free(void *addr)
1145 {
1146 	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);
1147 
1148 #ifdef CONFIG_MEMCG
1149 	KFENCE_WARN_ON(meta->objcg);
1150 #endif
1151 	/*
1152 	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
1153 	 * the object, as the object page may be recycled for other-typed
1154 	 * objects once it has been freed. meta->cache may be NULL if the cache
1155 	 * was destroyed.
1156 	 */
1157 	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
1158 		call_rcu(&meta->rcu_head, rcu_guarded_free);
1159 	else
1160 		kfence_guarded_free(addr, meta, false);
1161 }
1162 
1163 bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
1164 {
1165 	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
1166 	struct kfence_metadata *to_report = NULL;
1167 	enum kfence_error_type error_type;
1168 	unsigned long flags;
1169 
1170 	if (!is_kfence_address((void *)addr))
1171 		return false;
1172 
1173 	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
1174 		return kfence_unprotect(addr); /* ... unprotect and proceed. */
1175 
1176 	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
1177 
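	/*
	 * Odd pool pages are guard pages, so a fault there is an out-of-bounds
	 * access by a neighbouring object; even pages (bar the very first,
	 * which is an extra guard) back objects directly, so a fault on a
	 * protected object page indicates a use-after-free.
	 */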
1178 	if (page_index % 2) {
1179 		/* This is a redzone, report a buffer overflow. */
1180 		struct kfence_metadata *meta;
1181 		int distance = 0;
1182 
1183 		meta = addr_to_metadata(addr - PAGE_SIZE);
1184 		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1185 			to_report = meta;
1186 			/* Data race ok; distance calculation approximate. */
1187 			distance = addr - data_race(meta->addr + meta->size);
1188 		}
1189 
1190 		meta = addr_to_metadata(addr + PAGE_SIZE);
1191 		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
1192 			/* Data race ok; distance calculation approximate. */
1193 			if (!to_report || distance > data_race(meta->addr) - addr)
1194 				to_report = meta;
1195 		}
1196 
1197 		if (!to_report)
1198 			goto out;
1199 
1200 		raw_spin_lock_irqsave(&to_report->lock, flags);
1201 		to_report->unprotected_page = addr;
1202 		error_type = KFENCE_ERROR_OOB;
1203 
1204 		/*
1205 		 * If the object was freed before we took the lock, we can still
1206 		 * report this as an OOB -- the report will simply show the
1207 		 * stacktrace of the free as well.
1208 		 */
1209 	} else {
1210 		to_report = addr_to_metadata(addr);
1211 		if (!to_report)
1212 			goto out;
1213 
1214 		raw_spin_lock_irqsave(&to_report->lock, flags);
1215 		error_type = KFENCE_ERROR_UAF;
1216 		/*
1217 		 * We may race with __kfence_alloc(), and it is possible that a
1218 		 * freed object may be reallocated. We simply report this as a
1219 		 * use-after-free, with the stack trace showing the place where
1220 		 * the object was re-allocated.
1221 		 */
1222 	}
1223 
1224 out:
1225 	if (to_report) {
1226 		kfence_report_error(addr, is_write, regs, to_report, error_type);
1227 		raw_spin_unlock_irqrestore(&to_report->lock, flags);
1228 	} else {
1229 		/* This may be a UAF or OOB access, but we can't be sure. */
1230 		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
1231 	}
1232 
1233 	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
1234 }
1235