// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * through which KFENCE's reports are obtained is the console, the console
 * output is what we verify. Each test case checks for the presence (or
 * absence) of generated reports. Relies on the 'console' tracepoint to capture
 * reports as they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include <asm/kfence.h>

#include "kfence.h"

/* May be overridden by <asm/kfence.h>. */
#ifndef arch_kfence_test_address
#define arch_kfence_test_address(addr) (addr)
#endif

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * A KFENCE report, and one related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}
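
/*
 * For example, a test-triggered OOB read produces two console lines of
 * interest resembling the following (illustrative only; the symbol offset,
 * sizes and addresses vary, and the tail of the second line is elided here):
 *
 *   BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0x.../0x...
 *   Out-of-bounds read at 0x... ...
 */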

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Is access a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	unsigned long addr = (unsigned long)r->addr;
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip the module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		addr = arch_kfence_test_address(addr);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}
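
/*
 * E.g., for the report triggered by test_corruption(), the generated
 * expectations are (the "+0x..." offset in the title has been stripped above,
 * and <addr> stands for the %p-formatted access address):
 *
 *   expect[0] == "BUG: KFENCE: memory corruption in test_corruption"
 *   expect[1] == "Corrupted memory at 0x<addr>"
 *
 * Both are then matched as substrings of the corresponding observed lines.
 */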

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
	 * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
	 * allocate via memcg, if enabled.
	 */
	flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

static inline size_t kmalloc_cache_alignment(size_t size)
{
	return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
}
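
/*
 * E.g., kmalloc_cache_alignment(32) returns the ->align of the kmalloc-32
 * cache. The concrete value depends on the allocator and architecture
 * (commonly ARCH_KMALLOC_MINALIGN with SLUB); the tests below therefore query
 * it rather than hard-coding a value.
 */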

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and on which side of the object
 * page the allocation and its closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY, /* KFENCE, any side. */
	ALLOCATE_LEFT, /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE, /* No KFENCE allocation. */
};
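
/*
 * Sketch of a KFENCE object page and its neighboring guard pages (inferred
 * from the alignment checks in test_alloc() below):
 *
 *	... | guard page | object page | guard page | ...
 *
 * ALLOCATE_LEFT returns an object at the start of its page (PAGE_SIZE-aligned,
 * adjacent to the left guard page); ALLOCATE_RIGHT returns one placed towards
 * the end of its page (not PAGE_SIZE-aligned, adjacent to the right guard
 * page).
 */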

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct page *page = virt_to_head_page(alloc);
			struct kmem_cache *s = test_cache ?:
					kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
			if (policy == ALLOCATE_RIGHT &&
			    !IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
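 *
 * Worked example, assuming an alignment of 8 (SLUB): a right-allocated 73-byte
 * object starts 80 bytes (73 rounded up to 8) before the right guard page, so
 * accesses to buf + 73 .. buf + 79 still hit the object's own page and go
 * unnoticed, whereas buf + 73 + 8 lands inside the guard page and faults.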
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0,
						     (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test that init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	if (!IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON))
		return;
	/* Assume it hasn't been disabled on the command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	if (CONFIG_KFENCE_SAMPLE_INTERVAL > 100) {
		kunit_warn(test, "skipping ... would take too long\n");
		return;
	}

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (i == CONFIG_KFENCE_NUM_OBJECTS) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test that SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* The above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed the earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to the KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the name. Set up 2 tests per test case: one using the
 * default allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }
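
/*
 * For instance, KFENCE_KUNIT_CASE(test_double_free) expands to the two array
 * entries:
 *
 *   { .run_case = test_double_free, .name = "test_double_free" },
 *   { .run_case = test_double_free, .name = "test_double_free-memcache" }
 *
 * test_init() below inspects the name suffix to decide whether to set up a
 * test_cache.
 */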

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	if (!__kfence_pool)
		return -EINVAL;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
};
static struct kunit_suite *kfence_test_suites[] = { &kfence_test_suite, NULL };

static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}

/*
 * We only want to do the tracepoint setup and teardown once, therefore we have
 * to customize the init and exit functions and cannot rely on
 * kunit_test_suite().
 */
static int __init kfence_test_init(void)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return __kunit_test_suites_init(kfence_test_suites);
}

static void kfence_test_exit(void)
{
	__kunit_test_suites_exit(kfence_test_suites);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

late_initcall_sync(kfence_test_init);
module_exit(kfence_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");