xref: /openbmc/linux/mm/kasan/kasan_test.c (revision d6e646b8)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#define pr_fmt(fmt) "kasan_test: " fmt

#include <kunit/test.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <trace/events/printk.h>

#include <asm/page.h>

#include "kasan.h"

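/*
 * Generic KASAN tracks memory state with byte granularity and so detects an
 * out-of-bounds access at the very first bad byte. The tag-based modes assign
 * one tag per KASAN_GRANULE_SIZE-aligned granule, so an access is only
 * guaranteed to be caught once it reaches the next granule; OOB_TAG_OFF
 * supplies that extra offset for them.
 */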
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

static bool multishot;

/* Fields set based on lines observed in the console. */
static struct {
	bool report_found;
	bool async_fault;
} test_status;

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

/* Probe for console output: obtains test_status lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	if (strnstr(buf, "BUG: KASAN: ", len))
		WRITE_ONCE(test_status.report_found, true);
	else if (strnstr(buf, "Asynchronous fault: ", len))
		WRITE_ONCE(test_status.async_fault, true);
}

static int kasan_suite_init(struct kunit_suite *suite)
{
	if (!kasan_enabled()) {
		pr_err("Can't run KASAN tests with KASAN disabled");
		return -1;
	}

	/* Stop failing KUnit tests on KASAN reports. */
	kasan_kunit_test_suite_start();

	/*
	 * Temporarily enable multi-shot mode. Otherwise, KASAN would only
	 * report the first detected bug and panic the kernel if panic_on_warn
	 * is enabled.
	 */
	multishot = kasan_save_enable_multi_shot();

	register_trace_console(probe_console, NULL);
	return 0;
}

static void kasan_suite_exit(struct kunit_suite *suite)
{
	kasan_kunit_test_suite_end();
	kasan_restore_multi_shot(multishot);
	unregister_trace_console(probe_console, NULL);
	tracepoint_synchronize_unregister();
}

static void kasan_test_exit(struct kunit *test)
{
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_status". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler reenables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the
 * test_status fields, it can reorder or optimize away the accesses to those
 * fields. Use READ/WRITE_ONCE() for the accesses and compiler barriers
 * around the expression to prevent that.
 *
 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept
 * as false. This allows detecting KASAN reports that happen outside of the
 * checks by asserting !test_status.report_found at the start of
 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible())				\
		migrate_disable();					\
	KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found));	\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_fault_possible())				\
		kasan_force_async_fault();				\
	if (!READ_ONCE(test_status.report_found)) {			\
		KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure "	\
				"expected in \"" #expression		\
				 "\", but none occurred");		\
	}								\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    kasan_sync_fault_possible()) {				\
		if (READ_ONCE(test_status.report_found) &&		\
		    !READ_ONCE(test_status.async_fault))		\
			kasan_enable_hw_tags();				\
		migrate_enable();					\
	}								\
	WRITE_ONCE(test_status.report_found, false);			\
	WRITE_ONCE(test_status.async_fault, false);			\
} while (0)
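
/*
 * Typical usage, as seen throughout the tests below; the expression is a
 * deliberately bad access:
 *
 *	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
 */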

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config))					\
		kunit_skip((test), "Test requires " #config "=y");	\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config))						\
		kunit_skip((test), "Test requires " #config "=n");	\
} while (0)
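
/*
 * Usage example from the tests below:
 *
 *	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
 *
 * skips the current test unless the generic mode is built in; the _OFF
 * variant skips when the given option is enabled.
 */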

#define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do {		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS))				\
		break;  /* No compiler instrumentation. */		\
	if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX))	\
		break;  /* Should always be instrumented! */		\
	if (IS_ENABLED(CONFIG_GENERIC_ENTRY))				\
		kunit_skip((test), "Test requires checked mem*()");	\
} while (0)
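
/*
 * "Checked mem*()" above means memset/memmove/memcpy accesses that KASAN
 * verifies. HW_TAGS needs no compiler instrumentation since the hardware
 * checks the accesses themselves, and a compiler that supports the KASAN
 * mem*() prefix always emits instrumented calls. Failing both, coverage
 * depends on the kernel's own checked mem*() definitions, whose availability
 * the CONFIG_GENERIC_ENTRY test above approximates.
 */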

static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	/*
	 * An unaligned access past the requested kmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x');

	/*
	 * An aligned access into the first out-of-bounds granule that falls
	 * within the aligned kmalloc object.
	 */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y');

	/* Out-of-bounds access past the aligned kmalloc object. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] =
					ptr[size + KASAN_GRANULE_SIZE + 5]);

	kfree(ptr);
}

static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN page allocations have no redzones, thus
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]);
	free_pages((unsigned long)ptr, order);
}

static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Suppress -Warray-bounds warnings. */
	OPTIMIZER_HIDE_VAR(ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules, and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
				round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
				round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_NULL(test, ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/* RELOC_HIDE to prevent gcc from warning about short alloc */
	ptr1 = RELOC_HIDE(kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL), 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	OPTIMIZER_HIDE_VAR(ptr1);
	OPTIMIZER_HIDE_VAR(ptr2);
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

/*
 * Note: in the memset tests below, the written range touches both valid and
 * invalid memory. This makes sure that the instrumentation checks not only
 * the starting address but the whole range.
 */

static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16));
	kfree(ptr);
}

static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
				memset(ptr, 0, size + KASAN_GRANULE_SIZE));
	kfree(ptr);
}

static void kmalloc_memmove_negative_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = -2;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Hardware tag-based mode doesn't check memmove for negative size.
	 * As a result, this test introduces a side-effect memory corruption,
	 * which can result in a crash.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	size_t invalid_size = size;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);
	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(invalid_size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]);
}

static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test);

	/*
	 * Only generic KASAN uses quarantine, which is required to avoid a
	 * kernel memory corruption this test causes.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]);
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

/*
 * Check that KASAN detects use-after-free when another object was allocated in
 * the same slot. Relevant for the tag-based modes, which do not use quarantine.
 */
static void kmalloc_uaf3(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 100;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]);
}

static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob_right(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_global_oob_left(struct kunit *test)
{
	char *volatile array = global_array;
	char *p = array - 3;

	/*
	 * GCC is known to fail this test, skip it.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() does NOT unpoison whole object. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 128 - KASAN_GRANULE_SIZE - 5;
	size_t real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	real_size = ksize(ptr);
	KUNIT_EXPECT_GT(test, real_size, size);

	OPTIMIZER_HIDE_VAR(ptr);

	/* These accesses shouldn't trigger a KASAN report. */
	ptr[0] = 'x';
	ptr[size - 1] = 'x';

	/* These must trigger a KASAN report. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]);

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]);
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]);
}

static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob_right. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

static void empty_cache_ctor(void *object) { }

static void kmem_cache_double_destroy(struct kunit *test)
{
	struct kmem_cache *cache;

	/* Provide a constructor to prevent cache merging. */
	cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	kmem_cache_destroy(cache);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache));
}

static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}

static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	OPTIMIZER_HIDE_VAR(ptr);
	OPTIMIZER_HIDE_VAR(size);
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size + 1));
	kfree(ptr);
}

static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/*
	 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that, ptr needs to point to a zeroed byte.
	 * Skip the metadata that could be stored in the freed object so that
	 * ptr will likely point to a zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}

static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}

static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}

static void kasan_bitops_generic(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	/*
	 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
	 * this way we do not actually corrupt other memory.
	 */
	bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/*
	 * The calls below try to access a bit within the allocated memory;
	 * however, they are still out-of-bounds, since bitops are defined to
	 * operate on the whole long the bit is in.
	 */
	kasan_bitops_modify(test, BITS_PER_LONG, bits);

	/* The calls below try to access a bit beyond the allocated memory. */
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);

	kfree(bits);
}

static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}

static void kmalloc_double_kzfree(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree_sensitive(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
}

/*
 * The two tests below check that Generic KASAN prints auxiliary stack traces
 * for RCU callbacks and workqueues. The reports need to be inspected manually.
 *
 * These tests are still enabled for other KASAN modes to make sure that all
 * modes report bad accesses in tested scenarios.
 */

static struct kasan_rcu_info {
	int i;
	struct rcu_head rcu;
} *global_rcu_ptr;

static void rcu_uaf_reclaim(struct rcu_head *rp)
{
	struct kasan_rcu_info *fp =
		container_of(rp, struct kasan_rcu_info, rcu);

	kfree(fp);
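	/*
	 * Deliberate use-after-free: fp was freed just above. Triggering the
	 * bad access from inside the RCU callback is what lets the report
	 * include the call_rcu() call site as an auxiliary stack trace.
	 */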
	((volatile struct kasan_rcu_info *)fp)->i;
}

static void rcu_uaf(struct kunit *test)
{
	struct kasan_rcu_info *ptr;

	ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	global_rcu_ptr = rcu_dereference_protected(
				(struct kasan_rcu_info __rcu *)ptr, NULL);

	KUNIT_EXPECT_KASAN_FAIL(test,
		call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim);
		rcu_barrier());
}

static void workqueue_uaf_work(struct work_struct *work)
{
	kfree(work);
}

static void workqueue_uaf(struct kunit *test)
{
	struct workqueue_struct *workqueue;
	struct work_struct *work;

	workqueue = create_workqueue("kasan_workqueue_test");
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue);

	work = kmalloc(sizeof(struct work_struct), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work);

	INIT_WORK(work, workqueue_uaf_work);
	queue_work(workqueue, work);
	destroy_workqueue(workqueue);

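	/*
	 * destroy_workqueue() drains the queue, so by this point
	 * workqueue_uaf_work() has run and freed the work item; the access
	 * below is therefore a use-after-free.
	 */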
	KUNIT_EXPECT_KASAN_FAIL(test,
		((volatile struct work_struct *)work)->data);
}

static void vmalloc_helpers_tags(struct kunit *test)
{
	void *ptr;

	/* This test is intended for tag-based modes. */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	ptr = vmalloc(PAGE_SIZE);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Check that the returned pointer is tagged. */
	KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure exported vmalloc helpers handle tagged pointers. */
	KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr));
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr));

#if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST)
	{
		int rv;

		/* Make sure vmalloc'ed memory permissions can be changed. */
		rv = set_memory_ro((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
		rv = set_memory_rw((unsigned long)ptr, 1);
		KUNIT_ASSERT_GE(test, rv, 0);
	}
#endif

	vfree(ptr);
}

static void vmalloc_oob(struct kunit *test)
{
	char *v_ptr, *p_ptr;
	struct page *page;
	size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	v_ptr = vmalloc(size);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	OPTIMIZER_HIDE_VAR(v_ptr);

	/*
	 * We have to be careful not to hit the guard page in vmalloc tests.
	 * The MMU will catch that and crash us.
	 */

	/* Make sure in-bounds accesses are valid. */
	v_ptr[0] = 0;
	v_ptr[size - 1] = 0;

	/*
	 * An unaligned access past the requested vmalloc size.
	 * Only generic KASAN can precisely detect these.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]);

	/* An aligned access into the first out-of-bounds granule. */
	KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]);

	/* Check that in-bounds accesses to the physical page are valid. */
	page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);
	p_ptr[0] = 0;

	vfree(v_ptr);

	/*
	 * We can't check for use-after-unmap bugs in this nor in the following
	 * vmalloc tests, as the page might be fully unmapped and accessing it
	 * will crash the kernel.
	 */
}

static void vmap_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *p_page, *v_page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vmap mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);

	p_page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page);
	p_ptr = page_address(p_page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	/*
	 * We can't check for out-of-bounds bugs in this nor in the following
	 * vmalloc tests, as allocations have page granularity and accessing
	 * the guard page will crash the kernel.
	 */

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	/* Make sure vmalloc_to_page() correctly recovers the page pointer. */
	v_page = vmalloc_to_page(v_ptr);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page);
	KUNIT_EXPECT_PTR_EQ(test, p_page, v_page);

	vunmap(v_ptr);
	free_pages((unsigned long)p_ptr, 1);
}

static void vm_map_ram_tags(struct kunit *test)
{
	char *p_ptr, *v_ptr;
	struct page *page;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons vm_map_ram mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	page = alloc_pages(GFP_KERNEL, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page);
	p_ptr = page_address(page);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr);

	v_ptr = vm_map_ram(&page, 1, -1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr);

	KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN);
	KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL);

	/* Make sure that in-bounds accesses through both pointers work. */
	*p_ptr = 0;
	*v_ptr = 0;

	vm_unmap_ram(v_ptr, 1);
	free_pages((unsigned long)p_ptr, 1);
}

static void vmalloc_percpu(struct kunit *test)
{
	char __percpu *ptr;
	int cpu;

	/*
	 * This test is specifically crafted for the software tag-based mode,
	 * the only tag-based mode that poisons percpu mappings.
	 */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS);

	ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);

	for_each_possible_cpu(cpu) {
		char *c_ptr = per_cpu_ptr(ptr, cpu);

		KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL);

		/* Make sure that in-bounds accesses don't crash the kernel. */
		*c_ptr = 0;
	}

	free_percpu(ptr);
}

/*
 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
 * modes.
 */
static void match_all_not_assigned(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	int i, size, order;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = kmalloc(size, GFP_KERNEL);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		kfree(ptr);
	}

	for (i = 0; i < 256; i++) {
		order = get_random_u32_inclusive(1, 4);
		pages = alloc_pages(GFP_KERNEL, order);
		ptr = page_address(pages);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		free_pages((unsigned long)ptr, order);
	}

	if (!IS_ENABLED(CONFIG_KASAN_VMALLOC))
		return;

	for (i = 0; i < 256; i++) {
		size = get_random_u32_inclusive(1, 1024);
		ptr = vmalloc(size);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
		KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
		KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
		vfree(ptr);
	}
}

/* Check that 0xff works as a match-all pointer tag for tag-based modes. */
static void match_all_ptr_tag(struct kunit *test)
{
	char *ptr;
	u8 tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Backup the assigned tag. */
	tag = get_tag(ptr);
	KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);

	/* Reset the tag to 0xff. */
	ptr = set_tag(ptr, KASAN_TAG_KERNEL);

	/* This access shouldn't trigger a KASAN report. */
	*ptr = 0;

	/* Recover the pointer tag and free. */
	ptr = set_tag(ptr, tag);
	kfree(ptr);
}

/* Check that there are no match-all memory tags for tag-based modes. */
static void match_all_mem_tag(struct kunit *test)
{
	char *ptr;
	int tag;

	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	ptr = kmalloc(128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);

	/* For each possible tag value not matching the pointer tag. */
	for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
		if (tag == get_tag(ptr))
			continue;

		/* Mark the first memory granule with the chosen memory tag. */
		kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);

		/* This access must cause a KASAN report. */
		KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
	}

	/* Recover the memory tag and free. */
	kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
	kfree(ptr);
}

static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(pagealloc_oob_right),
	KUNIT_CASE(pagealloc_uaf),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(krealloc_more_oob),
	KUNIT_CASE(krealloc_less_oob),
	KUNIT_CASE(krealloc_pagealloc_more_oob),
	KUNIT_CASE(krealloc_pagealloc_less_oob),
	KUNIT_CASE(krealloc_uaf),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_negative_size),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kmalloc_uaf3),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(kmem_cache_accounted),
	KUNIT_CASE(kmem_cache_bulk),
	KUNIT_CASE(kasan_global_oob_right),
	KUNIT_CASE(kasan_global_oob_left),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(ksize_uaf),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kmem_cache_double_destroy),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(rcu_uaf),
	KUNIT_CASE(workqueue_uaf),
	KUNIT_CASE(vmalloc_helpers_tags),
	KUNIT_CASE(vmalloc_oob),
	KUNIT_CASE(vmap_tags),
	KUNIT_CASE(vm_map_ram_tags),
	KUNIT_CASE(vmalloc_percpu),
	KUNIT_CASE(match_all_not_assigned),
	KUNIT_CASE(match_all_ptr_tag),
	KUNIT_CASE(match_all_mem_tag),
	{}
};

static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
	.suite_init = kasan_suite_init,
	.suite_exit = kasan_suite_exit,
};

kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");