1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 6 */ 7 8 #define pr_fmt(fmt) "kasan_test: " fmt 9 10 #include <kunit/test.h> 11 #include <linux/bitops.h> 12 #include <linux/delay.h> 13 #include <linux/io.h> 14 #include <linux/kasan.h> 15 #include <linux/kernel.h> 16 #include <linux/mm.h> 17 #include <linux/mman.h> 18 #include <linux/module.h> 19 #include <linux/printk.h> 20 #include <linux/random.h> 21 #include <linux/set_memory.h> 22 #include <linux/slab.h> 23 #include <linux/string.h> 24 #include <linux/tracepoint.h> 25 #include <linux/uaccess.h> 26 #include <linux/vmalloc.h> 27 #include <trace/events/printk.h> 28 29 #include <asm/page.h> 30 31 #include "kasan.h" 32 33 #define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE) 34 35 static bool multishot; 36 37 /* Fields set based on lines observed in the console. */ 38 static struct { 39 bool report_found; 40 bool async_fault; 41 } test_status; 42 43 /* 44 * Some tests use these global variables to store return values from function 45 * calls that could otherwise be eliminated by the compiler as dead code. 46 */ 47 void *kasan_ptr_result; 48 int kasan_int_result; 49 50 /* Probe for console output: obtains test_status lines of interest. */ 51 static void probe_console(void *ignore, const char *buf, size_t len) 52 { 53 if (strnstr(buf, "BUG: KASAN: ", len)) 54 WRITE_ONCE(test_status.report_found, true); 55 else if (strnstr(buf, "Asynchronous fault: ", len)) 56 WRITE_ONCE(test_status.async_fault, true); 57 } 58 59 static void register_tracepoints(struct tracepoint *tp, void *ignore) 60 { 61 check_trace_callback_type_console(probe_console); 62 if (!strcmp(tp->name, "console")) 63 WARN_ON(tracepoint_probe_register(tp, probe_console, NULL)); 64 } 65 66 static void unregister_tracepoints(struct tracepoint *tp, void *ignore) 67 { 68 if (!strcmp(tp->name, "console")) 69 tracepoint_probe_unregister(tp, probe_console, NULL); 70 } 71 72 static int kasan_suite_init(struct kunit_suite *suite) 73 { 74 if (!kasan_enabled()) { 75 pr_err("Can't run KASAN tests with KASAN disabled"); 76 return -1; 77 } 78 79 /* Stop failing KUnit tests on KASAN reports. */ 80 kasan_kunit_test_suite_start(); 81 82 /* 83 * Temporarily enable multi-shot mode. Otherwise, KASAN would only 84 * report the first detected bug and panic the kernel if panic_on_warn 85 * is enabled. 86 */ 87 multishot = kasan_save_enable_multi_shot(); 88 89 /* 90 * Because we want to be able to build the test as a module, we need to 91 * iterate through all known tracepoints, since the static registration 92 * won't work here. 93 */ 94 for_each_kernel_tracepoint(register_tracepoints, NULL); 95 return 0; 96 } 97 98 static void kasan_suite_exit(struct kunit_suite *suite) 99 { 100 kasan_kunit_test_suite_end(); 101 kasan_restore_multi_shot(multishot); 102 for_each_kernel_tracepoint(unregister_tracepoints, NULL); 103 tracepoint_synchronize_unregister(); 104 } 105 106 static void kasan_test_exit(struct kunit *test) 107 { 108 KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); 109 } 110 111 /** 112 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a 113 * KASAN report; causes a test failure otherwise. This relies on a KUnit 114 * resource named "kasan_status". Do not use this name for KUnit resources 115 * outside of KASAN tests. 116 * 117 * For hardware tag-based KASAN, when a synchronous tag fault happens, tag 118 * checking is auto-disabled. When this happens, this test handler reenables 119 * tag checking. As tag checking can be only disabled or enabled per CPU, 120 * this handler disables migration (preemption). 121 * 122 * Since the compiler doesn't see that the expression can change the test_status 123 * fields, it can reorder or optimize away the accesses to those fields. 124 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the 125 * expression to prevent that. 126 * 127 * In between KUNIT_EXPECT_KASAN_FAIL checks, test_status.report_found is kept 128 * as false. This allows detecting KASAN reports that happen outside of the 129 * checks by asserting !test_status.report_found at the start of 130 * KUNIT_EXPECT_KASAN_FAIL and in kasan_test_exit. 131 */ 132 #define KUNIT_EXPECT_KASAN_FAIL(test, expression) do { \ 133 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ 134 kasan_sync_fault_possible()) \ 135 migrate_disable(); \ 136 KUNIT_EXPECT_FALSE(test, READ_ONCE(test_status.report_found)); \ 137 barrier(); \ 138 expression; \ 139 barrier(); \ 140 if (kasan_async_fault_possible()) \ 141 kasan_force_async_fault(); \ 142 if (!READ_ONCE(test_status.report_found)) { \ 143 KUNIT_FAIL(test, KUNIT_SUBTEST_INDENT "KASAN failure " \ 144 "expected in \"" #expression \ 145 "\", but none occurred"); \ 146 } \ 147 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) && \ 148 kasan_sync_fault_possible()) { \ 149 if (READ_ONCE(test_status.report_found) && \ 150 !READ_ONCE(test_status.async_fault)) \ 151 kasan_enable_tagging(); \ 152 migrate_enable(); \ 153 } \ 154 WRITE_ONCE(test_status.report_found, false); \ 155 WRITE_ONCE(test_status.async_fault, false); \ 156 } while (0) 157 158 #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \ 159 if (!IS_ENABLED(config)) \ 160 kunit_skip((test), "Test requires " #config "=y"); \ 161 } while (0) 162 163 #define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \ 164 if (IS_ENABLED(config)) \ 165 kunit_skip((test), "Test requires " #config "=n"); \ 166 } while (0) 167 168 #define KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test) do { \ 169 if (IS_ENABLED(CONFIG_KASAN_HW_TAGS)) \ 170 break; /* No compiler instrumentation. */ \ 171 if (IS_ENABLED(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX)) \ 172 break; /* Should always be instrumented! */ \ 173 if (IS_ENABLED(CONFIG_GENERIC_ENTRY)) \ 174 kunit_skip((test), "Test requires checked mem*()"); \ 175 } while (0) 176 177 static void kmalloc_oob_right(struct kunit *test) 178 { 179 char *ptr; 180 size_t size = 128 - KASAN_GRANULE_SIZE - 5; 181 182 ptr = kmalloc(size, GFP_KERNEL); 183 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 184 185 OPTIMIZER_HIDE_VAR(ptr); 186 /* 187 * An unaligned access past the requested kmalloc size. 188 * Only generic KASAN can precisely detect these. 189 */ 190 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 191 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 'x'); 192 193 /* 194 * An aligned access into the first out-of-bounds granule that falls 195 * within the aligned kmalloc object. 196 */ 197 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + 5] = 'y'); 198 199 /* Out-of-bounds access past the aligned kmalloc object. */ 200 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 201 ptr[size + KASAN_GRANULE_SIZE + 5]); 202 203 kfree(ptr); 204 } 205 206 static void kmalloc_oob_left(struct kunit *test) 207 { 208 char *ptr; 209 size_t size = 15; 210 211 ptr = kmalloc(size, GFP_KERNEL); 212 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 213 214 OPTIMIZER_HIDE_VAR(ptr); 215 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1)); 216 kfree(ptr); 217 } 218 219 static void kmalloc_node_oob_right(struct kunit *test) 220 { 221 char *ptr; 222 size_t size = 4096; 223 224 ptr = kmalloc_node(size, GFP_KERNEL, 0); 225 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 226 227 OPTIMIZER_HIDE_VAR(ptr); 228 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]); 229 kfree(ptr); 230 } 231 232 /* 233 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't 234 * fit into a slab cache and therefore is allocated via the page allocator 235 * fallback. Since this kind of fallback is only implemented for SLUB, these 236 * tests are limited to that allocator. 237 */ 238 static void kmalloc_pagealloc_oob_right(struct kunit *test) 239 { 240 char *ptr; 241 size_t size = KMALLOC_MAX_CACHE_SIZE + 10; 242 243 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); 244 245 ptr = kmalloc(size, GFP_KERNEL); 246 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 247 248 OPTIMIZER_HIDE_VAR(ptr); 249 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0); 250 251 kfree(ptr); 252 } 253 254 static void kmalloc_pagealloc_uaf(struct kunit *test) 255 { 256 char *ptr; 257 size_t size = KMALLOC_MAX_CACHE_SIZE + 10; 258 259 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); 260 261 ptr = kmalloc(size, GFP_KERNEL); 262 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 263 kfree(ptr); 264 265 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); 266 } 267 268 static void kmalloc_pagealloc_invalid_free(struct kunit *test) 269 { 270 char *ptr; 271 size_t size = KMALLOC_MAX_CACHE_SIZE + 10; 272 273 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); 274 275 ptr = kmalloc(size, GFP_KERNEL); 276 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 277 278 KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1)); 279 } 280 281 static void pagealloc_oob_right(struct kunit *test) 282 { 283 char *ptr; 284 struct page *pages; 285 size_t order = 4; 286 size_t size = (1UL << (PAGE_SHIFT + order)); 287 288 /* 289 * With generic KASAN page allocations have no redzones, thus 290 * out-of-bounds detection is not guaranteed. 291 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503. 292 */ 293 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); 294 295 pages = alloc_pages(GFP_KERNEL, order); 296 ptr = page_address(pages); 297 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 298 299 KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = ptr[size]); 300 free_pages((unsigned long)ptr, order); 301 } 302 303 static void pagealloc_uaf(struct kunit *test) 304 { 305 char *ptr; 306 struct page *pages; 307 size_t order = 4; 308 309 pages = alloc_pages(GFP_KERNEL, order); 310 ptr = page_address(pages); 311 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 312 free_pages((unsigned long)ptr, order); 313 314 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); 315 } 316 317 static void kmalloc_large_oob_right(struct kunit *test) 318 { 319 char *ptr; 320 size_t size = KMALLOC_MAX_CACHE_SIZE - 256; 321 322 /* 323 * Allocate a chunk that is large enough, but still fits into a slab 324 * and does not trigger the page allocator fallback in SLUB. 325 */ 326 ptr = kmalloc(size, GFP_KERNEL); 327 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 328 329 OPTIMIZER_HIDE_VAR(ptr); 330 KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0); 331 kfree(ptr); 332 } 333 334 static void krealloc_more_oob_helper(struct kunit *test, 335 size_t size1, size_t size2) 336 { 337 char *ptr1, *ptr2; 338 size_t middle; 339 340 KUNIT_ASSERT_LT(test, size1, size2); 341 middle = size1 + (size2 - size1) / 2; 342 343 ptr1 = kmalloc(size1, GFP_KERNEL); 344 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); 345 346 ptr2 = krealloc(ptr1, size2, GFP_KERNEL); 347 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); 348 349 /* Suppress -Warray-bounds warnings. */ 350 OPTIMIZER_HIDE_VAR(ptr2); 351 352 /* All offsets up to size2 must be accessible. */ 353 ptr2[size1 - 1] = 'x'; 354 ptr2[size1] = 'x'; 355 ptr2[middle] = 'x'; 356 ptr2[size2 - 1] = 'x'; 357 358 /* Generic mode is precise, so unaligned size2 must be inaccessible. */ 359 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 360 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x'); 361 362 /* For all modes first aligned offset after size2 must be inaccessible. */ 363 KUNIT_EXPECT_KASAN_FAIL(test, 364 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x'); 365 366 kfree(ptr2); 367 } 368 369 static void krealloc_less_oob_helper(struct kunit *test, 370 size_t size1, size_t size2) 371 { 372 char *ptr1, *ptr2; 373 size_t middle; 374 375 KUNIT_ASSERT_LT(test, size2, size1); 376 middle = size2 + (size1 - size2) / 2; 377 378 ptr1 = kmalloc(size1, GFP_KERNEL); 379 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); 380 381 ptr2 = krealloc(ptr1, size2, GFP_KERNEL); 382 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); 383 384 /* Suppress -Warray-bounds warnings. */ 385 OPTIMIZER_HIDE_VAR(ptr2); 386 387 /* Must be accessible for all modes. */ 388 ptr2[size2 - 1] = 'x'; 389 390 /* Generic mode is precise, so unaligned size2 must be inaccessible. */ 391 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 392 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x'); 393 394 /* For all modes first aligned offset after size2 must be inaccessible. */ 395 KUNIT_EXPECT_KASAN_FAIL(test, 396 ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x'); 397 398 /* 399 * For all modes all size2, middle, and size1 should land in separate 400 * granules and thus the latter two offsets should be inaccessible. 401 */ 402 KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE), 403 round_down(middle, KASAN_GRANULE_SIZE)); 404 KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE), 405 round_down(size1, KASAN_GRANULE_SIZE)); 406 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x'); 407 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x'); 408 KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x'); 409 410 kfree(ptr2); 411 } 412 413 static void krealloc_more_oob(struct kunit *test) 414 { 415 krealloc_more_oob_helper(test, 201, 235); 416 } 417 418 static void krealloc_less_oob(struct kunit *test) 419 { 420 krealloc_less_oob_helper(test, 235, 201); 421 } 422 423 static void krealloc_pagealloc_more_oob(struct kunit *test) 424 { 425 /* page_alloc fallback in only implemented for SLUB. */ 426 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); 427 428 krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201, 429 KMALLOC_MAX_CACHE_SIZE + 235); 430 } 431 432 static void krealloc_pagealloc_less_oob(struct kunit *test) 433 { 434 /* page_alloc fallback in only implemented for SLUB. */ 435 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB); 436 437 krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235, 438 KMALLOC_MAX_CACHE_SIZE + 201); 439 } 440 441 /* 442 * Check that krealloc() detects a use-after-free, returns NULL, 443 * and doesn't unpoison the freed object. 444 */ 445 static void krealloc_uaf(struct kunit *test) 446 { 447 char *ptr1, *ptr2; 448 int size1 = 201; 449 int size2 = 235; 450 451 ptr1 = kmalloc(size1, GFP_KERNEL); 452 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); 453 kfree(ptr1); 454 455 KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL)); 456 KUNIT_ASSERT_NULL(test, ptr2); 457 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1); 458 } 459 460 static void kmalloc_oob_16(struct kunit *test) 461 { 462 struct { 463 u64 words[2]; 464 } *ptr1, *ptr2; 465 466 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 467 468 /* This test is specifically crafted for the generic mode. */ 469 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); 470 471 ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); 472 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); 473 474 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); 475 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); 476 477 OPTIMIZER_HIDE_VAR(ptr1); 478 OPTIMIZER_HIDE_VAR(ptr2); 479 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2); 480 kfree(ptr1); 481 kfree(ptr2); 482 } 483 484 static void kmalloc_uaf_16(struct kunit *test) 485 { 486 struct { 487 u64 words[2]; 488 } *ptr1, *ptr2; 489 490 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 491 492 ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL); 493 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); 494 495 ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); 496 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); 497 kfree(ptr2); 498 499 KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2); 500 kfree(ptr1); 501 } 502 503 /* 504 * Note: in the memset tests below, the written range touches both valid and 505 * invalid memory. This makes sure that the instrumentation does not only check 506 * the starting address but the whole range. 507 */ 508 509 static void kmalloc_oob_memset_2(struct kunit *test) 510 { 511 char *ptr; 512 size_t size = 128 - KASAN_GRANULE_SIZE; 513 514 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 515 516 ptr = kmalloc(size, GFP_KERNEL); 517 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 518 519 OPTIMIZER_HIDE_VAR(size); 520 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 1, 0, 2)); 521 kfree(ptr); 522 } 523 524 static void kmalloc_oob_memset_4(struct kunit *test) 525 { 526 char *ptr; 527 size_t size = 128 - KASAN_GRANULE_SIZE; 528 529 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 530 531 ptr = kmalloc(size, GFP_KERNEL); 532 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 533 534 OPTIMIZER_HIDE_VAR(size); 535 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 3, 0, 4)); 536 kfree(ptr); 537 } 538 539 static void kmalloc_oob_memset_8(struct kunit *test) 540 { 541 char *ptr; 542 size_t size = 128 - KASAN_GRANULE_SIZE; 543 544 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 545 546 ptr = kmalloc(size, GFP_KERNEL); 547 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 548 549 OPTIMIZER_HIDE_VAR(size); 550 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 7, 0, 8)); 551 kfree(ptr); 552 } 553 554 static void kmalloc_oob_memset_16(struct kunit *test) 555 { 556 char *ptr; 557 size_t size = 128 - KASAN_GRANULE_SIZE; 558 559 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 560 561 ptr = kmalloc(size, GFP_KERNEL); 562 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 563 564 OPTIMIZER_HIDE_VAR(size); 565 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + size - 15, 0, 16)); 566 kfree(ptr); 567 } 568 569 static void kmalloc_oob_in_memset(struct kunit *test) 570 { 571 char *ptr; 572 size_t size = 128 - KASAN_GRANULE_SIZE; 573 574 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 575 576 ptr = kmalloc(size, GFP_KERNEL); 577 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 578 579 OPTIMIZER_HIDE_VAR(ptr); 580 OPTIMIZER_HIDE_VAR(size); 581 KUNIT_EXPECT_KASAN_FAIL(test, 582 memset(ptr, 0, size + KASAN_GRANULE_SIZE)); 583 kfree(ptr); 584 } 585 586 static void kmalloc_memmove_negative_size(struct kunit *test) 587 { 588 char *ptr; 589 size_t size = 64; 590 size_t invalid_size = -2; 591 592 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 593 594 /* 595 * Hardware tag-based mode doesn't check memmove for negative size. 596 * As a result, this test introduces a side-effect memory corruption, 597 * which can result in a crash. 598 */ 599 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_HW_TAGS); 600 601 ptr = kmalloc(size, GFP_KERNEL); 602 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 603 604 memset((char *)ptr, 0, 64); 605 OPTIMIZER_HIDE_VAR(ptr); 606 OPTIMIZER_HIDE_VAR(invalid_size); 607 KUNIT_EXPECT_KASAN_FAIL(test, 608 memmove((char *)ptr, (char *)ptr + 4, invalid_size)); 609 kfree(ptr); 610 } 611 612 static void kmalloc_memmove_invalid_size(struct kunit *test) 613 { 614 char *ptr; 615 size_t size = 64; 616 size_t invalid_size = size; 617 618 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 619 620 ptr = kmalloc(size, GFP_KERNEL); 621 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 622 623 memset((char *)ptr, 0, 64); 624 OPTIMIZER_HIDE_VAR(ptr); 625 OPTIMIZER_HIDE_VAR(invalid_size); 626 KUNIT_EXPECT_KASAN_FAIL(test, 627 memmove((char *)ptr, (char *)ptr + 4, invalid_size)); 628 kfree(ptr); 629 } 630 631 static void kmalloc_uaf(struct kunit *test) 632 { 633 char *ptr; 634 size_t size = 10; 635 636 ptr = kmalloc(size, GFP_KERNEL); 637 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 638 639 kfree(ptr); 640 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[8]); 641 } 642 643 static void kmalloc_uaf_memset(struct kunit *test) 644 { 645 char *ptr; 646 size_t size = 33; 647 648 KASAN_TEST_NEEDS_CHECKED_MEMINTRINSICS(test); 649 650 /* 651 * Only generic KASAN uses quarantine, which is required to avoid a 652 * kernel memory corruption this test causes. 653 */ 654 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); 655 656 ptr = kmalloc(size, GFP_KERNEL); 657 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 658 659 kfree(ptr); 660 KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size)); 661 } 662 663 static void kmalloc_uaf2(struct kunit *test) 664 { 665 char *ptr1, *ptr2; 666 size_t size = 43; 667 int counter = 0; 668 669 again: 670 ptr1 = kmalloc(size, GFP_KERNEL); 671 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); 672 673 kfree(ptr1); 674 675 ptr2 = kmalloc(size, GFP_KERNEL); 676 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); 677 678 /* 679 * For tag-based KASAN ptr1 and ptr2 tags might happen to be the same. 680 * Allow up to 16 attempts at generating different tags. 681 */ 682 if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) { 683 kfree(ptr2); 684 goto again; 685 } 686 687 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[40]); 688 KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2); 689 690 kfree(ptr2); 691 } 692 693 /* 694 * Check that KASAN detects use-after-free when another object was allocated in 695 * the same slot. Relevant for the tag-based modes, which do not use quarantine. 696 */ 697 static void kmalloc_uaf3(struct kunit *test) 698 { 699 char *ptr1, *ptr2; 700 size_t size = 100; 701 702 /* This test is specifically crafted for tag-based modes. */ 703 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); 704 705 ptr1 = kmalloc(size, GFP_KERNEL); 706 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); 707 kfree(ptr1); 708 709 ptr2 = kmalloc(size, GFP_KERNEL); 710 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); 711 kfree(ptr2); 712 713 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr1)[8]); 714 } 715 716 static void kfree_via_page(struct kunit *test) 717 { 718 char *ptr; 719 size_t size = 8; 720 struct page *page; 721 unsigned long offset; 722 723 ptr = kmalloc(size, GFP_KERNEL); 724 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 725 726 page = virt_to_page(ptr); 727 offset = offset_in_page(ptr); 728 kfree(page_address(page) + offset); 729 } 730 731 static void kfree_via_phys(struct kunit *test) 732 { 733 char *ptr; 734 size_t size = 8; 735 phys_addr_t phys; 736 737 ptr = kmalloc(size, GFP_KERNEL); 738 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 739 740 phys = virt_to_phys(ptr); 741 kfree(phys_to_virt(phys)); 742 } 743 744 static void kmem_cache_oob(struct kunit *test) 745 { 746 char *p; 747 size_t size = 200; 748 struct kmem_cache *cache; 749 750 cache = kmem_cache_create("test_cache", size, 0, 0, NULL); 751 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); 752 753 p = kmem_cache_alloc(cache, GFP_KERNEL); 754 if (!p) { 755 kunit_err(test, "Allocation failed: %s\n", __func__); 756 kmem_cache_destroy(cache); 757 return; 758 } 759 760 KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]); 761 762 kmem_cache_free(cache, p); 763 kmem_cache_destroy(cache); 764 } 765 766 static void kmem_cache_accounted(struct kunit *test) 767 { 768 int i; 769 char *p; 770 size_t size = 200; 771 struct kmem_cache *cache; 772 773 cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL); 774 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); 775 776 /* 777 * Several allocations with a delay to allow for lazy per memcg kmem 778 * cache creation. 779 */ 780 for (i = 0; i < 5; i++) { 781 p = kmem_cache_alloc(cache, GFP_KERNEL); 782 if (!p) 783 goto free_cache; 784 785 kmem_cache_free(cache, p); 786 msleep(100); 787 } 788 789 free_cache: 790 kmem_cache_destroy(cache); 791 } 792 793 static void kmem_cache_bulk(struct kunit *test) 794 { 795 struct kmem_cache *cache; 796 size_t size = 200; 797 char *p[10]; 798 bool ret; 799 int i; 800 801 cache = kmem_cache_create("test_cache", size, 0, 0, NULL); 802 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); 803 804 ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p); 805 if (!ret) { 806 kunit_err(test, "Allocation failed: %s\n", __func__); 807 kmem_cache_destroy(cache); 808 return; 809 } 810 811 for (i = 0; i < ARRAY_SIZE(p); i++) 812 p[i][0] = p[i][size - 1] = 42; 813 814 kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p); 815 kmem_cache_destroy(cache); 816 } 817 818 static char global_array[10]; 819 820 static void kasan_global_oob_right(struct kunit *test) 821 { 822 /* 823 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS 824 * from failing here and panicking the kernel, access the array via a 825 * volatile pointer, which will prevent the compiler from being able to 826 * determine the array bounds. 827 * 828 * This access uses a volatile pointer to char (char *volatile) rather 829 * than the more conventional pointer to volatile char (volatile char *) 830 * because we want to prevent the compiler from making inferences about 831 * the pointer itself (i.e. its array bounds), not the data that it 832 * refers to. 833 */ 834 char *volatile array = global_array; 835 char *p = &array[ARRAY_SIZE(global_array) + 3]; 836 837 /* Only generic mode instruments globals. */ 838 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); 839 840 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); 841 } 842 843 static void kasan_global_oob_left(struct kunit *test) 844 { 845 char *volatile array = global_array; 846 char *p = array - 3; 847 848 /* 849 * GCC is known to fail this test, skip it. 850 * See https://bugzilla.kernel.org/show_bug.cgi?id=215051. 851 */ 852 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_CC_IS_CLANG); 853 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); 854 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); 855 } 856 857 /* Check that ksize() does NOT unpoison whole object. */ 858 static void ksize_unpoisons_memory(struct kunit *test) 859 { 860 char *ptr; 861 size_t size = 128 - KASAN_GRANULE_SIZE - 5; 862 size_t real_size; 863 864 ptr = kmalloc(size, GFP_KERNEL); 865 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 866 867 real_size = ksize(ptr); 868 KUNIT_EXPECT_GT(test, real_size, size); 869 870 OPTIMIZER_HIDE_VAR(ptr); 871 872 /* These accesses shouldn't trigger a KASAN report. */ 873 ptr[0] = 'x'; 874 ptr[size - 1] = 'x'; 875 876 /* These must trigger a KASAN report. */ 877 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 878 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); 879 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size + 5]); 880 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[real_size - 1]); 881 882 kfree(ptr); 883 } 884 885 /* 886 * Check that a use-after-free is detected by ksize() and via normal accesses 887 * after it. 888 */ 889 static void ksize_uaf(struct kunit *test) 890 { 891 char *ptr; 892 int size = 128 - KASAN_GRANULE_SIZE; 893 894 ptr = kmalloc(size, GFP_KERNEL); 895 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 896 kfree(ptr); 897 898 OPTIMIZER_HIDE_VAR(ptr); 899 KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr)); 900 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[0]); 901 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)ptr)[size]); 902 } 903 904 static void kasan_stack_oob(struct kunit *test) 905 { 906 char stack_array[10]; 907 /* See comment in kasan_global_oob_right. */ 908 char *volatile array = stack_array; 909 char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF]; 910 911 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK); 912 913 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); 914 } 915 916 static void kasan_alloca_oob_left(struct kunit *test) 917 { 918 volatile int i = 10; 919 char alloca_array[i]; 920 /* See comment in kasan_global_oob_right. */ 921 char *volatile array = alloca_array; 922 char *p = array - 1; 923 924 /* Only generic mode instruments dynamic allocas. */ 925 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); 926 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK); 927 928 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); 929 } 930 931 static void kasan_alloca_oob_right(struct kunit *test) 932 { 933 volatile int i = 10; 934 char alloca_array[i]; 935 /* See comment in kasan_global_oob_right. */ 936 char *volatile array = alloca_array; 937 char *p = array + i; 938 939 /* Only generic mode instruments dynamic allocas. */ 940 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); 941 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK); 942 943 KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); 944 } 945 946 static void kmem_cache_double_free(struct kunit *test) 947 { 948 char *p; 949 size_t size = 200; 950 struct kmem_cache *cache; 951 952 cache = kmem_cache_create("test_cache", size, 0, 0, NULL); 953 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); 954 955 p = kmem_cache_alloc(cache, GFP_KERNEL); 956 if (!p) { 957 kunit_err(test, "Allocation failed: %s\n", __func__); 958 kmem_cache_destroy(cache); 959 return; 960 } 961 962 kmem_cache_free(cache, p); 963 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p)); 964 kmem_cache_destroy(cache); 965 } 966 967 static void kmem_cache_invalid_free(struct kunit *test) 968 { 969 char *p; 970 size_t size = 200; 971 struct kmem_cache *cache; 972 973 cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU, 974 NULL); 975 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); 976 977 p = kmem_cache_alloc(cache, GFP_KERNEL); 978 if (!p) { 979 kunit_err(test, "Allocation failed: %s\n", __func__); 980 kmem_cache_destroy(cache); 981 return; 982 } 983 984 /* Trigger invalid free, the object doesn't get freed. */ 985 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1)); 986 987 /* 988 * Properly free the object to prevent the "Objects remaining in 989 * test_cache on __kmem_cache_shutdown" BUG failure. 990 */ 991 kmem_cache_free(cache, p); 992 993 kmem_cache_destroy(cache); 994 } 995 996 static void empty_cache_ctor(void *object) { } 997 998 static void kmem_cache_double_destroy(struct kunit *test) 999 { 1000 struct kmem_cache *cache; 1001 1002 /* Provide a constructor to prevent cache merging. */ 1003 cache = kmem_cache_create("test_cache", 200, 0, 0, empty_cache_ctor); 1004 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache); 1005 kmem_cache_destroy(cache); 1006 KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_destroy(cache)); 1007 } 1008 1009 static void kasan_memchr(struct kunit *test) 1010 { 1011 char *ptr; 1012 size_t size = 24; 1013 1014 /* 1015 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT. 1016 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details. 1017 */ 1018 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT); 1019 1020 if (OOB_TAG_OFF) 1021 size = round_up(size, OOB_TAG_OFF); 1022 1023 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); 1024 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1025 1026 OPTIMIZER_HIDE_VAR(ptr); 1027 OPTIMIZER_HIDE_VAR(size); 1028 KUNIT_EXPECT_KASAN_FAIL(test, 1029 kasan_ptr_result = memchr(ptr, '1', size + 1)); 1030 1031 kfree(ptr); 1032 } 1033 1034 static void kasan_memcmp(struct kunit *test) 1035 { 1036 char *ptr; 1037 size_t size = 24; 1038 int arr[9]; 1039 1040 /* 1041 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT. 1042 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details. 1043 */ 1044 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT); 1045 1046 if (OOB_TAG_OFF) 1047 size = round_up(size, OOB_TAG_OFF); 1048 1049 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); 1050 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1051 memset(arr, 0, sizeof(arr)); 1052 1053 OPTIMIZER_HIDE_VAR(ptr); 1054 OPTIMIZER_HIDE_VAR(size); 1055 KUNIT_EXPECT_KASAN_FAIL(test, 1056 kasan_int_result = memcmp(ptr, arr, size+1)); 1057 kfree(ptr); 1058 } 1059 1060 static void kasan_strings(struct kunit *test) 1061 { 1062 char *ptr; 1063 size_t size = 24; 1064 1065 /* 1066 * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT. 1067 * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details. 1068 */ 1069 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT); 1070 1071 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); 1072 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1073 1074 kfree(ptr); 1075 1076 /* 1077 * Try to cause only 1 invalid access (less spam in dmesg). 1078 * For that we need ptr to point to zeroed byte. 1079 * Skip metadata that could be stored in freed object so ptr 1080 * will likely point to zeroed byte. 1081 */ 1082 ptr += 16; 1083 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1')); 1084 1085 KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1')); 1086 1087 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2")); 1088 1089 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1)); 1090 1091 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr)); 1092 1093 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1)); 1094 } 1095 1096 static void kasan_bitops_modify(struct kunit *test, int nr, void *addr) 1097 { 1098 KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr)); 1099 KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr)); 1100 KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr)); 1101 KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr)); 1102 KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr)); 1103 KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr)); 1104 KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr)); 1105 KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr)); 1106 } 1107 1108 static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr) 1109 { 1110 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr)); 1111 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr)); 1112 KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr)); 1113 KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr)); 1114 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr)); 1115 KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr)); 1116 KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr)); 1117 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr)); 1118 1119 #if defined(clear_bit_unlock_is_negative_byte) 1120 KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = 1121 clear_bit_unlock_is_negative_byte(nr, addr)); 1122 #endif 1123 } 1124 1125 static void kasan_bitops_generic(struct kunit *test) 1126 { 1127 long *bits; 1128 1129 /* This test is specifically crafted for the generic mode. */ 1130 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC); 1131 1132 /* 1133 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes; 1134 * this way we do not actually corrupt other memory. 1135 */ 1136 bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL); 1137 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits); 1138 1139 /* 1140 * Below calls try to access bit within allocated memory; however, the 1141 * below accesses are still out-of-bounds, since bitops are defined to 1142 * operate on the whole long the bit is in. 1143 */ 1144 kasan_bitops_modify(test, BITS_PER_LONG, bits); 1145 1146 /* 1147 * Below calls try to access bit beyond allocated memory. 1148 */ 1149 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits); 1150 1151 kfree(bits); 1152 } 1153 1154 static void kasan_bitops_tags(struct kunit *test) 1155 { 1156 long *bits; 1157 1158 /* This test is specifically crafted for tag-based modes. */ 1159 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); 1160 1161 /* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */ 1162 bits = kzalloc(48, GFP_KERNEL); 1163 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits); 1164 1165 /* Do the accesses past the 48 allocated bytes, but within the redone. */ 1166 kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48); 1167 kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48); 1168 1169 kfree(bits); 1170 } 1171 1172 static void kmalloc_double_kzfree(struct kunit *test) 1173 { 1174 char *ptr; 1175 size_t size = 16; 1176 1177 ptr = kmalloc(size, GFP_KERNEL); 1178 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1179 1180 kfree_sensitive(ptr); 1181 KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr)); 1182 } 1183 1184 /* 1185 * The two tests below check that Generic KASAN prints auxiliary stack traces 1186 * for RCU callbacks and workqueues. The reports need to be inspected manually. 1187 * 1188 * These tests are still enabled for other KASAN modes to make sure that all 1189 * modes report bad accesses in tested scenarios. 1190 */ 1191 1192 static struct kasan_rcu_info { 1193 int i; 1194 struct rcu_head rcu; 1195 } *global_rcu_ptr; 1196 1197 static void rcu_uaf_reclaim(struct rcu_head *rp) 1198 { 1199 struct kasan_rcu_info *fp = 1200 container_of(rp, struct kasan_rcu_info, rcu); 1201 1202 kfree(fp); 1203 ((volatile struct kasan_rcu_info *)fp)->i; 1204 } 1205 1206 static void rcu_uaf(struct kunit *test) 1207 { 1208 struct kasan_rcu_info *ptr; 1209 1210 ptr = kmalloc(sizeof(struct kasan_rcu_info), GFP_KERNEL); 1211 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1212 1213 global_rcu_ptr = rcu_dereference_protected( 1214 (struct kasan_rcu_info __rcu *)ptr, NULL); 1215 1216 KUNIT_EXPECT_KASAN_FAIL(test, 1217 call_rcu(&global_rcu_ptr->rcu, rcu_uaf_reclaim); 1218 rcu_barrier()); 1219 } 1220 1221 static void workqueue_uaf_work(struct work_struct *work) 1222 { 1223 kfree(work); 1224 } 1225 1226 static void workqueue_uaf(struct kunit *test) 1227 { 1228 struct workqueue_struct *workqueue; 1229 struct work_struct *work; 1230 1231 workqueue = create_workqueue("kasan_workqueue_test"); 1232 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, workqueue); 1233 1234 work = kmalloc(sizeof(struct work_struct), GFP_KERNEL); 1235 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, work); 1236 1237 INIT_WORK(work, workqueue_uaf_work); 1238 queue_work(workqueue, work); 1239 destroy_workqueue(workqueue); 1240 1241 KUNIT_EXPECT_KASAN_FAIL(test, 1242 ((volatile struct work_struct *)work)->data); 1243 } 1244 1245 static void vmalloc_helpers_tags(struct kunit *test) 1246 { 1247 void *ptr; 1248 1249 /* This test is intended for tag-based modes. */ 1250 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); 1251 1252 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC); 1253 1254 ptr = vmalloc(PAGE_SIZE); 1255 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1256 1257 /* Check that the returned pointer is tagged. */ 1258 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN); 1259 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL); 1260 1261 /* Make sure exported vmalloc helpers handle tagged pointers. */ 1262 KUNIT_ASSERT_TRUE(test, is_vmalloc_addr(ptr)); 1263 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vmalloc_to_page(ptr)); 1264 1265 #if !IS_MODULE(CONFIG_KASAN_KUNIT_TEST) 1266 { 1267 int rv; 1268 1269 /* Make sure vmalloc'ed memory permissions can be changed. */ 1270 rv = set_memory_ro((unsigned long)ptr, 1); 1271 KUNIT_ASSERT_GE(test, rv, 0); 1272 rv = set_memory_rw((unsigned long)ptr, 1); 1273 KUNIT_ASSERT_GE(test, rv, 0); 1274 } 1275 #endif 1276 1277 vfree(ptr); 1278 } 1279 1280 static void vmalloc_oob(struct kunit *test) 1281 { 1282 char *v_ptr, *p_ptr; 1283 struct page *page; 1284 size_t size = PAGE_SIZE / 2 - KASAN_GRANULE_SIZE - 5; 1285 1286 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC); 1287 1288 v_ptr = vmalloc(size); 1289 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr); 1290 1291 OPTIMIZER_HIDE_VAR(v_ptr); 1292 1293 /* 1294 * We have to be careful not to hit the guard page in vmalloc tests. 1295 * The MMU will catch that and crash us. 1296 */ 1297 1298 /* Make sure in-bounds accesses are valid. */ 1299 v_ptr[0] = 0; 1300 v_ptr[size - 1] = 0; 1301 1302 /* 1303 * An unaligned access past the requested vmalloc size. 1304 * Only generic KASAN can precisely detect these. 1305 */ 1306 if (IS_ENABLED(CONFIG_KASAN_GENERIC)) 1307 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size]); 1308 1309 /* An aligned access into the first out-of-bounds granule. */ 1310 KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)v_ptr)[size + 5]); 1311 1312 /* Check that in-bounds accesses to the physical page are valid. */ 1313 page = vmalloc_to_page(v_ptr); 1314 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); 1315 p_ptr = page_address(page); 1316 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr); 1317 p_ptr[0] = 0; 1318 1319 vfree(v_ptr); 1320 1321 /* 1322 * We can't check for use-after-unmap bugs in this nor in the following 1323 * vmalloc tests, as the page might be fully unmapped and accessing it 1324 * will crash the kernel. 1325 */ 1326 } 1327 1328 static void vmap_tags(struct kunit *test) 1329 { 1330 char *p_ptr, *v_ptr; 1331 struct page *p_page, *v_page; 1332 1333 /* 1334 * This test is specifically crafted for the software tag-based mode, 1335 * the only tag-based mode that poisons vmap mappings. 1336 */ 1337 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS); 1338 1339 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC); 1340 1341 p_page = alloc_pages(GFP_KERNEL, 1); 1342 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_page); 1343 p_ptr = page_address(p_page); 1344 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr); 1345 1346 v_ptr = vmap(&p_page, 1, VM_MAP, PAGE_KERNEL); 1347 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr); 1348 1349 /* 1350 * We can't check for out-of-bounds bugs in this nor in the following 1351 * vmalloc tests, as allocations have page granularity and accessing 1352 * the guard page will crash the kernel. 1353 */ 1354 1355 KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN); 1356 KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL); 1357 1358 /* Make sure that in-bounds accesses through both pointers work. */ 1359 *p_ptr = 0; 1360 *v_ptr = 0; 1361 1362 /* Make sure vmalloc_to_page() correctly recovers the page pointer. */ 1363 v_page = vmalloc_to_page(v_ptr); 1364 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_page); 1365 KUNIT_EXPECT_PTR_EQ(test, p_page, v_page); 1366 1367 vunmap(v_ptr); 1368 free_pages((unsigned long)p_ptr, 1); 1369 } 1370 1371 static void vm_map_ram_tags(struct kunit *test) 1372 { 1373 char *p_ptr, *v_ptr; 1374 struct page *page; 1375 1376 /* 1377 * This test is specifically crafted for the software tag-based mode, 1378 * the only tag-based mode that poisons vm_map_ram mappings. 1379 */ 1380 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS); 1381 1382 page = alloc_pages(GFP_KERNEL, 1); 1383 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, page); 1384 p_ptr = page_address(page); 1385 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p_ptr); 1386 1387 v_ptr = vm_map_ram(&page, 1, -1); 1388 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, v_ptr); 1389 1390 KUNIT_EXPECT_GE(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_MIN); 1391 KUNIT_EXPECT_LT(test, (u8)get_tag(v_ptr), (u8)KASAN_TAG_KERNEL); 1392 1393 /* Make sure that in-bounds accesses through both pointers work. */ 1394 *p_ptr = 0; 1395 *v_ptr = 0; 1396 1397 vm_unmap_ram(v_ptr, 1); 1398 free_pages((unsigned long)p_ptr, 1); 1399 } 1400 1401 static void vmalloc_percpu(struct kunit *test) 1402 { 1403 char __percpu *ptr; 1404 int cpu; 1405 1406 /* 1407 * This test is specifically crafted for the software tag-based mode, 1408 * the only tag-based mode that poisons percpu mappings. 1409 */ 1410 KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_SW_TAGS); 1411 1412 ptr = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); 1413 1414 for_each_possible_cpu(cpu) { 1415 char *c_ptr = per_cpu_ptr(ptr, cpu); 1416 1417 KUNIT_EXPECT_GE(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_MIN); 1418 KUNIT_EXPECT_LT(test, (u8)get_tag(c_ptr), (u8)KASAN_TAG_KERNEL); 1419 1420 /* Make sure that in-bounds accesses don't crash the kernel. */ 1421 *c_ptr = 0; 1422 } 1423 1424 free_percpu(ptr); 1425 } 1426 1427 /* 1428 * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN, 1429 * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based 1430 * modes. 1431 */ 1432 static void match_all_not_assigned(struct kunit *test) 1433 { 1434 char *ptr; 1435 struct page *pages; 1436 int i, size, order; 1437 1438 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); 1439 1440 for (i = 0; i < 256; i++) { 1441 size = get_random_u32_inclusive(1, 1024); 1442 ptr = kmalloc(size, GFP_KERNEL); 1443 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1444 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN); 1445 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL); 1446 kfree(ptr); 1447 } 1448 1449 for (i = 0; i < 256; i++) { 1450 order = get_random_u32_inclusive(1, 4); 1451 pages = alloc_pages(GFP_KERNEL, order); 1452 ptr = page_address(pages); 1453 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1454 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN); 1455 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL); 1456 free_pages((unsigned long)ptr, order); 1457 } 1458 1459 if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) 1460 return; 1461 1462 for (i = 0; i < 256; i++) { 1463 size = get_random_u32_inclusive(1, 1024); 1464 ptr = vmalloc(size); 1465 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1466 KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN); 1467 KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL); 1468 vfree(ptr); 1469 } 1470 } 1471 1472 /* Check that 0xff works as a match-all pointer tag for tag-based modes. */ 1473 static void match_all_ptr_tag(struct kunit *test) 1474 { 1475 char *ptr; 1476 u8 tag; 1477 1478 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); 1479 1480 ptr = kmalloc(128, GFP_KERNEL); 1481 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1482 1483 /* Backup the assigned tag. */ 1484 tag = get_tag(ptr); 1485 KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL); 1486 1487 /* Reset the tag to 0xff.*/ 1488 ptr = set_tag(ptr, KASAN_TAG_KERNEL); 1489 1490 /* This access shouldn't trigger a KASAN report. */ 1491 *ptr = 0; 1492 1493 /* Recover the pointer tag and free. */ 1494 ptr = set_tag(ptr, tag); 1495 kfree(ptr); 1496 } 1497 1498 /* Check that there are no match-all memory tags for tag-based modes. */ 1499 static void match_all_mem_tag(struct kunit *test) 1500 { 1501 char *ptr; 1502 int tag; 1503 1504 KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC); 1505 1506 ptr = kmalloc(128, GFP_KERNEL); 1507 KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); 1508 KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL); 1509 1510 /* For each possible tag value not matching the pointer tag. */ 1511 for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) { 1512 if (tag == get_tag(ptr)) 1513 continue; 1514 1515 /* Mark the first memory granule with the chosen memory tag. */ 1516 kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false); 1517 1518 /* This access must cause a KASAN report. */ 1519 KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0); 1520 } 1521 1522 /* Recover the memory tag and free. */ 1523 kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false); 1524 kfree(ptr); 1525 } 1526 1527 static struct kunit_case kasan_kunit_test_cases[] = { 1528 KUNIT_CASE(kmalloc_oob_right), 1529 KUNIT_CASE(kmalloc_oob_left), 1530 KUNIT_CASE(kmalloc_node_oob_right), 1531 KUNIT_CASE(kmalloc_pagealloc_oob_right), 1532 KUNIT_CASE(kmalloc_pagealloc_uaf), 1533 KUNIT_CASE(kmalloc_pagealloc_invalid_free), 1534 KUNIT_CASE(pagealloc_oob_right), 1535 KUNIT_CASE(pagealloc_uaf), 1536 KUNIT_CASE(kmalloc_large_oob_right), 1537 KUNIT_CASE(krealloc_more_oob), 1538 KUNIT_CASE(krealloc_less_oob), 1539 KUNIT_CASE(krealloc_pagealloc_more_oob), 1540 KUNIT_CASE(krealloc_pagealloc_less_oob), 1541 KUNIT_CASE(krealloc_uaf), 1542 KUNIT_CASE(kmalloc_oob_16), 1543 KUNIT_CASE(kmalloc_uaf_16), 1544 KUNIT_CASE(kmalloc_oob_in_memset), 1545 KUNIT_CASE(kmalloc_oob_memset_2), 1546 KUNIT_CASE(kmalloc_oob_memset_4), 1547 KUNIT_CASE(kmalloc_oob_memset_8), 1548 KUNIT_CASE(kmalloc_oob_memset_16), 1549 KUNIT_CASE(kmalloc_memmove_negative_size), 1550 KUNIT_CASE(kmalloc_memmove_invalid_size), 1551 KUNIT_CASE(kmalloc_uaf), 1552 KUNIT_CASE(kmalloc_uaf_memset), 1553 KUNIT_CASE(kmalloc_uaf2), 1554 KUNIT_CASE(kmalloc_uaf3), 1555 KUNIT_CASE(kfree_via_page), 1556 KUNIT_CASE(kfree_via_phys), 1557 KUNIT_CASE(kmem_cache_oob), 1558 KUNIT_CASE(kmem_cache_accounted), 1559 KUNIT_CASE(kmem_cache_bulk), 1560 KUNIT_CASE(kasan_global_oob_right), 1561 KUNIT_CASE(kasan_global_oob_left), 1562 KUNIT_CASE(kasan_stack_oob), 1563 KUNIT_CASE(kasan_alloca_oob_left), 1564 KUNIT_CASE(kasan_alloca_oob_right), 1565 KUNIT_CASE(ksize_unpoisons_memory), 1566 KUNIT_CASE(ksize_uaf), 1567 KUNIT_CASE(kmem_cache_double_free), 1568 KUNIT_CASE(kmem_cache_invalid_free), 1569 KUNIT_CASE(kmem_cache_double_destroy), 1570 KUNIT_CASE(kasan_memchr), 1571 KUNIT_CASE(kasan_memcmp), 1572 KUNIT_CASE(kasan_strings), 1573 KUNIT_CASE(kasan_bitops_generic), 1574 KUNIT_CASE(kasan_bitops_tags), 1575 KUNIT_CASE(kmalloc_double_kzfree), 1576 KUNIT_CASE(rcu_uaf), 1577 KUNIT_CASE(workqueue_uaf), 1578 KUNIT_CASE(vmalloc_helpers_tags), 1579 KUNIT_CASE(vmalloc_oob), 1580 KUNIT_CASE(vmap_tags), 1581 KUNIT_CASE(vm_map_ram_tags), 1582 KUNIT_CASE(vmalloc_percpu), 1583 KUNIT_CASE(match_all_not_assigned), 1584 KUNIT_CASE(match_all_ptr_tag), 1585 KUNIT_CASE(match_all_mem_tag), 1586 {} 1587 }; 1588 1589 static struct kunit_suite kasan_kunit_test_suite = { 1590 .name = "kasan", 1591 .test_cases = kasan_kunit_test_cases, 1592 .exit = kasan_test_exit, 1593 .suite_init = kasan_suite_init, 1594 .suite_exit = kasan_suite_exit, 1595 }; 1596 1597 kunit_test_suite(kasan_kunit_test_suite); 1598 1599 MODULE_LICENSE("GPL"); 1600