// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to logic bugs (e.g. bad dereferences,
 * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
 * lockups) along with other things that don't fit well into existing LKDTM
 * test source files.
 */
#include "lkdtm.h"
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
#include <asm/desc.h>
#endif

struct lkdtm_list {
	struct list_head node;
};

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (_AC(CONFIG_FRAME_WARN, UL) / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
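
/*
 * A worked example (the values are illustrative, not guaranteed): assuming
 * CONFIG_FRAME_WARN=1024 and a 16 KiB THREAD_SIZE, REC_STACK_SIZE is
 * 1024 / 2 = 512 bytes and REC_NUM_DEFAULT is (16384 / 512) * 2 = 64
 * recursive calls -- twice as many frames as the stack can actually hold.
 */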

static int recur_count = REC_NUM_DEFAULT;

static DEFINE_SPINLOCK(lock_me_up);

/*
 * Make sure the compiler does not optimize this function or stack frame away:
 * - function marked noinline
 * - stack variables are marked volatile
 * - stack variables are written (memset()) and read (pr_info())
 * - function has external effects (pr_info())
 */
static int noinline recursive_loop(int remaining)
{
	volatile char buf[REC_STACK_SIZE];

	memset((void *)buf, remaining & 0xFF, sizeof(buf));
	pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
		recur_count);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}

/* If the depth is negative, use the default, otherwise keep the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
	if (*recur_param < 0)
		*recur_param = recur_count;
	else
		recur_count = *recur_param;
}

void lkdtm_PANIC(void)
{
	panic("dumptest");
}

void lkdtm_BUG(void)
{
	BUG();
}

static int warn_counter;

void lkdtm_WARNING(void)
{
	WARN_ON(++warn_counter);
}

void lkdtm_WARNING_MESSAGE(void)
{
	WARN(1, "Warning message trigger count: %d\n", ++warn_counter);
}

void lkdtm_EXCEPTION(void)
{
	*((volatile int *) 0) = 0;
}

void lkdtm_LOOP(void)
{
	for (;;)
		;
}

void lkdtm_EXHAUST_STACK(void)
{
	pr_info("Calling function with %lu frame size to depth %d ...\n",
		REC_STACK_SIZE, recur_count);
	recursive_loop(recur_count);
	pr_info("FAIL: survived without exhausting stack?!\n");
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
	memset(stack, '\xff', 64);
}

/* This should trip the stack canary, not corrupt the return address. */
noinline void lkdtm_CORRUPT_STACK(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8] __aligned(sizeof(void *));

	pr_info("Corrupting stack containing char array ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

/* Same as above but will only get a canary with -fstack-protector-strong */
noinline void lkdtm_CORRUPT_STACK_STRONG(void)
{
	union {
		unsigned short shorts[4];
		unsigned long *ptr;
	} data __aligned(sizeof(void *));

	pr_info("Corrupting stack containing union ...\n");
	__lkdtm_CORRUPT_STACK((void *)&data);
}

static pid_t stack_pid;
static unsigned long stack_addr;

void lkdtm_REPORT_STACK(void)
{
	volatile uintptr_t magic;
	pid_t pid = task_pid_nr(current);

	if (pid != stack_pid) {
		pr_info("Starting stack offset tracking for pid %d\n", pid);
		stack_pid = pid;
		stack_addr = (uintptr_t)&magic;
	}

	pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}

void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
	static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
	u32 *p;
	u32 val = 0x12345678;

	p = (u32 *)(data + 1);
	if (*p == 0)
		val = 0x87654321;
	*p = val;
}

void lkdtm_SOFTLOCKUP(void)
{
	preempt_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_HARDLOCKUP(void)
{
	local_irq_disable();
	for (;;)
		cpu_relax();
}

void lkdtm_SPINLOCKUP(void)
{
	/* Must be called twice to trigger. */
	spin_lock(&lock_me_up);
	/* Let sparse know we intended to exit holding the lock. */
	__release(&lock_me_up);
}

void lkdtm_HUNG_TASK(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();
}

volatile unsigned int huge = INT_MAX - 2;
volatile unsigned int ignored;

void lkdtm_OVERFLOW_SIGNED(void)
{
	int value;

	value = huge;
	pr_info("Normal signed addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing signed addition ...\n");
	value += 4;
	ignored = value;
}

void lkdtm_OVERFLOW_UNSIGNED(void)
{
	unsigned int value;

	value = huge;
	pr_info("Normal unsigned addition ...\n");
	value += 1;
	ignored = value;

	pr_info("Overflowing unsigned addition ...\n");
	value += 4;
	ignored = value;
}
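
/*
 * Note: the OVERFLOW_* tests above are only expected to be reported when
 * the kernel is built with UBSAN's integer-overflow instrumentation.
 * Signed overflow is undefined behavior in C; unsigned wraparound is
 * well-defined and is only flagged by the dedicated unsigned-overflow
 * sanitizer. Without such instrumentation, both tests complete silently.
 */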

/* Intentionally using old-style flex array definition of 1 byte. */
struct array_bounds_flex_array {
	int one;
	int two;
	char data[1];
};

struct array_bounds {
	int one;
	int two;
	char data[8];
	int three;
};

void lkdtm_ARRAY_BOUNDS(void)
{
	struct array_bounds_flex_array *not_checked;
	struct array_bounds *checked;
	volatile int i;

	not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
	checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
	if (!not_checked || !checked) {
		kfree(not_checked);
		kfree(checked);
		return;
	}

	pr_info("Array access within bounds ...\n");
	/* For both, touch all bytes in the actual member size. */
	for (i = 0; i < sizeof(checked->data); i++)
		checked->data[i] = 'A';
	/*
	 * For the uninstrumented flex array member, also touch 1 byte
	 * beyond to verify it is correctly uninstrumented.
	 */
	for (i = 0; i < sizeof(not_checked->data) + 1; i++)
		not_checked->data[i] = 'A';

	pr_info("Array access beyond bounds ...\n");
	for (i = 0; i < sizeof(checked->data) + 1; i++)
		checked->data[i] = 'B';

	kfree(not_checked);
	kfree(checked);
	pr_err("FAIL: survived array bounds overflow!\n");
}

void lkdtm_CORRUPT_LIST_ADD(void)
{
	/*
	 * Initially, an empty list via LIST_HEAD:
	 *	test_head.next = &test_head
	 *	test_head.prev = &test_head
	 */
	LIST_HEAD(test_head);
	struct lkdtm_list good, bad;
	void *target[2] = { };
	void *redirection = &target;

	pr_info("attempting good list addition\n");

	/*
	 * Adding to the list performs these actions:
	 *	test_head.next->prev = &good.node
	 *	good.node.next = test_head.next
	 *	good.node.prev = &test_head
	 *	test_head.next = &good.node
	 */
	list_add(&good.node, &test_head);

	pr_info("attempting corrupted list addition\n");
	/*
	 * In simulating this "write what where" primitive, the "what" is
	 * the address of &bad.node, and the "where" is the address held
	 * by "redirection".
	 */
	test_head.next = redirection;
	list_add(&bad.node, &test_head);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_add() corruption not detected!\n");
}

void lkdtm_CORRUPT_LIST_DEL(void)
{
	LIST_HEAD(test_head);
	struct lkdtm_list item;
	void *target[2] = { };
	void *redirection = &target;

	list_add(&item.node, &test_head);

	pr_info("attempting good list removal\n");
	list_del(&item.node);

	pr_info("attempting corrupted list removal\n");
	list_add(&item.node, &test_head);

	/* As with the list_add() test above, this corrupts "next". */
	item.node.next = redirection;
	list_del(&item.node);

	if (target[0] == NULL && target[1] == NULL)
		pr_err("Overwrite did not happen, but no BUG?!\n");
	else
		pr_err("list_del() corruption not detected!\n");
}
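
/*
 * Note: the list corruption attempts above are expected to be caught by
 * the linked-list sanity checks enabled with CONFIG_DEBUG_LIST, which
 * reject the operation before the stray write lands in "target". Without
 * that option the overwrite succeeds and the tests report failure.
 */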

/* Test that VMAP_STACK is actually allocating with a leading guard page */
void lkdtm_STACK_GUARD_PAGE_LEADING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack - 1;
	volatile unsigned char byte;

	pr_info("attempting bad read from page below current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page before stack! (byte: %x)\n", byte);
}

/* Test that VMAP_STACK is actually allocating with a trailing guard page */
void lkdtm_STACK_GUARD_PAGE_TRAILING(void)
{
	const unsigned char *stack = task_stack_page(current);
	const unsigned char *ptr = stack + THREAD_SIZE;
	volatile unsigned char byte;

	pr_info("attempting bad read from page above current stack\n");

	byte = *ptr;

	pr_err("FAIL: accessed page after stack! (byte: %x)\n", byte);
}

void lkdtm_UNSET_SMEP(void)
{
#if IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_UML)
#define MOV_CR4_DEPTH	64
	void (*direct_write_cr4)(unsigned long val);
	unsigned char *insn;
	unsigned long cr4;
	int i;

	cr4 = native_read_cr4();

	if ((cr4 & X86_CR4_SMEP) != X86_CR4_SMEP) {
		pr_err("FAIL: SMEP not in use\n");
		return;
	}
	cr4 &= ~(X86_CR4_SMEP);

	pr_info("trying to clear SMEP normally\n");
	native_write_cr4(cr4);
	if (cr4 == native_read_cr4()) {
		pr_err("FAIL: pinning SMEP failed!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
		return;
	}
	pr_info("ok: SMEP did not get cleared\n");

	/*
	 * To test the post-write pinning verification we need to call
	 * directly into the middle of native_write_cr4() where the
	 * cr4 write happens, skipping any pinning. This searches for
	 * the cr4 writing instruction.
	 */
	insn = (unsigned char *)native_write_cr4;
	for (i = 0; i < MOV_CR4_DEPTH; i++) {
		/* mov %rdi, %cr4 */
		if (insn[i] == 0x0f && insn[i+1] == 0x22 && insn[i+2] == 0xe7)
			break;
		/* mov %rdi,%rax; mov %rax, %cr4 */
		if (insn[i]   == 0x48 && insn[i+1] == 0x89 &&
		    insn[i+2] == 0xf8 && insn[i+3] == 0x0f &&
		    insn[i+4] == 0x22 && insn[i+5] == 0xe0)
			break;
	}
	if (i >= MOV_CR4_DEPTH) {
		pr_info("ok: cannot locate cr4 writing call gadget\n");
		return;
	}
	direct_write_cr4 = (void *)(insn + i);

	pr_info("trying to clear SMEP with call gadget\n");
	direct_write_cr4(cr4);
	if (native_read_cr4() & X86_CR4_SMEP) {
		pr_info("ok: SMEP removal was reverted\n");
	} else {
		pr_err("FAIL: cleared SMEP not detected!\n");
		cr4 |= X86_CR4_SMEP;
		pr_info("restoring SMEP\n");
		native_write_cr4(cr4);
	}
#else
	pr_err("XFAIL: this test is x86_64-only\n");
#endif
}

void lkdtm_DOUBLE_FAULT(void)
{
#if IS_ENABLED(CONFIG_X86_32) && !IS_ENABLED(CONFIG_UML)
	/*
	 * Trigger #DF by setting the stack limit to zero. This clobbers
	 * a GDT TLS slot, which is okay because the current task will die
	 * anyway due to the double fault.
	 */
	struct desc_struct d = {
		.type = 3,	/* expand-up, writable, accessed data */
		.p = 1,		/* present */
		.d = 1,		/* 32-bit */
		.g = 0,		/* limit in bytes */
		.s = 1,		/* not system */
	};

	local_irq_disable();
	write_gdt_entry(get_cpu_gdt_rw(smp_processor_id()),
			GDT_ENTRY_TLS_MIN, &d, DESCTYPE_S);

	/*
	 * Put our zero-limit segment in SS and then trigger a fault. The
	 * 4-byte access to (%esp) will fault with #SS, and the attempt to
	 * deliver the fault will recursively cause #SS and result in #DF.
	 * This whole process happens while NMIs and MCEs are blocked by the
	 * MOV SS window. This is nice because an NMI with an invalid SS
	 * would also double-fault, resulting in the NMI or MCE being lost.
	 */
	asm volatile ("movw %0, %%ss; addl $0, (%%esp)" ::
		      "r" ((unsigned short)(GDT_ENTRY_TLS_MIN << 3)));

	pr_err("FAIL: tried to double fault but didn't die\n");
#else
	pr_err("XFAIL: this test is ia32-only\n");
#endif
}

#ifdef CONFIG_ARM64
static noinline void change_pac_parameters(void)
{
	if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH)) {
		/* Reset the keys of current task */
		ptrauth_thread_init_kernel(current);
		ptrauth_thread_switch_kernel(current);
	}
}
#endif

noinline void lkdtm_CORRUPT_PAC(void)
{
#ifdef CONFIG_ARM64
#define CORRUPT_PAC_ITERATE	10
	int i;

	if (!IS_ENABLED(CONFIG_ARM64_PTR_AUTH))
		pr_err("FAIL: kernel not built with CONFIG_ARM64_PTR_AUTH\n");

	if (!system_supports_address_auth()) {
		pr_err("FAIL: CPU lacks pointer authentication feature\n");
		return;
	}

	pr_info("changing PAC parameters to force function return failure...\n");
	/*
	 * A PAC is a hash computed from the input keys, the return address,
	 * and the stack pointer. Because the PAC field has only a few bits,
	 * collisions are possible, so iterate a few times to reduce the
	 * probability that a stale signature still validates.
	 */
	for (i = 0; i < CORRUPT_PAC_ITERATE; i++)
		change_pac_parameters();

	pr_err("FAIL: survived PAC changes! Kernel may be unstable from here\n");
#else
	pr_err("XFAIL: this test is arm64-only\n");
#endif
}
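
/*
 * The FORTIFY_* tests below exercise the compile-time and runtime bounds
 * checking added by CONFIG_FORTIFY_SOURCE; without that option they are
 * expected to survive and report failure.
 */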

void lkdtm_FORTIFY_OBJECT(void)
{
	struct target {
		char a[10];
	} target[2] = {};
	int result;

	/*
	 * Using volatile prevents the compiler from determining the value of
	 * 'size' at compile time. Without that, we would get a compile error
	 * rather than a runtime error.
	 */
	volatile int size = 11;

	pr_info("trying to read past the end of a struct\n");

	result = memcmp(&target[0], &target[1], size);

	/* Print result to prevent the code from being eliminated */
	pr_err("FAIL: fortify did not catch an object overread!\n"
	       "\"%d\" was the memcmp result.\n", result);
}

void lkdtm_FORTIFY_SUBOBJECT(void)
{
	struct target {
		char a[10];
		char b[10];
	} target;
	char *src;

	src = kmalloc(20, GFP_KERNEL);
	if (!src)
		return;
	strscpy(src, "over ten bytes", 20);

	pr_info("trying to strcpy past the end of a member of a struct\n");

	/*
	 * strncpy(target.a, src, 20); will hit a compile error because the
	 * compiler knows at build time that target.a < 20 bytes. Use strcpy()
	 * to force a runtime error.
	 */
	strcpy(target.a, src);

	/* Use target.a to prevent the code from being eliminated */
	pr_err("FAIL: fortify did not catch a sub-object overrun!\n"
	       "\"%s\" was copied.\n", target.a);

	kfree(src);
}
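
/*
 * Usage note (not part of the kernel build): like all LKDTM crash points,
 * the tests in this file are normally triggered through debugfs, e.g.:
 *
 *	echo EXHAUST_STACK > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * Most of them intentionally crash or wedge the running kernel, so run
 * them in a disposable VM.
 */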