// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

static bool want_page_poisoning __read_mostly;

static int __init early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);

/**
 * page_poisoning_enabled - check if page poisoning is enabled
 *
 * Return true if page poisoning is enabled, or false if not.
 */
bool page_poisoning_enabled(void)
{
	/*
	 * Assumes that debug_pagealloc_enabled is set before
	 * memblock_free_all.
	 * Page poisoning stands in for debug page alloc on arches that
	 * don't support it, so if either of those options is enabled,
	 * enable poisoning.
	 */
	return (want_page_poisoning ||
		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
		debug_pagealloc_enabled()));
}
EXPORT_SYMBOL_GPL(page_poisoning_enabled);

static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still thinks the page is in use, so skip it. */
	kasan_disable_current();
	memset(addr, PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

/*
 * a ^ b has exactly one bit set iff a and b differ in a single bit;
 * (error & (error - 1)) clears the lowest set bit, so it is zero
 * exactly when error is a power of two.
 */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	/* Find the first byte that no longer holds the poison pattern. */
	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	/* Trim trailing poison bytes to narrow down the corrupted range. */
	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}

static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	/*
	 * When page poisoning is enabled, each and every page freed to
	 * buddy gets poisoned, so no extra check is needed here to see
	 * whether this page was poisoned.
	 */
	check_poison_mem(addr, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

/*
 * enable == 1 means the pages are being allocated: verify that the
 * poison pattern written at free time is still intact and report any
 * corruption.  enable == 0 means the pages are being freed: fill them
 * with PAGE_POISON.
 */
void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing; all the work is done via poisoning pages. */
}
#endif
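
/*
 * Usage sketch (illustrative, not part of the upstream file): the page
 * allocator is expected to drive this code from its allocation and
 * free paths.  In mm/page_alloc.c of this era the calls look roughly
 * like the following; exact call sites may differ by kernel version:
 *
 *	post_alloc_hook():                          (allocation path)
 *		kernel_poison_pages(page, 1 << order, 1);  // verify poison
 *
 *	free_pages_prepare():                       (free path)
 *		kernel_poison_pages(page, 1 << order, 0);  // write poison
 */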