#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>

static bool __page_poisoning_enabled __read_mostly;
static bool want_page_poisoning __read_mostly;

static int early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);

bool page_poisoning_enabled(void)
{
	return __page_poisoning_enabled;
}

static bool need_page_poisoning(void)
{
	return want_page_poisoning;
}

static void init_page_poisoning(void)
{
	/*
	 * Page poisoning is debug page alloc for some arches. If either
	 * of those options is enabled, enable poisoning.
	 */
	if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
		if (!want_page_poisoning && !debug_pagealloc_enabled())
			return;
	} else {
		if (!want_page_poisoning)
			return;
	}

	__page_poisoning_enabled = true;
}

struct page_ext_operations page_poisoning_ops = {
	.need = need_page_poisoning,
	.init = init_page_poisoning,
};

static inline void set_page_poison(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

static inline void clear_page_poison(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

bool page_is_poisoned(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	if (!page_ext)
		return false;

	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

/* Fill one page with the poison pattern and flag it as poisoned. */
static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	set_page_poison(page);
	memset(addr, PAGE_POISON, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

/* True iff @a and @b differ in exactly one bit. */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	/* error & (error - 1) clears the lowest set bit of error. */
	return error && !(error & (error - 1));
}

static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	/* Locate the first byte that no longer matches the pattern. */
	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	/* Walk back from the end to find the last corrupted byte. */
	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}
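/*
 * Worked example (illustrative, not part of the original file): with
 * PAGE_POISON == 0xaa, a byte read back as 0xab gives
 * 0xaa ^ 0xab == 0x01, and 0x01 & (0x01 - 1) == 0, so single_bit_flip()
 * returns true and check_poison_mem() reports a single bit error rather
 * than general memory corruption.
 */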
/* Verify the poison pattern is intact, then clear the poison flag. */
static void unpoison_page(struct page *page)
{
	void *addr;

	if (!page_is_poisoned(page))
		return;

	addr = kmap_atomic(page);
	check_poison_mem(addr, PAGE_SIZE);
	clear_page_poison(page);
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing; all work is done via poison pages. */
}
#endif
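/*
 * Usage sketch (illustrative; the call sites live in the page allocator,
 * mm/page_alloc.c, not in this file): pages are poisoned as they are
 * freed and the pattern is verified as they are handed out again,
 * roughly:
 *
 *	kernel_poison_pages(page, 1 << order, 0);	// on free
 *	kernel_poison_pages(page, 1 << order, 1);	// on allocation
 */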