// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

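/*
 * The shadow and origin pages of a given data page are reachable through
 * dedicated struct page fields (see the CONFIG_KMSAN section of
 * <linux/mm_types.h>).
 */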
#define shadow_page_for(page) ((page)->kmsan_shadow)

#define origin_page_for(page) ((page)->kmsan_origin)

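/* Kernel virtual addresses of the shadow and origin metadata for @page. */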
static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

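/* True iff both shadow and origin pages are allocated for @page. */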
static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

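/* Mark @page as having no shadow or origin pages. */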
static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}

/*
 * Dummy load and store pages to be used when the real metadata is unavailable.
 * There are separate pages for loads and stores, so that every load returns a
 * zero, and every store doesn't affect other loads.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);

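/*
 * For vmalloc and module addresses, metadata lives at a fixed offset from
 * the address itself, e.g. for shadow within vmalloc:
 *	meta = addr - VMALLOC_START + KMSAN_VMALLOC_SHADOW_START
 * Returns 0 for addresses that belong to neither region.
 */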
static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}

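/* A virt_to_page() that returns NULL if @vaddr has no valid struct page. */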
static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	else
		return NULL;
}

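/*
 * Return the shadow/origin pair used to instrument an access of @size bytes
 * at @address. If metadata is missing (or KMSAN is disabled), the access is
 * transparently redirected to the dummy pages above, so the caller never
 * sees a NULL pointer. A rough sketch of how compiler-injected hooks (see
 * mm/kmsan/instrumentation.c) consume the result for an 8-byte load:
 *
 *	sp = kmsan_get_shadow_origin_ptr(addr, 8, false);
 *	if (*(u64 *)sp.shadow)
 *		// report use of uninit value, origin at *(u32 *)sp.origin
 */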
struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}

/*
 * Obtain the shadow or origin pointer for the given address, or NULL if
 * there's none. The caller must check the return value for being non-NULL
 * if needed.
 * The return value of this function should not depend on whether we're in
 * the runtime or not.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, pad, off;
	struct page *page;
	void *ret;

	if (is_origin && !IS_ALIGNED(addr, KMSAN_ORIGIN_SIZE)) {
		pad = addr % KMSAN_ORIGIN_SIZE;
		addr -= pad;
	}
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = offset_in_page(addr);

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}

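/*
 * Copy KMSAN metadata from @src to @dst after a page copy. If @src has no
 * metadata, @dst is marked as fully initialized instead, so that its stale
 * shadow cannot trigger false reports.
 */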
void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_copy_page_meta);

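/*
 * Set up metadata for a newly allocated block of 2^order pages: zeroed
 * (initialized) metadata for __GFP_ZERO allocations, otherwise 0xff-filled
 * (uninitialized) shadow with origins pointing to the allocation stack.
 */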
void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}

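/*
 * Mark a freed page as uninitialized again; the origin records that the
 * poisoning happened at free time.
 */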
void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page),
				     page_size(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}

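/*
 * Counterpart of __vmap_pages_range_noflush() for metadata: when a range of
 * pages is vmapped, map their shadow and origin pages at the corresponding
 * fixed offsets in the metadata regions.
 */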
int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped, err = 0;

	if (!kmsan_enabled)
		return 0;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return 0;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages) {
		err = -ENOMEM;
		goto ret;
	}
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}
	/*
	 * Metadata pages are always mapped with the default kernel
	 * protections; the protection bits requested in @prot only apply to
	 * the data pages themselves.
	 */
	prot = PAGE_KERNEL;

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	kmsan_leave_runtime();
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
	return err;
}

/* Allocate metadata for pages allocated at boot time. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)PAGE_ALIGN_DOWN((u64)start);
	size = PAGE_ALIGN((u64)end - (u64)start);
	shadow = memblock_alloc(size, PAGE_SIZE);
	origin = memblock_alloc(size, PAGE_SIZE);
	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page_or_null((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page_or_null((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}

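/*
 * Attach freshly allocated @shadow and @origin pages to the 2^order data
 * pages starting at @page; the metadata pages themselves get no metadata.
 */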
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}