xref: /openbmc/linux/mm/kmsan/shadow.c (revision 108c3dc6)
// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN shadow implementation.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/kmsan.h>
#include <asm/tlbflush.h>
#include <linux/cacheflush.h>
#include <linux/memblock.h>
#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/stddef.h>

#include "../internal.h"
#include "kmsan.h"

#define shadow_page_for(page) ((page)->kmsan_shadow)

#define origin_page_for(page) ((page)->kmsan_origin)

static void *shadow_ptr_for(struct page *page)
{
	return page_address(shadow_page_for(page));
}

static void *origin_ptr_for(struct page *page)
{
	return page_address(origin_page_for(page));
}

static bool page_has_metadata(struct page *page)
{
	return shadow_page_for(page) && origin_page_for(page);
}

static void set_no_shadow_origin_page(struct page *page)
{
	shadow_page_for(page) = NULL;
	origin_page_for(page) = NULL;
}

/*
 * Dummy load and store pages to be used when the real metadata is unavailable.
 * There are separate pages for loads and stores, so that every load returns a
 * zero, and every store doesn't affect other loads.
 */
static char dummy_load_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static char dummy_store_page[PAGE_SIZE] __aligned(PAGE_SIZE);

static unsigned long vmalloc_meta(void *addr, bool is_origin)
{
	unsigned long addr64 = (unsigned long)addr, off;

	KMSAN_WARN_ON(is_origin && !IS_ALIGNED(addr64, KMSAN_ORIGIN_SIZE));
	if (kmsan_internal_is_vmalloc_addr(addr)) {
		off = addr64 - VMALLOC_START;
		return off + (is_origin ? KMSAN_VMALLOC_ORIGIN_START :
					  KMSAN_VMALLOC_SHADOW_START);
	}
	if (kmsan_internal_is_module_addr(addr)) {
		off = addr64 - MODULES_VADDR;
		return off + (is_origin ? KMSAN_MODULES_ORIGIN_START :
					  KMSAN_MODULES_SHADOW_START);
	}
	return 0;
}
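
/*
 * Worked example (illustrative): for a vmalloc address
 * addr == VMALLOC_START + 0x1000, vmalloc_meta() returns
 * KMSAN_VMALLOC_SHADOW_START + 0x1000 for the shadow and
 * KMSAN_VMALLOC_ORIGIN_START + 0x1000 for the origin: both metadata ranges
 * mirror the vmalloc (or modules) area at a constant offset.
 */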

static struct page *virt_to_page_or_null(void *vaddr)
{
	if (kmsan_virt_addr_valid(vaddr))
		return virt_to_page(vaddr);
	else
		return NULL;
}

struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *address, u64 size,
						     bool store)
{
	struct shadow_origin_ptr ret;
	void *shadow;

	/*
	 * Even if we redirect this memory access to the dummy page, it will
	 * go out of bounds.
	 */
	KMSAN_WARN_ON(size > PAGE_SIZE);

	if (!kmsan_enabled)
		goto return_dummy;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(address, size));
	shadow = kmsan_get_metadata(address, KMSAN_META_SHADOW);
	if (!shadow)
		goto return_dummy;

	ret.shadow = shadow;
	ret.origin = kmsan_get_metadata(address, KMSAN_META_ORIGIN);
	return ret;

return_dummy:
	if (store) {
		/* Ignore this store. */
		ret.shadow = dummy_store_page;
		ret.origin = dummy_store_page;
	} else {
		/* This load will return zero. */
		ret.shadow = dummy_load_page;
		ret.origin = dummy_load_page;
	}
	return ret;
}
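
/*
 * Usage sketch (illustrative only, not part of this file): checked memory
 * accesses are ultimately funneled through kmsan_get_shadow_origin_ptr();
 * the hypothetical helper below shows the calling convention.
 */
#if 0
static void example_metadata_for_load(void *addr)
{
	struct shadow_origin_ptr sp;

	/* Request metadata for an 8-byte load at @addr. */
	sp = kmsan_get_shadow_origin_ptr(addr, 8, /*store*/ false);
	/*
	 * sp.shadow and sp.origin are always dereferenceable: they point
	 * either at the real metadata or at the dummy pages above.
	 */
}
#endif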

/*
 * Obtain the shadow or origin pointer for the given address, or NULL if there's
 * none. The caller must check the return value for being non-NULL if needed.
 * The return value of this function should not depend on whether we're in the
 * runtime or not.
 */
void *kmsan_get_metadata(void *address, bool is_origin)
{
	u64 addr = (u64)address, pad, off;
	struct page *page;
	void *ret;

	if (is_origin && !IS_ALIGNED(addr, KMSAN_ORIGIN_SIZE)) {
		pad = addr % KMSAN_ORIGIN_SIZE;
		addr -= pad;
	}
	address = (void *)addr;
	if (kmsan_internal_is_vmalloc_addr(address) ||
	    kmsan_internal_is_module_addr(address))
		return (void *)vmalloc_meta(address, is_origin);

	ret = arch_kmsan_get_meta_or_null(address, is_origin);
	if (ret)
		return ret;

	page = virt_to_page_or_null(address);
	if (!page)
		return NULL;
	if (!page_has_metadata(page))
		return NULL;
	off = offset_in_page(addr);

	return (is_origin ? origin_ptr_for(page) : shadow_ptr_for(page)) + off;
}
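
/*
 * Usage sketch (illustrative only): internal callers typically look up shadow
 * and origin separately and skip the range if either is missing, roughly:
 */
#if 0
	shadow = kmsan_get_metadata(addr, KMSAN_META_SHADOW);
	origin = kmsan_get_metadata(addr, KMSAN_META_ORIGIN);
	if (!shadow || !origin)
		return;	/* No metadata for this address, nothing to do. */
#endif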

void kmsan_copy_page_meta(struct page *dst, struct page *src)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	if (!dst || !page_has_metadata(dst))
		return;
	if (!src || !page_has_metadata(src)) {
		kmsan_internal_unpoison_memory(page_address(dst), PAGE_SIZE,
					       /*checked*/ false);
		return;
	}

	kmsan_enter_runtime();
	__memcpy(shadow_ptr_for(dst), shadow_ptr_for(src), PAGE_SIZE);
	__memcpy(origin_ptr_for(dst), origin_ptr_for(src), PAGE_SIZE);
	kmsan_leave_runtime();
}
EXPORT_SYMBOL(kmsan_copy_page_meta);

void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags)
{
	bool initialized = (flags & __GFP_ZERO) || !kmsan_enabled;
	struct page *shadow, *origin;
	depot_stack_handle_t handle;
	int pages = 1 << order;

	if (!page)
		return;

	shadow = shadow_page_for(page);
	origin = origin_page_for(page);

	if (initialized) {
		__memset(page_address(shadow), 0, PAGE_SIZE * pages);
		__memset(page_address(origin), 0, PAGE_SIZE * pages);
		return;
	}

	/* Zero pages allocated by the runtime should also be initialized. */
	if (kmsan_in_runtime())
		return;

	__memset(page_address(shadow), -1, PAGE_SIZE * pages);
	kmsan_enter_runtime();
	handle = kmsan_save_stack_with_flags(flags, /*extra_bits*/ 0);
	kmsan_leave_runtime();
	/*
	 * Addresses are page-aligned, pages are contiguous, so it's ok
	 * to just fill the origin pages with @handle.
	 */
	for (int i = 0; i < PAGE_SIZE * pages / sizeof(handle); i++)
		((depot_stack_handle_t *)page_address(origin))[i] = handle;
}
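
/*
 * Note (assumption about the call site): the page allocator is expected to
 * call kmsan_alloc_page() for every newly allocated block of pages. For an
 * order-1 allocation with 4 KiB pages and 4-byte depot handles, the loop
 * above writes the same handle into 2 * 4096 / 4 = 2048 origin slots.
 */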

void kmsan_free_page(struct page *page, unsigned int order)
{
	if (!kmsan_enabled || kmsan_in_runtime())
		return;
	kmsan_enter_runtime();
	kmsan_internal_poison_memory(page_address(page),
				     page_size(page),
				     GFP_KERNEL,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
	kmsan_leave_runtime();
}
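
/*
 * Note: poisoning with KMSAN_POISON_FREE marks the freed pages as
 * uninitialized again, so a later read of that memory is reported as a use
 * of an uninitialized value.
 */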

int kmsan_vmap_pages_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages,
				   unsigned int page_shift)
{
	unsigned long shadow_start, origin_start, shadow_end, origin_end;
	struct page **s_pages, **o_pages;
	int nr, mapped, err = 0;

	if (!kmsan_enabled)
		return 0;

	shadow_start = vmalloc_meta((void *)start, KMSAN_META_SHADOW);
	shadow_end = vmalloc_meta((void *)end, KMSAN_META_SHADOW);
	if (!shadow_start)
		return 0;

	nr = (end - start) / PAGE_SIZE;
	s_pages = kcalloc(nr, sizeof(*s_pages), GFP_KERNEL);
	o_pages = kcalloc(nr, sizeof(*o_pages), GFP_KERNEL);
	if (!s_pages || !o_pages) {
		err = -ENOMEM;
		goto ret;
	}
	for (int i = 0; i < nr; i++) {
		s_pages[i] = shadow_page_for(pages[i]);
		o_pages[i] = origin_page_for(pages[i]);
	}
	prot = __pgprot(pgprot_val(prot) | _PAGE_NX);
	prot = PAGE_KERNEL;

	origin_start = vmalloc_meta((void *)start, KMSAN_META_ORIGIN);
	origin_end = vmalloc_meta((void *)end, KMSAN_META_ORIGIN);
	kmsan_enter_runtime();
	mapped = __vmap_pages_range_noflush(shadow_start, shadow_end, prot,
					    s_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	mapped = __vmap_pages_range_noflush(origin_start, origin_end, prot,
					    o_pages, page_shift);
	if (mapped) {
		err = mapped;
		goto ret;
	}
	kmsan_leave_runtime();
	flush_tlb_kernel_range(shadow_start, shadow_end);
	flush_tlb_kernel_range(origin_start, origin_end);
	flush_cache_vmap(shadow_start, shadow_end);
	flush_cache_vmap(origin_start, origin_end);

ret:
	kfree(s_pages);
	kfree(o_pages);
	return err;
}
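
/*
 * Note (assumption about the call site): this is expected to be called from
 * the vmalloc mapping path alongside __vmap_pages_range_noflush(), so that
 * every vmalloc'ed range gets shadow and origin mappings backed by the
 * per-page metadata of @pages.
 */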

/* Allocate metadata for pages allocated at boot time. */
void __init kmsan_init_alloc_meta_for_range(void *start, void *end)
{
	struct page *shadow_p, *origin_p;
	void *shadow, *origin;
	struct page *page;
	u64 size;

	start = (void *)PAGE_ALIGN_DOWN((u64)start);
	size = PAGE_ALIGN((u64)end - (u64)start);
	shadow = memblock_alloc(size, PAGE_SIZE);
	origin = memblock_alloc(size, PAGE_SIZE);
	for (u64 addr = 0; addr < size; addr += PAGE_SIZE) {
		page = virt_to_page_or_null((char *)start + addr);
		shadow_p = virt_to_page_or_null((char *)shadow + addr);
		set_no_shadow_origin_page(shadow_p);
		shadow_page_for(page) = shadow_p;
		origin_p = virt_to_page_or_null((char *)origin + addr);
		set_no_shadow_origin_page(origin_p);
		origin_page_for(page) = origin_p;
	}
}
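
/*
 * Note (assumption about the call site): this is expected to run from early
 * KMSAN initialization for ranges (such as the kernel image and early
 * memblock allocations) that became usable before the page allocator could
 * attach metadata to them.
 */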

void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order)
{
	for (int i = 0; i < (1 << order); i++) {
		set_no_shadow_origin_page(&shadow[i]);
		set_no_shadow_origin_page(&origin[i]);
		shadow_page_for(&page[i]) = &shadow[i];
		origin_page_for(&page[i]) = &origin[i];
	}
}