xref: /openbmc/linux/mm/kasan/shadow.c (revision 2612e3bbc0386368a850140a6c9b990cd496a5ec)
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN runtime code that manages shadow memory for
 * generic and software tag-based KASAN modes.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return kasan_check_range((void *)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#if !defined(CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX) && !defined(CONFIG_GENERIC_ENTRY)
/*
 * CONFIG_GENERIC_ENTRY relies on compiler emitted mem*() calls to not be
 * instrumented. KASAN-enabled toolchains should emit __asan_mem*() functions
 * for the sites they want to instrument.
 *
 * If we have a compiler that can instrument memintrinsics, never override
 * these, so that non-instrumented files can safely consider them as builtins.
 */
#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
#endif

void *__asan_memset(void *addr, int c, ssize_t len)
{
	if (!kasan_check_range(addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}
EXPORT_SYMBOL(__asan_memset);

#ifdef __HAVE_ARCH_MEMMOVE
void *__asan_memmove(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
EXPORT_SYMBOL(__asan_memmove);
#endif

void *__asan_memcpy(void *dest, const void *src, ssize_t len)
{
	if (!kasan_check_range(src, len, false, _RET_IP_) ||
	    !kasan_check_range(dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}
EXPORT_SYMBOL(__asan_memcpy);
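
/*
 * Illustrative sketch, not part of the kernel source: with a compiler that
 * provides CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX, an instrumented call
 * such as
 *
 *	memcpy(dst, src, len);
 *
 * is emitted as a call to __asan_memcpy() (or __hwasan_memcpy() for the
 * software tag-based mode), so kasan_check_range() validates both the
 * source and destination ranges before __memcpy() runs. The exact naming
 * and calling convention depend on the compiler.
 */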

#ifdef CONFIG_KASAN_SW_TAGS
void *__hwasan_memset(void *addr, int c, ssize_t len) __alias(__asan_memset);
EXPORT_SYMBOL(__hwasan_memset);
#ifdef __HAVE_ARCH_MEMMOVE
void *__hwasan_memmove(void *dest, const void *src, ssize_t len) __alias(__asan_memmove);
EXPORT_SYMBOL(__hwasan_memmove);
#endif
void *__hwasan_memcpy(void *dest, const void *src, ssize_t len) __alias(__asan_memcpy);
EXPORT_SYMBOL(__hwasan_memcpy);
#endif

void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	void *shadow_start, *shadow_end;

	if (!kasan_arch_is_ready())
		return;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	shadow_start = kasan_mem_to_shadow(addr);
	shadow_end = kasan_mem_to_shadow(addr + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}
EXPORT_SYMBOL(kasan_poison);
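
/*
 * Illustrative sketch, not part of the kernel source: with the usual
 * KASAN_SHADOW_SCALE_SHIFT of 3 (8-byte granules), the shadow byte for an
 * address is
 *
 *	kasan_mem_to_shadow(addr)
 *		= (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * so poisoning a granule-aligned 64-byte object above writes 64 / 8 = 8
 * shadow bytes with 'value'. The offset and scale are architecture-specific.
 */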

#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
	if (!kasan_arch_is_ready())
		return;

	if (size & KASAN_GRANULE_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
		*shadow = size & KASAN_GRANULE_MASK;
	}
}
#endif

void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	addr = kasan_reset_tag(addr);

	/*
	 * Skip KFENCE memory if called explicitly outside of sl*b. Also note
	 * that calls to ksize(), where size is not a multiple of machine-word
	 * size, would otherwise poison the invalid portion of the word.
	 */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;

	/* Unpoison all granules that cover the object. */
	kasan_poison(addr, round_up(size, KASAN_GRANULE_SIZE), tag, false);

	/* Partially poison the last granule for the generic mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(addr, size);
}
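
/*
 * Worked example, not part of the kernel source, assuming 8-byte granules:
 * unpoisoning an object of size 13 first unpoisons round_up(13, 8) = 16
 * bytes (two whole granules) and then, in the generic mode, writes
 * 13 & KASAN_GRANULE_MASK = 5 into the shadow byte of the last granule,
 * marking only its first 5 bytes as accessible.
 */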

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the former is
	 * arch-specific, the latter depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(): if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(ptep_get(pte));
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;
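
	/*
	 * Illustrative arithmetic, not part of the kernel source, assuming
	 * KASAN_SHADOW_SCALE_SHIFT == 3 and 4 KiB pages: onlining a 128 MiB
	 * block (32768 pages) needs 32768 >> 3 = 4096 shadow pages, i.e.
	 * 16 MiB of shadow, one shadow byte per 8 bytes of plugged memory.
	 */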

	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
		WARN_ON(start_kaddr % KASAN_MEMORY_PER_SHADOW_PAGE))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is mapped already, then it must have been
		 * mapped during boot. This can happen when onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that it was
		 * the latter case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(): the code to do that hasn't
		 * been written yet. So we'll just leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, DEFAULT_CALLBACK_PRI);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC

void __init __weak kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{
}

static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(ptep_get(ptep))))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(ptep_get(ptep)))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!kasan_arch_is_ready())
		return 0;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);

	/*
	 * User Mode Linux maps enough shadow memory for all of virtual memory
	 * at boot, so it doesn't need to allocate more for vmalloc; just
	 * clear the existing shadow.
	 *
	 * The remaining CONFIG_UML checks in this file exist for the same
	 * reason.
	 */
	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_VMALLOC_INVALID, shadow_end - shadow_start);
		return 0;
	}

	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
	shadow_end = PAGE_ALIGN(shadow_end);
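
	/*
	 * Illustrative arithmetic, not part of the kernel source, assuming
	 * 8-byte granules and 4 KiB pages: a 3-page (12 KiB) vmalloc area
	 * needs 12 KiB / 8 = 1536 bytes of shadow, so after the alignment
	 * above the loop below populates one or two shadow pages, depending
	 * on where the shadow range falls relative to page boundaries.
	 */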

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				  CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				  CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}
415bb359dbcSAndrey Konovalov 
kasan_depopulate_vmalloc_pte(pte_t * ptep,unsigned long addr,void * unused)416bb359dbcSAndrey Konovalov static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
417bb359dbcSAndrey Konovalov 					void *unused)
418bb359dbcSAndrey Konovalov {
419bb359dbcSAndrey Konovalov 	unsigned long page;
420bb359dbcSAndrey Konovalov 
421*c33c7948SRyan Roberts 	page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT);
422bb359dbcSAndrey Konovalov 
423bb359dbcSAndrey Konovalov 	spin_lock(&init_mm.page_table_lock);
424bb359dbcSAndrey Konovalov 
425*c33c7948SRyan Roberts 	if (likely(!pte_none(ptep_get(ptep)))) {
426bb359dbcSAndrey Konovalov 		pte_clear(&init_mm, addr, ptep);
427bb359dbcSAndrey Konovalov 		free_page(page);
428bb359dbcSAndrey Konovalov 	}
429bb359dbcSAndrey Konovalov 	spin_unlock(&init_mm.page_table_lock);
430bb359dbcSAndrey Konovalov 
431bb359dbcSAndrey Konovalov 	return 0;
432bb359dbcSAndrey Konovalov }

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labeled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |??AAAAAA|AAAAAAAA|AA??????|                < shadow
 *                 (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *  free_region_start                                    free_region_end
 *  |                 start                     end      |
 *  v                 v                         v        v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |        |       |                 |        |
 *      |        |       |         /-------/        |
 *      \-------\|/------/         |/---------------/
 *              |||                ||
 *             |FFAAAAAA|AAAAAAAA|AAF?????|                < shadow
 *                 (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * Concurrency
 * -----------
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code does not.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	if (!kasan_arch_is_ready())
		return;

	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);

	free_region_start = ALIGN(free_region_start, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= KASAN_MEMORY_PER_SHADOW_PAGE;

	free_region_end = ALIGN_DOWN(free_region_end, KASAN_MEMORY_PER_SHADOW_PAGE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += KASAN_MEMORY_PER_SHADOW_PAGE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		if (IS_ENABLED(CONFIG_UML)) {
			__memset(shadow_start, KASAN_SHADOW_INIT, shadow_end - shadow_start);
			return;
		}
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}
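
/*
 * Illustrative note, not part of the kernel source: with 4 KiB pages and
 * 8-byte granules, KASAN_MEMORY_PER_SHADOW_PAGE is 32 KiB, so one shadow
 * page describes 32 KiB of vmalloc space. A shadow page at a boundary of
 * [start, end) is therefore only released when the enclosing free region
 * covers the whole 32 KiB that the shadow page maps.
 */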

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags)
{
	/*
	 * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
	 * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
	 * Software KASAN modes can't optimize zeroing memory by combining it
	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
	 */

	if (!kasan_arch_is_ready())
		return (void *)start;

	if (!is_vmalloc_or_module_addr(start))
		return (void *)start;

	/*
	 * Don't tag executable memory with the tag-based mode.
	 * The kernel doesn't tolerate having the PC register tagged.
	 */
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS) &&
	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
		return (void *)start;

	start = set_tag(start, kasan_random_tag());
	kasan_unpoison(start, size, false);
	return (void *)start;
}
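
/*
 * Usage note, an illustrative sketch rather than kernel source: with
 * CONFIG_KASAN_SW_TAGS the pointer returned above carries a fresh random
 * tag, so callers must continue with the returned pointer, e.g.:
 *
 *	addr = __kasan_unpoison_vmalloc(addr, size, flags);
 *
 * Accessing the memory through the original, differently tagged pointer
 * would be reported as a tag mismatch.
 */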

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void __kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!kasan_arch_is_ready())
		return;

	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_GRANULE_SIZE);
	kasan_poison(start, size, KASAN_VMALLOC_INVALID, false);
}

#else /* CONFIG_KASAN_VMALLOC */

int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
				KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	if (IS_ENABLED(CONFIG_UML)) {
		__memset((void *)shadow_start, KASAN_SHADOW_INIT, shadow_size);
		return 0;
	}

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		struct vm_struct *vm = find_vm_area(addr);
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		vm->flags |= VM_KASAN;
		kmemleak_ignore(ret);

		if (vm->flags & VM_DEFER_KMEMLEAK)
			kmemleak_vmalloc(vm, size, gfp_mask);

		return 0;
	}

	return -ENOMEM;
}
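
/*
 * Illustrative arithmetic, not part of the kernel source, assuming 8-byte
 * granules and 4 KiB pages: a 100 KiB module allocation needs
 * (102400 + 7) >> 3 = 12800 bytes of shadow, which round_up() turns into
 * shadow_size = 16 KiB (four shadow pages).
 */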

void kasan_free_module_shadow(const struct vm_struct *vm)
{
	if (IS_ENABLED(CONFIG_UML))
		return;

	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#endif