// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

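/*
 * Allocate size bytes, aligned to size, from node-local memory above
 * MAX_DMA_ADDRESS. This uses the _nopanic variant, so it can return
 * NULL; the huge-page paths below check for that and fall back to
 * 4K allocations.
 */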
static __init void *early_alloc(size_t size, int nid)
{
	return memblock_virt_alloc_try_nid_nopanic(size, size,
		__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
}

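/*
 * Populate the shadow at PMD level: use a 2M page when PSE is available
 * and [addr, end) covers exactly one aligned PMD; otherwise populate a
 * PTE page and map 4K shadow pages one by one, skipping PTEs that are
 * already set.
 */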
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

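/*
 * Same idea one level up: use a 1G page when GBPAGES is available and
 * [addr, end) covers exactly one aligned PUD; otherwise recurse into
 * kasan_populate_pmd() for every PMD that isn't already a huge mapping.
 */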
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

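/*
 * Walk the P4D level, allocating PUD pages as needed, and hand each
 * chunk that isn't already a huge mapping down to kasan_populate_pud().
 */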
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

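/*
 * Top of the population walk: allocate a P4D page if the PGD entry is
 * empty, then populate each P4D-sized chunk of [addr, end).
 */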
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

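/*
 * Populate real (writable) shadow memory for [addr, end), rounding the
 * range out to page boundaries, with allocations taken from node nid.
 */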
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

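/*
 * Populate the shadow that corresponds to one physical memory range
 * from the e820-derived pfn_mapped[] table, keeping the shadow
 * allocations on the same node as the memory they describe.
 */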
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

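/*
 * Unmap the early zero shadow: clear whole PGD entries where possible,
 * then fall back to clearing P4D entries in the last, partially covered
 * PGD (with 5-level paging KASAN_SHADOW_END is not PGD-aligned).
 */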
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a nop, so use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

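/*
 * p4d_offset() that works before init_mm is usable: with 4-level paging
 * the p4d is folded into the pgd; with 5-level paging the p4d table is
 * reached through the kernel text mapping (__START_KERNEL_map), since
 * the direct mapping may not exist yet.
 */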
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

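/*
 * Point every unpopulated entry in [addr, end) at the shared zero
 * shadow tables. Nothing is allocated here: the early shadow is built
 * entirely from the statically allocated kasan_zero_* pages.
 */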
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

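/*
 * Hook the early zero shadow into a top-level page table. Called for
 * both early_top_pgt and init_top_pgt, so the shadow is visible no
 * matter which page table early boot code runs on.
 */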
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_KASAN_INLINE
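/*
 * With inline instrumentation, a NULL-pointer dereference or a user
 * memory access yields a non-canonical shadow address and thus a
 * general protection fault rather than a KASAN report. Add a hint to
 * the die() output so such faults are easier to recognize.
 */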
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

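/*
 * Early KASAN setup: initialize the shared zero shadow tables, where
 * each level points at the level below and the leaf level maps
 * kasan_zero_page, then plug them into both boot-time top-level page
 * tables.
 */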
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

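/*
 * Main KASAN setup, run once memblock and the direct mapping are ready:
 * tear down the early zero shadow and rebuild the shadow for the whole
 * address space, backing real memory (the direct map, the cpu entry
 * area, kernel text and modules) with freshly allocated shadow and
 * everything else with the shared, read-only zero shadow.
 */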
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to a PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share its PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and collides with
	 * a bunch of things like kernel code, modules, the EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

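	/*
	 * Run on early_top_pgt while the shadow mappings are torn down
	 * and rebuilt; the switch back to init_top_pgt happens below,
	 * once the new shadow is complete.
	 */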
	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

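	/*
	 * The cpu entry area lives outside the direct mapping, so its
	 * shadow is populated separately; round the shadow range out to
	 * page boundaries first.
	 */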
	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
						PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
					PAGE_SIZE);

	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end,
				kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
				(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, so it may
	 * contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection is applied. */
	__flush_tlb_all();

	/* kasan_depth was non-zero to suppress reports during early boot. */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}