// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

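/*
 * Allocate naturally aligned memory from memblock on node @nid for early
 * shadow pages and page tables.  Callers that can fall back to smaller
 * mappings pass should_panic=false and handle failure themselves.
 */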
static __init void *early_alloc(size_t size, int nid, bool should_panic)
{
	void *ptr = memblock_alloc_try_nid(size, size,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	if (!ptr && should_panic)
		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

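/*
 * Populate shadow for [addr, end) at the PTE level.  If the range covers a
 * whole, aligned PMD and the CPU has 2M pages, try a single large mapping
 * first and fall back to 4K pages if the allocation or pmd_set_huge() fails.
 */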
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			memblock_free(p, PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

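/*
 * Same as kasan_populate_pmd(), one level up: use a 1G page where the
 * hardware, size and alignment allow it, otherwise descend to PMDs.
 */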
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			memblock_free(p, PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_leaf(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

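/*
 * Populate shadow for the virtual address range [addr, end) with pages
 * allocated on node @nid, walking the kernel page tables from the PGD down.
 */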
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

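/* Populate shadow for the physical memory range described by @range. */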
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

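/*
 * Remove the early shadow mappings between KASAN_SHADOW_START and
 * KASAN_SHADOW_END.  The last PGD entry is shared with other kernel
 * mappings, so its tail is cleared one p4d at a time.
 */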
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
		 * instead.
		 */
		if (pgtable_l5_enabled())
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

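/*
 * Return the p4d for @addr without using __va(): with 4-level paging the
 * p4d is folded into the pgd; with 5-level paging the physical address
 * stored in the pgd is translated through the kernel text mapping, since
 * the direct mapping is not usable this early.
 */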
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

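/*
 * Point every empty p4d in [addr, end) (and the pgd above it, if needed)
 * at the early shadow tables so the whole shadow range reads as zero.
 */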
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

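/* Hook the early zero shadow into @pgd for the whole KASAN shadow range. */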
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
					       unsigned long addr,
					       unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;
	void *p;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (p4d_none(*p4d)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			p4d_populate(&init_mm, p4d, p);
		}
	} while (p4d++, addr = next, addr != end);
}

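/*
 * With CONFIG_KASAN_VMALLOC only the top levels (pgd/p4d) of the vmalloc
 * shadow are populated here so that they get synced into every page table;
 * the lower levels are filled on demand when vmalloc maps the shadow.
 */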
static void __init kasan_shallow_populate_pgds(void *start, void *end)
{
	unsigned long addr, next;
	pgd_t *pgd;
	void *p;

	addr = (unsigned long)start;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, (unsigned long)end);

		if (pgd_none(*pgd)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			pgd_populate(&init_mm, pgd, p);
		}

		/*
		 * we need to populate p4ds to be synced when running in
		 * four level mode - see sync_global_pgds_l4()
		 */
		kasan_shallow_populate_p4ds(pgd, addr, next);
	} while (pgd++, addr = next, addr != (unsigned long)end);
}

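/*
 * Build the early shadow: one page per page-table level, each entry
 * pointing at the level below, so that every shadow read returns zero
 * before kasan_init() constructs the real shadow mapping.
 */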
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

static unsigned long kasan_mem_to_shadow_align_down(unsigned long va)
{
	unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va);

	return round_down(shadow, PAGE_SIZE);
}

static unsigned long kasan_mem_to_shadow_align_up(unsigned long va)
{
	unsigned long shadow = (unsigned long)kasan_mem_to_shadow((void *)va);

	return round_up(shadow, PAGE_SIZE);
}

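/*
 * Populate shadow for an arbitrary kernel virtual address range, rounding
 * the corresponding shadow range out to page boundaries.
 */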
void __init kasan_populate_shadow_for_vaddr(void *va, size_t size, int nid)
{
	unsigned long shadow_start, shadow_end;

	shadow_start = kasan_mem_to_shadow_align_down((unsigned long)va);
	shadow_end = kasan_mem_to_shadow_align_up((unsigned long)va + size);
	kasan_populate_shadow(shadow_start, shadow_end, nid);
}

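/*
 * Replace the early zero shadow with the real shadow mapping: clear the
 * early entries, populate shadow for the direct mapping, the kernel image
 * and the shared CPU entry area, and back the remaining gaps with the
 * write-protected zero page.
 */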
void __init kasan_init(void)
{
	unsigned long shadow_cea_begin, shadow_cea_per_cpu_begin, shadow_cea_end;
	int i;

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share a PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and it collides with
	 * a bunch of things like kernel code, modules, EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cea_begin = kasan_mem_to_shadow_align_down(CPU_ENTRY_AREA_BASE);
	shadow_cea_per_cpu_begin = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_PER_CPU);
	shadow_cea_end = kasan_mem_to_shadow_align_up(CPU_ENTRY_AREA_BASE +
						      CPU_ENTRY_AREA_MAP_SIZE);

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)VMALLOC_START));

	/*
	 * If we're in full vmalloc mode, don't back vmalloc space with early
	 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
	 * the global table and we can populate the lower levels on demand.
	 */
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate_pgds(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
		(void *)shadow_cea_begin);

	/*
	 * Populate the shadow for the shared portion of the CPU entry area.
	 * Shadows for the per-CPU areas are mapped on-demand, as each CPU's
	 * area is randomly placed somewhere in the 512GiB range and mapping
	 * the entire 512GiB range is prohibitively expensive.
	 */
	kasan_populate_shadow(shadow_cea_begin,
			      shadow_cea_per_cpu_begin, 0);

	kasan_populate_early_shadow((void *)shadow_cea_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
					(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}